content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# rbokeh example script for ly_quantile() (extracted from examples/ly_quantile.Rd)
library(rbokeh)
### Name: ly_quantile
### Title: Add a "quantile" layer to a Bokeh figure
### Aliases: ly_quantile
### ** Examples
# Quantile plot of iris sepal length, one series per species,
# with the legend placed in the top-left corner of the figure.
figure(legend_location = "top_left") %>%
  ly_quantile(Sepal.Length, group = Species, data = iris)
|
/data/genthat_extracted_code/rbokeh/examples/ly_quantile.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 237
|
r
|
library(rbokeh)
### Name: ly_quantile
### Title: Add a "quantile" layer to a Bokeh figure
### Aliases: ly_quantile
### ** Examples
figure(legend_location = "top_left") %>%
ly_quantile(Sepal.Length, group = Species, data = iris)
|
#########################################################################
# Balanced sampling to get an even distribution of samples through
# feature space, followed by weighted random sampling from a raster.
#########################################################################

# NOTE(review): installing packages inside a script is a one-time side
# effect best done interactively, so the install calls are commented out.
# install.packages("sampling")
library("sampling", lib.loc = "~/R/win-library/3.0")

## `n1` is the output of an environmental-variable analysis (e.g. water
## temperature, salinity, etc.) and is assumed to be in scope.
## Columns are referenced explicitly (n1$...) instead of attach(n1),
## which pollutes the search path and is a well-known R anti-pattern.

## Weighted distribution with probability of inclusion based on StDev value.
## See https://docs.google.com/a/oceandtm.com/file/d/0ByK_t_0IPCScWlVQQjRPU1hiTzQ/edit?usp=drivesdk
## page 47 (orig link = http://www.eustat.es/productosServicios/52.2_balanced_sampling.pdf)
X <- cbind(
  xsd = n1$xsd, lon = n1$lon, lat = n1$lat, level = n1$level,
  tmp1 = n1$tmp1, tmp7 = n1$tmp7, tmp14 = n1$tmp14,
  one = rep(1, length(n1$xsd))
)
pik <- inclusionprobabilities(X[, 1], 100)
s <- samplecube(X, pik)

## Export the selected samples and join them to the summary stats.
temp_sel <- n1[, c("lon", "lat", "level", "xsd")]
temp_sel$prob <- s
sample_pts <- temp_sel[which(temp_sel$prob > 0), ]

## ---------------------------------------------------------------------
## Random weighted sampling based on raster values
## ---------------------------------------------------------------------
# install.packages("raster")
library(raster)
r <- raster(file.choose())  # note forward slashes in path, not backslashes
n <- 100
cells <- sample(x = seq_len(ncell(r)), size = n, prob = values(r))
d <- data.frame(popdens = values(r)[cells])
# NB: CRS here is WGS84; get the appropriate proj4 string from
# www.spatialreference.org
coords <- xyFromCell(r, cells, spatial = TRUE)
pts <- SpatialPointsDataFrame(
  coords, d,
  proj4string = CRS('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
)
library(rgdal)  # writeOGR() lives in rgdal, which was never loaded originally
writeOGR(pts, '.', 'randpts100v3', 'ESRI Shapefile')
plot(pts)

## ---------------------------------------------------------------------
## Annotated version of the raster-sampling workflow above.
## (The original pasted free prose into the script and fused two
##  statements onto one line, which was a syntax error -- repaired here.)
## ---------------------------------------------------------------------
## Create a dummy raster to test without real data (uncomment):
# r <- raster(matrix(runif(1000000, 0, 1000), ncol = 100))
r <- raster('path/to/raster')  # note forward slashes in path, not backslashes
## Random sample of n raster cells, weighted by the raster values
## (i.e. weighted by population density): sample from the cell indices
## 1..ncell(r) with probability proportional to each cell's value.
n <- 1000
cells <- sample(x = seq_len(ncell(r)), size = n, prob = values(r))
## One-column data.frame holding the density of each sampled cell;
## values(r)[cells] subsets the full value vector to the sampled cells.
d <- data.frame(popdens = values(r)[cells])
## Cell-center coordinates of the sampled cells; spatial = TRUE returns a
## SpatialPoints object, which makes writing a shapefile straightforward.
coords <- xyFromCell(r, cells, spatial = TRUE)
pts <- SpatialPointsDataFrame(
  coords, d,
  proj4string = CRS('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
)
## Write out to an ESRI shapefile (requires GDAL installed on the system).
writeOGR(pts, '.', 'testpopdens', 'ESRI Shapefile')
|
/selected_sampling_Balanced_Weighted.R
|
no_license
|
ckwkalk/MUNnetCDF
|
R
| false
| false
| 3,562
|
r
|
#########################################################################
#Balanced sampling to get even distribution of samples through feature space
#########################################################################
install.packages("sampling")
library("sampling", lib.loc="~/R/win-library/3.0")
##output of environmental variable analysis e.g. water temperature salinity etc
attach(n1)
##weighted distribution with probability for inclusion based on StDev value
##see https://docs.google.com/a/oceandtm.com/file/d/0ByK_t_0IPCScWlVQQjRPU1hiTzQ/edit?usp=drivesdk
##page 47 (orig link=http://www.eustat.es/productosServicios/52.2_balanced_sampling.pdf)
X<-cbind(xsd,lon,lat,level,tmp1,tmp7,tmp14,one=rep(1,length(xsd)))
pik=inclusionprobabilities(X[,1],100)
s=samplecube(X,pik)
##export the selected samples and join them to the summary stats
temp_sel<-n1[,c("lon","lat","level","xsd")]
temp_sel$prob<-s
sample_pts <- temp_sel[ which(temp_sel$prob > 0), ]
##Random weighted sampling based on raster values
install.packages('raster')
library(raster)
r <- raster(file.choose()) # note forward slashes in path, not backslashes
n <- 100
cells <- sample(x=seq_len(ncell(r)), size=n, prob=values(r))
d <- data.frame(popdens=values(r)[cells])
coords <- xyFromCell(r, cells, spatial=TRUE) # NB: CRS here is WGS84... you can get the appropriate proj4 string from www.spatialreference.org
pts <- SpatialPointsDataFrame(coords, d, proj4string=CRS('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'))
writeOGR(pts, '.', 'randpts100v3', 'ESRI Shapefile')
plot(pts)
Now, with comments to explain what's going on:
install.packages('raster') library(raster) ## Create dummy raster (see commented line below for reading in true data)
# This creates a 10000 x 100 matrix of random uniform numbers between 0 and 1000.
#r <- raster(matrix(runif(1000000, 0, 1000), ncol=100))
r <- raster('path/to/raster') # note forward slashes in path, not backslashes
## Random sample of n raster cells, weighted by pop density (i.e. weighted by the values of the raster).
# Here, we take a sample of size n from the numbers 1 through 1,000,000 (seq_len(ncell(r)) produces a vector of the integers from 1 to the number of cells of r), where the probability weights of each of those numbers is equal to the values of r, i.e. our dummy population density data.
n <- 1000
cells <- sample(x=seq_len(ncell(r)), size=n, prob=values(r))
# We then create a data.frame with a single column, popdens, which contains just the pop density for the sampled cells. values(r)[cells] subsets the vector of pop densities, values(r), to just those values that correspond to the sampled cells.
d <- data.frame(popdens=values(r)[cells])
## Extract lat & lon of the chosen cells and return as a SpatialPoints object
# The xyFromCell function identifies the cell-center coordinates for given cells (in this case, cells, the cell numbers of the sampled cells) of the raster object (in this case r, our raster of pop densities). Setting spatial=TRUE returns the object as a SpatialPoints object, which facilitates writing out to a shapefile.
coords <- xyFromCell(r, cells, spatial=TRUE) ## combine coords with data to create SpatialPointsDataFrame object
# NB: CRS here is WGS84... you can get appropriate proj4 string from www.spatialreference.org
pts <- SpatialPointsDataFrame(coords, d, proj4string=CRS('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'))
## write out to ESRI shapefile # requires gdal installed on system
writeOGR(pts, '.', 'testpopdens', 'ESRI Shapefile')
|
\alias{gtkCellRendererEditingCanceled}
\name{gtkCellRendererEditingCanceled}
\title{gtkCellRendererEditingCanceled}
\description{
Causes the cell renderer to emit the "editing-canceled" signal. This
function is for use only by implementations of cell renderers that need to
notify the client program that an editing process was canceled and the
changes were not committed.
\strong{WARNING: \code{gtk_cell_renderer_editing_canceled} has been deprecated since version 2.6 and should not be used in newly-written code. Use \code{\link{gtkCellRendererStopEditing}} instead}
}
\usage{gtkCellRendererEditingCanceled(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkCellRenderer}}] A \code{\link{GtkCellRenderer}}}}
\details{ Since 2.4}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
/man/gtkCellRendererEditingCanceled.Rd
|
no_license
|
cran/RGtk2.10
|
R
| false
| false
| 813
|
rd
|
\alias{gtkCellRendererEditingCanceled}
\name{gtkCellRendererEditingCanceled}
\title{gtkCellRendererEditingCanceled}
\description{
Causes the cell renderer to emit the "editing-canceled" signal. This
function is for use only by implementations of cell renderers that need to
notify the client program that an editing process was canceled and the
changes were not committed.
\strong{WARNING: \code{gtk_cell_renderer_editing_canceled} has been deprecated since version 2.6 and should not be used in newly-written code. Use \code{\link{gtkCellRendererStopEditing}} instead}
}
\usage{gtkCellRendererEditingCanceled(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkCellRenderer}}] A \code{\link{GtkCellRenderer}}}}
\details{ Since 2.4}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
# general utilities --------------------------------------------------------
# Validate the user-supplied arguments shared by the get_* functions.
#
# station: character vector of FAA airport codes (flights/weather contexts)
# year:    numeric year of data to query
# month:   numeric vector of months (1-12)
# dir:     download directory; created (with an informative error) if absent
# context: which get_* function is calling ("flights", "weather", "planes",
#          ...); controls which checks run
#
# Errors via stop_glue() on invalid input; otherwise returns invisibly.
check_arguments <- function(station = NULL, year = NULL,
                            month = NULL, dir = NULL, context = NA) {
  # test internet connection first -- every context needs it
  if (!connected_to_internet()) {
    stop_no_internet()
  }
  # checking the "station" argument
  if (context %in% c("flights", "weather")) {
    if (!all(is.character(station))) {
      stop_glue("At least one of the provided station arguments, ",
                "{list(station)}, wasn't a character string. Have you ",
                "surrounded the airport names in quotes?")
    }
    if (!all(station %in% get_airports()$faa)) {
      stop_glue("Couldn't find at least one of the provided origin airports ",
                "{list(station)}. Please consider using the get_airports() function ",
                "to locate the desired FAA LID code!")
    }
  }
  # checking the "year" argument
  if (context %in% c("flights", "planes", "weather")) {
    if (!is.numeric(year)) {
      stop_glue("The provided `year` argument has class {class(year)}, but ",
                "it needs to be a numeric. Have you put the year in quotes?")
    }
    if (year > as.numeric(substr(Sys.Date(), 1, 4))) {
      stop_glue("The provided `year` is in the future. Oops. :-)")
    }
    if (year == as.numeric(substr(Sys.Date(), 1, 4))) {
      stop_glue("The data for this year isn't quite available yet. The data ",
                "for the previous year usually is released in February ",
                "or March!")
    }
    if (year < 1987) {
      stop_glue("Your `year` argument {year} is really far back in time! ",
                "`anyflights` data sources do not provide data this old.")
    }
    # fix: use scalar && (not vectorized &) inside if() conditions --
    # && short-circuits, and R >= 4.3 errors on length > 1 inputs to &&
    if (year < 2013 && context == "planes") {
      warning_glue("Planes data was not formatted consistently before 2013. ",
                   "Please use caution.")
    } else if (context != "planes" && year < 2010) {
      message_glue("Queries before 2010 are untested by the package. ",
                   "Please use caution!")
    }
  }
  # checking the "month" argument
  if (context %in% c("flights", "weather")) {
    if (!is.numeric(month)) {
      stop_glue("The provided `month` argument has class {class(month)}, but ",
                "it needs to be a numeric. Have you put the months in quotes?")
    }
    if (any(month > 12 | month < 1)) {
      stop_glue("Please enter only month values within 1 to 12.")
    }
  }
  # create the download directory if needed, surfacing a helpful error
  if (!is.null(dir)) {
    if (!dir.exists(dir)) {
      dir_ <- tryCatch(dir.create(dir), error = function(e) e)
      if (inherits(dir_, "error")) {
        stop_glue("anyflights had trouble making the folder specified by ",
                  "the directory argument {dir}. Here's the error: \n {dir_}")
      }
    }
  }
}
# a function derived from simonpcouch/gbfs to check if a URL exists
#
# Returns TRUE when either a HEAD or a GET request to `x` succeeds,
# FALSE when both fail. A URL with no scheme is assumed to be https.
# NOTE(review): the `quiet` argument is accepted but never used here.
url_exists <- function(x, quiet = FALSE, ...) {
  # wrap a fallible call so that errors become a NULL `result`
  capture_error <- function(code, otherwise = NULL, quiet = TRUE) {
    tryCatch(
      list(result = code, error = NULL),
      error = function(e) list(result = otherwise, error = e)
    )
  }
  safely <- function(.f, otherwise = NULL, quiet = TRUE) {
    function(...) capture_error(.f(...), otherwise, quiet)
  }
  head_safely <- safely(httr::HEAD)
  get_safely <- safely(httr::GET)
  # prepend a scheme when none is present (crude check: "http" anywhere)
  if (!grepl("http", x)) {
    x <- paste0("https://", x)
  }
  # try a lightweight HEAD first; fall back to a full GET if that fails
  head_res <- head_safely(x, ...)
  if (!is.null(head_res$result)) {
    return(TRUE)
  }
  get_res <- get_safely(x, ...)
  !is.null(get_res$result)
}
# a function to alert the user of no internet connection in a
# more informative/helpful way
stop_no_internet <- function() {
  # stop_glue() never returns, so there is nothing to return here; the
  # original `return(list())` after stop() was unreachable dead code.
  stop_glue("You don't seem to have an active internet connection. Please ",
            "connect to the internet to use the anyflights package.")
}
# a wrapper around has internet so that with_mock can be used in tests
# Returns TRUE when curl reports an active internet connection.
connected_to_internet <- function() {
  curl::has_internet()
}
# Signal an error whose message is built with glue interpolation
# (via glue_null, so NULL values render as "NULL").
# call. = FALSE keeps the internal call out of the error message.
stop_glue <- function(..., .sep = "", .envir = parent.frame(),
                      call. = FALSE, .domain = NULL) {
  msg <- glue_null(..., .sep = .sep, .envir = .envir)
  stop(msg, call. = call., domain = .domain)
}
# Signal a warning whose message is built with glue interpolation
# (via glue_null, so NULL values render as "NULL").
# call. = FALSE keeps the internal call out of the warning text.
warning_glue <- function(..., .sep = "", .envir = parent.frame(),
                         call. = FALSE, .domain = NULL) {
  warning(
    glue_null(..., .sep = .sep, .envir = .envir),
    call. = call., domain = .domain
  )
}
# Emit a message whose text is built with glue interpolation
# (via glue_null, so NULL values render as "NULL").
message_glue <- function(..., .sep = "", .envir = parent.frame(),
                         .domain = NULL, .appendLF = TRUE) {
  message(
    glue_null(..., .sep = .sep, .envir = .envir),
    domain = .domain, appendLF = .appendLF
  )
}
# glue messages, warnings, and errors
# Interpolate ... with glue, using null_transformer so that NULL values
# render as the literal string "NULL" instead of being dropped.
glue_null <- function(..., .sep = "", .envir = parent.frame()) {
  glue::glue(
    ..., .sep = .sep, .envir = .envir, .transformer = null_transformer
  )
}
# actually print NULLs in output
# glue transformer: evaluate the interpolated expression in `envir` and
# substitute the literal string "NULL" when the result is NULL (glue
# would otherwise drop the value entirely).
null_transformer <- function(text, envir) {
  value <- eval(parse(text = text, keep.source = FALSE), envir)
  if (is.null(value)) "NULL" else value
}
# Load an .rda file and return the first object stored in it, without
# touching the caller's environment.
load_as <- function(filepath) {
  sandbox <- new.env()
  first_name <- load(filepath, sandbox)[1]
  sandbox[[first_name]]
}
# Should network-dependent tests be skipped? TRUE when there is no
# internet connection or when the OS is not macOS.
skip_conditions <- function() {
  internet <- curl::has_internet()
  # fix: the original named this `on_mac`, but the condition is the
  # opposite -- TRUE when the OS is NOT Darwin
  not_on_mac <- unname(Sys.info()["sysname"]) != "Darwin"
  (!internet) | not_on_mac
}
# Download `url` to `file_path`, converting any download error into an
# informative stop_glue() error. On success returns download.file()'s
# status code (0).
download_file_wrapper <- function(url, file_path, quiet = TRUE){
  out <- tryCatch(
    utils::download.file(url, file_path, quiet = quiet),
    error = function(e) {e}
  )
  if (inherits(out, "error")) {
    # fix: surface the underlying error -- the failure is not always a
    # timeout, and the old message claimed it was
    stop_glue(
      "\n\n\nutils::download.file failed while downloading the file: ",
      "{conditionMessage(out)}\n",
      "If you are repeatedly getting a timeout error, try extending the ",
      "timeout period for your R session using ",
      "options(timeout = timeout_value_in_seconds)\n\n\n")
  }
  out
}
# get_flights utilities --------------------------------------------------

# Download one month of BTS on-time flight data into flight_exdir,
# extracting the largest csv from the zip and renaming it to
# "<year>-<month>.csv" so later steps can find it.
#
# pb:      a progress-bar object exposing $tick() and $message()
# diff_fn: zero-argument function returning elapsed time as a string
# NOTE(review): the `dir` argument is not used in this function body.
download_month <- function(year, month, dir, flight_exdir, pb, diff_fn) {
  # update the progress bar with the month being downloaded
  write_tick(pb = pb, paste0(" Downloading Flights Data for ",
                             months[month],
                             "..."))
  # put together the url for the relevant year and month
  fl_url <- make_flights_url(year, month)
  # make a temporary file to download to
  flight_temp <- tempfile(fileext = ".zip")
  # download the file
  download_file_wrapper(fl_url, flight_temp, quiet = TRUE)
  # ...and unzip it
  flight_files <- utils::unzip(flight_temp, list = TRUE)
  # only extract the biggest file (it's the one we want!)
  flight_csv <- flight_files$Name[order(flight_files$Length,
                                        decreasing = TRUE)[1]]
  utils::unzip(flight_temp, exdir = flight_exdir,
               junkpaths = TRUE, files = flight_csv)
  # rename the file so that it's easier to find elsewhere
  flight_src <- paste0(flight_exdir, "/", flight_csv)
  flight_dst <- paste0(flight_exdir, "/", year, "-", month, ".csv")
  file.rename(flight_src, flight_dst)
  write_message(pb,
                paste0("Downloaded Flights Data for ",
                       months[month]),
                diff_fn)
}
# Read a raw BTS on-time performance csv and tidy it into the
# nycflights13-style `flights` column layout.
#
# path:    path to the monthly csv extracted by download_month()
# station: character vector of origin FAA codes; only departures from
#          these airports are kept
get_flight_data <- function(path, station) {
  # read in the data
  suppressMessages(vroom::vroom(path,
                                progress = FALSE,
                                show_col_types = FALSE)) %>%
    # select relevant columns, renaming to the nycflights13 names
    dplyr::select(
      year = Year,
      month = Month,
      day = DayofMonth,
      dep_time = DepTime,
      sched_dep_time = CRSDepTime,
      dep_delay = DepDelay,
      arr_time = ArrTime,
      sched_arr_time = CRSArrTime,
      arr_delay = ArrDelay,
      carrier = Reporting_Airline,
      flight = Flight_Number_Reporting_Airline,
      tailnum = Tail_Number,
      origin = Origin,
      dest = Dest,
      air_time = AirTime,
      distance = Distance) %>%
    # only keep the relevant rows
    dplyr::filter(origin %in% station) %>%
    dplyr::mutate(
      # convert column classes
      dep_time = as.integer(dep_time),
      sched_dep_time = as.integer(sched_dep_time),
      flight = as.factor(flight),
      # helper time columns: sched_dep_time is encoded as H(H)MM
      hour = sched_dep_time %/% 100,
      minute = sched_dep_time %% 100,
      time_hour = lubridate::make_datetime(year, month, day, hour, 0, 0),
      # cleanup NAs in the tailnum column
      tailnum = dplyr::case_when(
        tailnum == "" ~ NA_character_,
        TRUE ~ tailnum),
      # convert column types to match the original data
      year = as.integer(year),
      month = as.integer(month),
      day = as.integer(day),
      arr_time = as.integer(arr_time),
      sched_arr_time = as.integer(sched_arr_time),
      # NOTE(review): `flight` was converted to a factor above, so
      # as.integer() here yields factor level codes, not the original
      # flight numbers -- confirm this round-trip is intended
      flight = as.integer(flight)
    )
}
# given a year and month, this function returns the URL to query for the data
make_flights_url <- function(year, month) {
  sprintf(
    paste0(
      "https://transtats.bts.gov/PREZIP/",
      "On_Time_Reporting_Carrier_On_Time_Performance_1987_present_%s_%s.zip"
    ),
    year, month
  )
}
# get_airlines utilities ----------------------------------------------------

# Normalize the `flights_data` argument: accept either a file path to a
# saved flights dataset (.rda) or a data frame containing a `carrier`
# column. Returns the data frame on success, NULL for anything else.
parse_flights_data_arg <- function(flights_data) {
  # a character argument may be a path to a saved dataset -- load it if so
  if (is.character(flights_data) && file.exists(flights_data)) {
    flights_data <- load_as(flights_data)
  }
  # accept only data frames that actually carry a `carrier` column
  if (is.data.frame(flights_data) && "carrier" %in% colnames(flights_data)) {
    return(flights_data)
  }
  NULL
}
# get_airports utilities -----------------------------------------------------

# readr column specification for the raw airports source data
airports_cols <- readr::cols(
  id = readr::col_integer(),
  name = readr::col_character(),
  city = readr::col_character(),
  country = readr::col_character(),
  faa = readr::col_character(),
  icao = readr::col_character(),
  lat = readr::col_double(),
  lon = readr::col_double(),
  alt = readr::col_double(),
  tz = readr::col_double(),
  dst = readr::col_character(),
  tzone = readr::col_character(),
  type = readr::col_character(),
  source = readr::col_character()
)
# get_weather utilities -------------------------------------------------

# readr column specification for the raw ASOS weather csv;
# any column not listed here defaults to double
weather_col_types <- readr::cols(
  .default = readr::col_double(),
  station = readr::col_character(),
  valid = readr::col_datetime(format = ""),
  skyc1 = readr::col_character(),
  skyc2 = readr::col_character(),
  skyc3 = readr::col_character(),
  skyc4 = readr::col_logical(),
  skyl4 = readr::col_logical(),
  wxcodes = readr::col_character(),
  peak_wind_time = readr::col_datetime(format = ""),
  metar = readr::col_character()
)
# Collapse a vector of requested months into c(start_month, end_month,
# last_day_of_end_month) for building a date-range query.
#
# month: numeric vector of months (1-12)
# year:  optional numeric year; when supplied, February's last day is
#        leap-year adjusted (the original hard-coded 28 and silently
#        dropped Feb 29 data in leap years)
process_month_arg <- function(month, year = NULL) {
  start_month <- min(month)
  end_month <- max(month)
  days_in_month <- c(31, 28, 31, 30,
                     31, 30, 31, 31,
                     30, 31, 30, 31)
  last_day <- days_in_month[end_month]
  # leap-year adjustment, only possible when the caller supplies a year
  if (!is.null(year) && end_month == 2) {
    is_leap <- (year %% 4 == 0 && year %% 100 != 0) || year %% 400 == 0
    if (is_leap) {
      last_day <- 29
    }
  }
  return(c(start_month, end_month, last_day))
}
# Download and tidy one station-year of ASOS weather data from the
# Iowa Environmental Mesonet, returning the nycflights13-style
# `weather` column layout.
#
# station:             FAA station identifier
# year:                numeric year
# dir:                 directory for the temporary raw csv
# month_and_day_range: c(start_month, end_month, last_day), as returned
#                      by process_month_arg()
# month:               the months actually requested; used to filter
#                      rows, which matters for discontinuous ranges
get_weather_for_station <- function(station, year, dir,
                                    month_and_day_range, month) {
  # query setup
  weather_url <- "http://mesonet.agron.iastate.edu/cgi-bin/request/asos.py?"
  weather_query <- list(
    station = station,
    data = "all",
    year1 = as.character(year),
    month1 = as.character(month_and_day_range[1]),
    day1 = "1",
    year2 = as.character(year),
    month2 = as.character(month_and_day_range[2]),
    day2 = as.character(month_and_day_range[3]),
    tz = "Etc/UTC",
    format = "comma",
    latlon = "no",
    direct = "yes"
  )
  # query the data, streaming straight to disk
  request <- httr::GET(weather_url,
                       query = weather_query,
                       httr::write_disk(paste0(dir,
                                               "/weather_",
                                               station,
                                               ".csv"),
                                        overwrite = TRUE))
  httr::stop_for_status(request)
  # load the data, but fast ("M" marks missing values in the raw csv)
  weather_raw <- vroom::vroom(file = paste0(dir,
                                            "/weather_",
                                            station,
                                            ".csv"),
                              comment = "#",
                              na = "M",
                              col_names = TRUE,
                              col_types = weather_col_types)
  # delete the raw data
  unlink(paste0(dir, "/weather_", station, ".csv"))
  # and return the tidied data object :-)
  weather_raw %>%
    # rename some columns
    dplyr::rename(origin = station,
                  time = valid,
                  temp = tmpf,
                  dewp = dwpf,
                  humid = relh,
                  wind_dir = drct,
                  wind_speed = sknt,
                  wind_gust = gust,
                  precip = p01i,
                  pressure = mslp,
                  visib = vsby,
                  feels_like = feel) %>%
    # get rid of the metadata column
    dplyr::select(-metar) %>%
    # mutate some new useful columns
    dplyr::mutate(time = as.POSIXct(strptime(time, "%Y-%m-%d %H:%M")),
                  # knots -> mph (1 knot = 1.15078 mph)
                  wind_speed = as.numeric(wind_speed) * 1.15078,
                  # fix: the gust conversion previously read from the
                  # already-converted wind_speed column, making wind_gust
                  # a copy of wind_speed; convert the gust values instead
                  wind_gust = as.numeric(wind_gust) * 1.15078,
                  year = as.integer(year),
                  month = as.integer(lubridate::month(time)),
                  day = lubridate::mday(time),
                  hour = lubridate::hour(time),
                  time_hour = ISOdatetime(year, month, day, hour, 0, 0)) %>%
    # filter to only relevant rows - necessary for discontinuous month ranges
    dplyr::filter(month %in% !!month) %>%
    # remove duplicates / incompletes: keep the first observation per hour
    dplyr::group_by(origin, month, day, hour) %>%
    dplyr::filter(dplyr::row_number() == 1) %>%
    dplyr::ungroup() %>%
    # reorder columns to match the original dataset
    dplyr::select(origin, year, month, day, hour, temp, dewp,
                  humid, wind_dir, wind_speed, wind_gust, precip,
                  pressure, visib, time_hour)
}
# get_planes utilities ------------------------------------------------------

# Download the FAA aircraft registry for `year`, tidy it, and optionally
# restrict it to tailnums that appear in `flights_data`.
get_planes_data <- function(year, dir, flights_data) {
  # put together the url to query the planes data at
  planes_src <- paste0(
    "http://registry.faa.gov/database/yearly/ReleasableAircraft.",
    year,
    ".zip"
  )
  # and a folder to save the planes data to
  planes_lcl <- paste0(dir, "/planes")
  if (!dir.exists(planes_lcl)) {dir.create(planes_lcl)}
  # download the planes data
  planes_tmp <- tempfile(fileext = ".zip")
  download_file_wrapper(planes_src, planes_tmp, quiet = TRUE)
  # ...and unzip it!
  utils::unzip(planes_tmp, exdir = planes_lcl, junkpaths = TRUE)
  # loading in and tidying the master planes data
  planes_master <- process_planes_master(planes_lcl)
  # loading in and tidying the planes reference data
  planes_ref <- process_planes_ref(planes_lcl)
  # join the master and ref data together
  planes <- join_planes_data(planes_master, planes_ref)
  # filter the planes data by the flights data, if relevant
  # (the assignment is the last expression, so its value is still
  # returned -- albeit invisibly)
  planes <- join_planes_to_flights_data(planes, flights_data)
}
# Read <planes_lcl>/MASTER.txt, keeping the registration number (nnum),
# aircraft reference code, and year columns; deletes planes_lcl when done.
process_planes_master <- function(planes_lcl) {
  suppressMessages(
    # read in the data, but fast
    planes_master <- vroom::vroom(paste0(planes_lcl, "/MASTER.txt"),
                                  progress = FALSE) %>%
      # the column names change every year, but the positions have stayed the
      # same -- select by position :-(
      dplyr::select(nnum = 1, code = 3, year = 5)
  )
  # delete the temporary folder
  unlink(x = planes_lcl, recursive = TRUE)
  planes_master
}
# Download and tidy the 2019 ACFTREF aircraft-reference data for joining
# against a year's MASTER.txt data.
#
# 99.96% of the tailnumbers that were in the 2013 data are in the
# 2019 data -- similar numbers hold for 2015, 2017. since formatting
# is so unstable, just query the 2019 acftref data for now and join
# to the given year's master.txt data to get the accurate data
# for tailnums licensed in that year
process_planes_ref <- function(planes_lcl) {
  if (!dir.exists(planes_lcl)) {dir.create(planes_lcl)}
  # download the planes acftref data
  planes_tmp <- tempfile(fileext = ".zip")
  download_file_wrapper(
    "http://registry.faa.gov/database/yearly/ReleasableAircraft.2019.zip",
    planes_tmp,
    quiet = TRUE)
  # ...and unzip it!
  utils::unzip(planes_tmp, exdir = planes_lcl, junkpaths = TRUE)
  # read in the data, but fast (skip = 1 drops the raw header row, since
  # explicit col_names are supplied)
  planes_ref <- vroom::vroom(paste0(planes_lcl,
                                    "/",
                                    "ACFTREF.txt"),
                             col_names = planes_ref_col_names,
                             col_types = planes_ref_col_types,
                             progress = FALSE,
                             skip = 1) %>%
    dplyr::select(code, mfr, model, type_acft,
                  type_eng, no_eng, no_seats, speed)
  # delete the temporary folder
  unlink(x = planes_lcl, recursive = TRUE)
  planes_ref
}
# Join the MASTER registration data to the ACFTREF reference data and
# shape the result into the nycflights13 `planes` column layout.
join_planes_data <- function(planes_master, planes_ref) {
  planes_master %>%
    dplyr::inner_join(planes_ref, by = "code") %>%
    dplyr::select(-code) %>%
    # 0 is treated as a missing-value sentinel; `speed` is read as
    # character (see planes_ref_col_types), hence NA_character_ here,
    # with the as.integer() conversion below
    dplyr::mutate(speed = dplyr::if_else(speed == 0, NA_character_, speed),
                  no_eng = dplyr::if_else(no_eng == 0, NA_integer_, no_eng),
                  no_seats = dplyr::if_else(no_seats == 0, NA_integer_, no_seats),
                  # engine codes are treated as 0-based (hence + 1);
                  # aircraft-type codes as 1-based -- TODO confirm both
                  # against the FAA registry documentation
                  engine = engine_types[type_eng + 1],
                  type = acft_types[type_acft],
                  # the registry stores tail numbers without the leading "N"
                  tailnum = paste0("N", nnum),
                  year = as.integer(year),
                  speed = as.integer(speed)) %>%
    dplyr::rename(manufacturer = mfr,
                  engines = no_eng,
                  seats = no_seats) %>%
    dplyr::select(tailnum, year, type, manufacturer, model, engines,
                  seats, speed, engine)
}
# column names for the raw ACFTREF.txt file (na1/na2 are unused filler)
planes_ref_col_names <- c("code", "mfr", "model", "type_acft", "type_eng", "ac",
                          "amat", "no_eng", "no_seats", "na1", "speed", "na2")
# engine-type labels, indexed as engine_types[type_eng + 1] in
# join_planes_data() (i.e. code 0 = "None")
engine_types <- c("None", "Reciprocating", "Turbo-prop", "Turbo-shaft",
                  "Turbo-jet", "Turbo-fan", "Ramjet", "2 Cycle", "4 Cycle",
                  "Unknown", "Electric", "Rotary")
# aircraft-type labels, indexed as acft_types[type_acft] in
# join_planes_data()
acft_types <- c("Glider", "Balloon", "Blimp/Dirigible",
                "Fixed wing single engine", "Fixed wing multi engine",
                "Rotorcraft", "Weight-shift-control", "Powered Parachute",
                "Gyroplane")
# readr column specification for ACFTREF.txt; `speed` is declared
# character here and converted with as.integer() later in
# join_planes_data()
planes_ref_col_types <- readr::cols(
  code = readr::col_character(),
  mfr = readr::col_character(),
  model = readr::col_character(),
  type_acft = readr::col_integer(),
  type_eng = readr::col_integer(),
  ac = readr::col_integer(),
  amat = readr::col_integer(),
  no_eng = readr::col_integer(),
  no_seats = readr::col_integer(),
  na1 = readr::col_character(),
  speed = readr::col_character(),
  na2 = readr::col_character()
)
# filter the planes data by the flights data, if relevant
# When a usable flights dataset is supplied, keep only the planes whose
# tailnum appears in it (sorted by tailnum); otherwise return planes
# unchanged.
join_planes_to_flights_data <- function(planes, flights_data) {
  # interpret the flights_data argument (path, data frame, or NULL)
  flights_df <- parse_flights_data_arg(flights_data)
  if (is.null(flights_df)) {
    return(planes)
  }
  planes %>%
    dplyr::semi_join(flights_df, "tailnum") %>%
    dplyr::arrange(tailnum)
}
# create_flights_package utilities -------------------------------------

# Save every dataset in the named list `data` into <name>/data/, one
# .rda file per dataset.
save_flights_data <- function(data, name) {
  dir.create(paste0(name, "/data"))
  # iterate over (dataset name, dataset) pairs
  purrr::map2(names(data), data, save_flights_dataset, name)
}
# Save one dataset to <name>/data/<dataset_name>.rda, bound to the
# object name `dataset_name` so that load() restores it under that name.
save_flights_dataset <- function(dataset_name, data, name) {
  target <- paste0(name, "/data/", dataset_name, ".rda")
  # bind the data to the desired object name before saving
  assign(dataset_name, data)
  save(list = dataset_name, file = target)
}
# Write R documentation stubs for each dataset present in <name>/data
# and run roxygen2 to generate the .Rd files.
# NOTE(review): relies on a package-internal `sysdata` object (a named
# list of roxygen text) that is not visible in this file -- confirm.
write_flights_documentation <- function(name) {
  # check for which datasets are included in the package
  # fix: the replacement pattern was ".rda" with an unescaped dot and no
  # anchor, so it could match any character followed by "rda" anywhere
  # in a filename; anchor it to a literal ".rda" suffix
  which_data <- dir(paste0(name, "/data")) %>%
    stringr::str_replace("\\.rda$", "")
  # only write documentation for the relevant datasets
  needed_docs <- sysdata[which_data]
  # create the .R files in R/
  purrr::map(paste0(name, "/R/", names(needed_docs), ".R"), file.create)
  # write the .R data to them
  purrr::map2(needed_docs,
              paste0(name, "/R/", names(needed_docs), ".R"),
              writeLines)
  # generate .Rd documentation files
  roxygen2::roxygenize(name)
  invisible(TRUE)
}
# Validate the arguments to as_flights_package(): `data` must be a named
# list whose names are a subset of the five known datasets, each with the
# expected column layout; `name` must be a valid R package name.
# Errors via stop_glue() on invalid input.
check_as_flights_package_arguments <- function(data, name) {
  # check the supplied data
  if (!inherits(data, "list")) {
    stop_glue("The `data` argument to `as_flights_package` must be a named ",
              "list, but you've provided an object with class ",
              "{list(class(data))}.")
  }
  if (is.null(names(data))) {
    stop_glue("The `data` argument must have names.")
  }
  if (any(!names(data) %in% c("flights", "weather", "airlines",
                              "airports", "planes"))) {
    stop_glue('Each of the names of the list for the `data` argument must be ',
              'one of "flights", "weather", "airlines", "airports", ',
              'or "planes".')
  }
  # nycflights13 is optional: warn (once) rather than error when missing
  if (!suppressWarnings(requireNamespace("nycflights13", quietly = TRUE))) {
    warning_glue(
      "Some internal checks in as_flights_package make use of the nycflights13 ",
      "package, but nycflights13 is not installed. To avoid warnings in the ",
      'future, please install nycflights13 with `install.packages("nycflights13")`.'
    )
  }
  # per-dataset column-count checks (expected widths match nycflights13)
  if ("flights" %in% names(data)) {
    check_given_data(data[["flights"]], "flights", 19)
  }
  if ("weather" %in% names(data)) {
    check_given_data(data[["weather"]], "weather", 15)
  }
  if ("planes" %in% names(data)) {
    check_given_data(data[["planes"]], "planes", 9)
  }
  if ("airlines" %in% names(data)) {
    check_given_data(data[["airlines"]], "airlines", 2)
  }
  if ("airports" %in% names(data)) {
    check_given_data(data[["airports"]], "airports", 8)
  }
  # if the package name isn't valid, error out
  if (!grepl(.standard_regexps()$valid_package_name, name)) {
    stop_glue("The supplied package name isn't valid. See: \n ",
              "http://r-pkgs.had.co.nz/package.html \n",
              "for more information.")
  }
}
# Validate a user-supplied dataset against the expected shape.
#
# data_: the supplied data frame
# name:  one of "flights", "weather", "airlines", "airports", "planes"
# ncols: the expected number of columns
#
# Errors if the column count is wrong, or -- when nycflights13 is
# installed -- if any column name is absent from the corresponding
# nycflights13 reference dataset.
check_given_data <- function(data_, name, ncols) {
  if (ncol(data_) != ncols) {
    stop_glue("There should be {ncols} columns in the {name} data, but the ",
              "supplied {name} data has {ncol(data_)} columns.")
  }
  if (suppressWarnings(requireNamespace("nycflights13", quietly = TRUE))) {
    # fix: look the reference dataset up directly rather than via
    # eval(parse(text = ...)), which is fragile and unnecessary here
    reference <- getExportedValue("nycflights13", name)
    if (!all(names(data_) %in% names(reference))) {
      stop_glue("The column names in the {name} data don't match the ",
                "expected column names. See names(nycflights13::{name}) ",
                "for expected column names.")
    }
  }
}
# progress updates utility -----------------------------------------
# A wrapper around str_pad for easier defaults
# Right-pad `msg` with spaces to `width` characters (longer strings are
# left as-is; str_pad does not truncate).
pad_text <- function(msg, width = 50) {
  stringr::str_pad(msg, width, side = "right")
}
# call tick on pb with an update
# pb is a progress-bar object exposing $tick(); the padded update text
# is substituted for the `what` token in the bar format.
write_tick <- function(pb, update) {
  pb$tick(tokens = list(what = paste0(pad_text(update))))
}
# call message on pb with the total elapsed time
# NOTE(review): diff_fn() is called twice -- once to size the padding and
# once for the displayed value -- so if the second call crosses a second
# boundary the padding can be off by one character.
write_message <- function(pb, update, diff_fn) {
  pb$message(paste0(pad_text(update,
                             50 - stringr::str_length(diff_fn())),
                    diff_fn()))
}
# create a function that returns the difference in time
# from when the function was created, in seconds
#
# Returns a zero-argument closure producing strings like "12s".
create_diff_from_start <- function() {
  start <- Sys.time()
  function() {
    elapsed <- difftime(Sys.time(), start, units = "secs")
    paste0(as.character(round(as.numeric(elapsed))), "s")
  }
}
# convert month numbers to names for progress updates
# month.name is the built-in base constant c("January", ..., "December"),
# so there is no need to hand-roll the vector. Note that this binding
# shadows base::months() within the package namespace (as the original
# vector did too).
months <- month.name
|
/R/utils.R
|
no_license
|
als23/anyflights
|
R
| false
| false
| 24,251
|
r
|
# general utilities --------------------------------------------------------
# a general argument checking function
# Validates user-facing arguments before any download happens:
#   station - FAA airport code(s); must be character strings present in
#             get_airports()$faa (checked for "flights"/"weather" contexts)
#   year    - numeric year with 1987 <= year < current year
#             (checked for "flights"/"planes"/"weather" contexts)
#   month   - numeric month(s) in 1..12 (checked for "flights"/"weather")
#   dir     - output directory; created here when it does not yet exist
#   context - which anyflights query is being validated; selects the checks
# Called for its side effects (stop/warning/message); stops on the first
# failed check.
# NOTE(review): the year comparisons assume a length-1 `year`; a vector
# would error inside `if` -- confirm callers never pass more than one.
check_arguments <- function(station = NULL, year = NULL,
                            month = NULL, dir = NULL, context = NA) {
  # test internet connection
  if (!connected_to_internet()) {
    stop_no_internet()
  }
  # checking the "station" argument
  if (context %in% c("flights", "weather")) {
    if (!all(is.character(station))) {
      stop_glue("At least one of the provided station arguments, ",
                "{list(station)}, wasn't a character string. Have you ",
                "surrounded the airport names in quotes?")
    }
    if (!all(station %in% get_airports()$faa)) {
    stop_glue("Couldn't find at least one of the provided origin airports ",
              "{list(station)}. Please consider using the get_airports() function ",
              "to locate the desired FAA LID code!")
    }
  }
  # checking the "year" argument
  if (context %in% c("flights", "planes", "weather")) {
    if (!is.numeric(year)) {
      stop_glue("The provided `year` argument has class {class(year)}, but ",
                "it needs to be a numeric. Have you put the year in quotes?")
    }
    # current-year data is not published yet; future years are nonsense
    if (year > as.numeric(substr(Sys.Date(), 1, 4))) {
      stop_glue("The provided `year` is in the future. Oops. :-)")
    }
    if (year == as.numeric(substr(Sys.Date(), 1, 4))) {
      stop_glue("The data for this year isn't quite available yet. The data ",
                "for the previous year usually is released in February ",
                "or March!")
    }
    if (year < 1987) {
      stop_glue("Your `year` argument {year} is really far back in time! ",
                "`anyflights` data sources do not provide data this old.")
    }
    # soft warnings for old-but-allowed years
    if (year < 2013 & context == "planes") {
      warning_glue("Planes data was not formatted consistently before 2013. ",
                   "Please use caution.")
    } else if (context != "planes" & year < 2010) {
      message_glue("Queries before 2010 are untested by the package. ",
                   "Please use caution!")
    }
  }
  # checking the "month" argument
  if (context %in% c("flights", "weather")) {
    if (!is.numeric(month)) {
      stop_glue("The provided `month` argument has class {class(month)}, but ",
                "it needs to be a numeric. Have you put the months in quotes?")
    }
    if (any(month > 12 | month < 1)) {
      stop_glue("Please enter only month values within 1 to 12.")
    }
  }
  # create the output directory when needed, surfacing any failure
  if (!is.null(dir)) {
    if (!dir.exists(dir)) {
      dir_ <- tryCatch(dir.create(dir), error = function(e) e)
      if (inherits(dir_, "error")) {
        stop_glue("anyflights had trouble making the folder specified by ",
                  "the directory argument {dir}. Here's the error: \n {dir_}")
      }
    }
  }
}
# a function derived from simonpcouch/gbfs to check if a URL exists
# Returns TRUE when `x` answers an HTTP HEAD (or, failing that, GET)
# request without raising an error; FALSE otherwise. A https:// scheme is
# prepended when the URL does not already contain "http". `quiet` is kept
# for interface compatibility but is unused; `...` is forwarded to httr.
url_exists <- function(x, quiet = FALSE, ...) {
  # Run an httr verb against `x`, mapping any error to NULL.
  attempt <- function(verb) {
    tryCatch(verb(x, ...), error = function(e) NULL)
  }
  # Prepend a scheme when the URL lacks one.
  if (!stringr::str_detect(x, "http")) {
    x <- paste0("https://", x)
  }
  # HEAD is cheaper; fall back to GET only when HEAD fails.
  response <- attempt(httr::HEAD)
  if (is.null(response)) {
    response <- attempt(httr::GET)
  }
  !is.null(response)
}
# a function to alert the user of no internet connection in a
# more informative/helpful way
# Never returns: stop_glue() raises an error. The `return(list())` that
# used to follow the stop() call was unreachable dead code and is removed.
stop_no_internet <- function() {
  stop_glue("You don't seem to have an active internet connection. Please ",
            "connect to the internet to use the anyflights package.")
}
# a wrapper around has internet so that with_mock can be used in tests
# (tests can mock connected_to_internet() without touching curl itself).
# Returns TRUE/FALSE straight from curl::has_internet().
connected_to_internet <- function() {
  curl::has_internet()
}
# Raise an error whose message is interpolated glue-style in the caller's
# environment (NULL interpolations render as the string "NULL").
stop_glue <- function(..., .sep = "", .envir = parent.frame(),
                      call. = FALSE, .domain = NULL) {
  msg <- glue_null(..., .sep = .sep, .envir = .envir)
  stop(msg, call. = call., domain = .domain)
}
# Issue a warning whose message is interpolated glue-style in the caller's
# environment (NULL interpolations render as the string "NULL").
warning_glue <- function(..., .sep = "", .envir = parent.frame(),
                         call. = FALSE, .domain = NULL) {
  msg <- glue_null(..., .sep = .sep, .envir = .envir)
  warning(msg, call. = call., domain = .domain)
}
# Emit a message interpolated glue-style in the caller's environment
# (NULL interpolations render as the string "NULL").
message_glue <- function(..., .sep = "", .envir = parent.frame(),
                         .domain = NULL, .appendLF = TRUE) {
  msg <- glue_null(..., .sep = .sep, .envir = .envir)
  message(msg, domain = .domain, appendLF = .appendLF)
}
# glue messages, warnings, and errors
# Interpolate `...` with glue, using null_transformer so that embedded
# expressions evaluating to NULL print as "NULL" instead of collapsing
# the whole string.
glue_null <- function(..., .sep = "", .envir = parent.frame()) {
  glue::glue(
    ..., .sep = .sep, .envir = .envir, .transformer = null_transformer
  )
}
# actually print NULLs in output
# glue transformer: evaluate the {expr} text in `envir`; when the result
# is NULL, substitute the literal string "NULL" so it survives
# interpolation instead of vanishing.
null_transformer <- function(text, envir) {
  value <- eval(parse(text = text, keep.source = FALSE), envir)
  if (is.null(value)) "NULL" else value
}
# Load an .rda file and return the first object stored in it, without
# polluting the caller's environment.
load_as <- function(filepath) {
  sandbox <- new.env()
  first_name <- load(filepath, sandbox)[1]
  sandbox[[first_name]]
}
# Returns TRUE when tests should be skipped: either there is no internet
# connection or the platform is not macOS (sysname != "Darwin").
skip_conditions <- function() {
  internet <- curl::has_internet()
  # Renamed from `on_mac`, which held the opposite of what its name said;
  # the computed value and the returned condition are unchanged.
  not_on_mac <- unname(Sys.info()["sysname"]) != "Darwin"
  (!internet) | not_on_mac
}
# Download `url` to `file_path` via utils::download.file, converting any
# download error into an informative stop_glue() error about timeouts.
# Returns download.file's status code (0 on success).
# NOTE(review): every failure is reported as a timeout even when it was
# something else (e.g. a 404) -- consider including conditionMessage(e).
download_file_wrapper <- function(url, file_path, quiet = TRUE){
  out <- tryCatch(
    utils::download.file(url, file_path, quiet = quiet),
    error = function(e) {e}
  )
  if (inherits(out, "error")) {
    stop_glue(
      "\n\n\nutils::download.file timed out before finishing downloading the file. ",
      "If you are repeatedly getting a timeout error, try extending the ",
      "timeout period for your R session using ",
      "options(timeout = timeout_value_in_seconds)\n\n\n")
  }
  out
}
# get_flights utilities --------------------------------------------------
# Download one month of BTS on-time performance data into `flight_exdir`,
# extracting only the largest file from the zip and renaming the CSV to
# "<year>-<month>.csv" so later steps can find it. Progress is reported
# through the `pb` progress bar and the `diff_fn` elapsed-time closure.
download_month <- function(year, month, dir, flight_exdir, pb, diff_fn) {
  # update the progress bar with the month being downloaded
  write_tick(pb = pb, paste0(" Downloading Flights Data for ",
                             months[month],
                             "..."))
  # put together the url for the relevant year and month
  fl_url <- make_flights_url(year, month)
  # make a temporary file to download to
  flight_temp <- tempfile(fileext = ".zip")
  # download the file
  download_file_wrapper(fl_url, flight_temp, quiet = TRUE)
  # ...and unzip it
  flight_files <- utils::unzip(flight_temp, list = TRUE)
  # only extract biggest file (its the one we want!)
  flight_csv <- flight_files$Name[order(flight_files$Length,
                                        decreasing = TRUE)[1]]
  utils::unzip(flight_temp, exdir = flight_exdir,
               junkpaths = TRUE, files = flight_csv)
  # rename the file so that it's easier to find elsewhere
  flight_src <- paste0(flight_exdir, "/", flight_csv)
  flight_dst <- paste0(flight_exdir, "/", year, "-", month, ".csv")
  file.rename(flight_src, flight_dst)
  # leave a persistent "done" line above the progress bar
  write_message(pb,
                paste0("Downloaded Flights Data for ",
                       months[month]),
                diff_fn)
}
# Read one downloaded month of BTS data from `path`, keep and rename the
# nycflights13-style columns, filter to the requested origin `station`(s),
# and derive the hour/minute/time_hour helper columns. Returns a tibble.
get_flight_data <- function(path, station) {
  # read in the data
  suppressMessages(vroom::vroom(path,
                                progress = FALSE,
                                show_col_types = FALSE)) %>%
    # select relevant columns
    dplyr::select(
      year = Year,
      month = Month,
      day = DayofMonth,
      dep_time = DepTime,
      sched_dep_time = CRSDepTime,
      dep_delay = DepDelay,
      arr_time = ArrTime,
      sched_arr_time = CRSArrTime,
      arr_delay = ArrDelay,
      carrier = Reporting_Airline,
      flight = Flight_Number_Reporting_Airline,
      tailnum = Tail_Number,
      origin = Origin,
      dest = Dest,
      air_time = AirTime,
      distance = Distance) %>%
    # only keep the relevant rows
    dplyr::filter(origin %in% station) %>%
    dplyr::mutate(
      # convert column classes
      dep_time = as.integer(dep_time),
      sched_dep_time = as.integer(sched_dep_time),
      flight = as.factor(flight),
      # mutate some help time columns; scheduled departure is stored as
      # HHMM, so %/% 100 and %% 100 split it into hour and minute
      hour = sched_dep_time %/% 100,
      minute = sched_dep_time %% 100,
      time_hour = lubridate::make_datetime(year, month, day, hour, 0, 0),
      # cleanup NAs in the tailnum column
      tailnum = dplyr::case_when(
        tailnum == "" ~ NA_character_,
        TRUE ~ tailnum),
      # convert column types to match the original data
      year = as.integer(year),
      month = as.integer(month),
      day = as.integer(day),
      arr_time = as.integer(arr_time),
      sched_arr_time = as.integer(sched_arr_time),
      flight = as.integer(flight)
    )
}
# given a year and month, this function returns the URL to query for the data
# (the BTS PREZIP on-time performance archive for that year/month).
make_flights_url <- function(year, month) {
  file_name <- paste0(
    "On_Time_Reporting_Carrier_On_Time_Performance_1987_present_",
    year, "_", month, ".zip"
  )
  paste0("https://transtats.bts.gov/PREZIP/", file_name)
}
# get_airlines utilities ----------------------------------------------------
# Normalize the `flights_data` argument: accept either a path to a saved
# .rda file or an in-memory data frame. Returns the data frame when it
# carries a `carrier` column, otherwise NULL.
parse_flights_data_arg <- function(flights_data) {
  # A string naming an existing file is loaded from disk first.
  if (is.character(flights_data) && file.exists(flights_data)) {
    flights_data <- load_as(flights_data)
  }
  # Only a data frame with a `carrier` column is usable downstream.
  if (is.data.frame(flights_data) && "carrier" %in% colnames(flights_data)) {
    return(flights_data)
  }
  NULL
}
# get_airports utilities -----------------------------------------------------
# readr column specification for the raw airports source file, so every
# field is parsed with an explicit, stable type.
airports_cols <- readr::cols(
  id = readr::col_integer(),
  name = readr::col_character(),
  city = readr::col_character(),
  country = readr::col_character(),
  faa = readr::col_character(),
  icao = readr::col_character(),
  lat = readr::col_double(),
  lon = readr::col_double(),
  alt = readr::col_double(),
  tz = readr::col_double(),
  dst = readr::col_character(),
  tzone = readr::col_character(),
  type = readr::col_character(),
  source = readr::col_character()
)
# get_weather utilities -------------------------------------------------
# readr column specification for the raw ASOS weather CSV; every column
# not listed here defaults to double.
weather_col_types <- readr::cols(
  .default = readr::col_double(),
  station = readr::col_character(),
  valid = readr::col_datetime(format = ""),
  skyc1 = readr::col_character(),
  skyc2 = readr::col_character(),
  skyc3 = readr::col_character(),
  skyc4 = readr::col_logical(),
  skyl4 = readr::col_logical(),
  wxcodes = readr::col_character(),
  peak_wind_time = readr::col_datetime(format = ""),
  metar = readr::col_character()
)
# Collapse a vector of month numbers into c(first, last, last_day), where
# last_day is the day count of the final month. February is fixed at 28:
# leap years are not considered here.
process_month_arg <- function(month) {
  days_in_month <- c(31, 28, 31, 30,
                     31, 30, 31, 31,
                     30, 31, 30, 31)
  first_month <- min(month)
  final_month <- max(month)
  c(first_month, final_month, days_in_month[final_month])
}
# Query hourly ASOS weather for one `station` in `year`, covering the span
# month_and_day_range = c(first_month, last_month, last_day_of_last_month),
# then tidy the result into the nycflights13::weather column layout.
#   dir   - directory for the temporary raw CSV (removed before returning)
#   month - the requested month number(s); rows are filtered to these, so
#           discontinuous ranges work even though the query spans min..max
get_weather_for_station <- function(station, year, dir,
                                    month_and_day_range, month) {
  # query setup
  weather_url <- "http://mesonet.agron.iastate.edu/cgi-bin/request/asos.py?"
  weather_query <- list(
    station = station,
    data = "all",
    year1 = as.character(year),
    month1 = as.character(month_and_day_range[1]),
    day1 = "1",
    year2 = as.character(year),
    month2 = as.character(month_and_day_range[2]),
    day2 = as.character(month_and_day_range[3]),
    tz = "Etc/UTC",
    format = "comma",
    latlon = "no",
    direct = "yes"
  )
  # query the data!
  request <- httr::GET(weather_url,
                       query = weather_query,
                       httr::write_disk(paste0(dir,
                                               "/weather_",
                                               station,
                                               ".csv"),
                                        overwrite = TRUE))
  httr::stop_for_status(request)
  # load the data, but fast !
  weather_raw <- vroom::vroom(file = paste0(dir,
                                            "/weather_",
                                            station,
                                            ".csv"),
                              comment = "#",
                              na = "M",
                              col_names = TRUE,
                              col_types = weather_col_types)
  # delete the raw data
  unlink(paste0(dir, "/weather_", station, ".csv"))
  # and return the tidied data object :-)
  weather_raw %>%
    # rename some columns
    dplyr::rename(origin = station,
                  time = valid,
                  temp = tmpf,
                  dewp = dwpf,
                  humid = relh,
                  wind_dir = drct,
                  wind_speed = sknt,
                  wind_gust = gust,
                  precip = p01i,
                  pressure = mslp,
                  visib = vsby,
                  feels_like = feel) %>%
    # get rid of the metadata column
    dplyr::select(-metar) %>%
    # mutate some new useful columns
    dplyr::mutate(time = as.POSIXct(strptime(time, "%Y-%m-%d %H:%M")),
                  # ASOS reports speeds in knots; convert to mph
                  wind_speed = as.numeric(wind_speed) * 1.15078,
                  # BUG FIX: this previously read
                  # `as.numeric(wind_speed) * 1.15078`, overwriting the
                  # gust column with the already-converted wind speed
                  # (wrong source column, converted twice). Convert the
                  # gust reading itself instead.
                  wind_gust = as.numeric(wind_gust) * 1.15078,
                  year = as.integer(year),
                  month = as.integer(lubridate::month(time)),
                  day = lubridate::mday(time),
                  hour = lubridate::hour(time),
                  time_hour = ISOdatetime(year, month, day, hour, 0, 0)) %>%
    # filter to only relevant rows - necessary for discontinuous month ranges
    dplyr::filter(month %in% !!month) %>%
    # remove duplicates / incompletes: keep the first report of each hour
    dplyr::group_by(origin, month, day, hour) %>%
    dplyr::filter(dplyr::row_number() == 1) %>%
    dplyr::ungroup() %>%
    # reorder columns to match the original dataset
    dplyr::select(origin, year, month, day, hour, temp, dewp,
                  humid, wind_dir, wind_speed, wind_gust, precip,
                  pressure, visib, time_hour)
}
# get_planes utilities ------------------------------------------------------
# Download the FAA aircraft registry released for `year`, join the
# registration master file to the aircraft-reference table, and (when
# `flights_data` is supplied) keep only tailnums that appear in it.
get_planes_data <- function(year, dir, flights_data) {
  # put together the url to query the planes data at
  planes_src <- paste0(
    "http://registry.faa.gov/database/yearly/ReleasableAircraft.",
    year,
    ".zip"
  )
  # and a folder to save the planes data to
  planes_lcl <- paste0(dir, "/planes")
  if (!dir.exists(planes_lcl)) {dir.create(planes_lcl)}
  # download the planes data
  planes_tmp <- tempfile(fileext = ".zip")
  download_file_wrapper(planes_src, planes_tmp, quiet = TRUE)
  # ...and unzip it!
  utils::unzip(planes_tmp, exdir = planes_lcl, junkpaths = TRUE)
  # loading in and tidying the master planes data
  planes_master <- process_planes_master(planes_lcl)
  # loading in and tidying the planes reference data
  planes_ref <- process_planes_ref(planes_lcl)
  # join the master and ref data together
  planes <- join_planes_data(planes_master, planes_ref)
  # filter the planes data by the flights data, if relevant
  planes <- join_planes_to_flights_data(planes, flights_data)
}
# Read MASTER.txt (the registration master file) from `planes_lcl`, keep
# the tail number, aircraft code, and year columns (selected by position,
# since the header names change between releases), then delete the folder.
process_planes_master <- function(planes_lcl) {
  suppressMessages(
    # read in the data, but fast
    planes_master <- vroom::vroom(paste0(planes_lcl, "/MASTER.txt"),
                                  progress = FALSE) %>%
      # the column names change every year, but the positions have stayed the
      # same -- select by position :-(
      dplyr::select(nnum = 1, code = 3, year = 5)
  )
  # delete the temporary folder
  unlink(x = planes_lcl, recursive = TRUE)
  planes_master
}
# Download and read the 2019 ACFTREF.txt aircraft-reference table, keeping
# the columns needed by join_planes_data. The 2019 release is used for
# every query year because the file's formatting is unstable across years
# (see comment below); the folder is deleted before returning.
process_planes_ref <- function(planes_lcl) {
  # 99.96% of the tailnumbers that were in the 2013 data are in the
  # 2019 data -- similar numbers hold for 2015, 2017. since formatting
  # is so unstable, just query the 2019 acftref data for now and join
  # to the given year's master.txt data to get the accurate data
  # for tailnums licensed in that year
  if (!dir.exists(planes_lcl)) {dir.create(planes_lcl)}
  # download the planes acftref data
  planes_tmp <- tempfile(fileext = ".zip")
  download_file_wrapper(
    "http://registry.faa.gov/database/yearly/ReleasableAircraft.2019.zip",
    planes_tmp,
    quiet = TRUE)
  # ...and unzip it!
  utils::unzip(planes_tmp, exdir = planes_lcl, junkpaths = TRUE)
  # read in the data, but fast (explicit names/types; skip the raw header)
  planes_ref <- vroom::vroom(paste0(planes_lcl,
                                    "/",
                                    "ACFTREF.txt"),
                             col_names = planes_ref_col_names,
                             col_types = planes_ref_col_types,
                             progress = FALSE,
                             skip = 1) %>%
    dplyr::select(code, mfr, model, type_acft,
                  type_eng, no_eng, no_seats, speed)
  # delete the temporary folder
  unlink(x = planes_lcl, recursive = TRUE)
  planes_ref
}
# Join the registration master rows to the aircraft-reference table on
# `code`, decode the engine/aircraft type codes into labels, normalize
# zero placeholders to NA, and shape the result into the
# nycflights13::planes column layout.
# NOTE(review): `speed` is read as character, so `speed == 0` relies on
# "0" == 0 coercion before the later as.integer() -- confirm intended.
join_planes_data <- function(planes_master, planes_ref) {
  planes_master %>%
    dplyr::inner_join(planes_ref, by = "code") %>%
    dplyr::select(-code) %>%
    dplyr::mutate(speed = dplyr::if_else(speed == 0, NA_character_, speed),
                  no_eng = dplyr::if_else(no_eng == 0, NA_integer_, no_eng),
                  no_seats = dplyr::if_else(no_seats == 0, NA_integer_, no_seats),
                  # type_eng appears to be 0-based, hence the +1 lookup;
                  # type_acft indexes acft_types directly (1-based)
                  engine = engine_types[type_eng + 1],
                  type = acft_types[type_acft],
                  tailnum = paste0("N", nnum),
                  year = as.integer(year),
                  speed = as.integer(speed)) %>%
    dplyr::rename(manufacturer = mfr,
                  engines = no_eng,
                  seats = no_seats) %>%
    dplyr::select(tailnum, year, type, manufacturer, model, engines,
                  seats, speed, engine)
}
# Column names for the raw ACFTREF.txt file (assigned positionally; the
# shipped header is skipped when reading).
planes_ref_col_names <- c("code", "mfr", "model", "type_acft", "type_eng", "ac",
                          "amat", "no_eng", "no_seats", "na1", "speed", "na2")
# Engine-type labels; looked up as engine_types[type_eng + 1], so the
# codes appear to start at 0 ("None") -- TODO confirm against the FAA
# data dictionary.
engine_types <- c("None", "Reciprocating", "Turbo-prop", "Turbo-shaft",
                  "Turbo-jet", "Turbo-fan", "Ramjet", "2 Cycle", "4 Cycle",
                  "Unknown", "Electric", "Rotary")
# Aircraft-type labels, indexed directly by type_acft (1-based).
acft_types <- c("Glider", "Balloon", "Blimp/Dirigible",
                "Fixed wing single engine", "Fixed wing multi engine",
                "Rotorcraft", "Weight-shift-control", "Powered Parachute",
                "Gyroplane")
# readr column specification matching planes_ref_col_names above.
planes_ref_col_types <- readr::cols(
  code = readr::col_character(),
  mfr = readr::col_character(),
  model = readr::col_character(),
  type_acft = readr::col_integer(),
  type_eng = readr::col_integer(),
  ac = readr::col_integer(),
  amat = readr::col_integer(),
  no_eng = readr::col_integer(),
  no_seats = readr::col_integer(),
  na1 = readr::col_character(),
  speed = readr::col_character(),
  na2 = readr::col_character()
)
# filter the planes data by the flights data, if relevant
# When `flights_data` resolves to a usable data frame (see
# parse_flights_data_arg), keep only planes whose tailnum occurs in it,
# sorted by tailnum; otherwise return `planes` unchanged.
join_planes_to_flights_data <- function(planes, flights_data) {
  # interpret the flights_data argument
  flights_data <- parse_flights_data_arg(flights_data)
  # join to flights data if it was supplied
  if (!is.null(flights_data)) {
    planes <- planes %>%
      dplyr::semi_join(flights_data, "tailnum") %>%
      dplyr::arrange(tailnum)
  }
  planes
}
# create_flights_package utilities -------------------------------------
# Create <name>/data and save each element of the named list `data` to
# its own .rda file via save_flights_dataset.
save_flights_data <- function(data, name) {
  dir.create(paste0(name, "/data"))
  purrr::map2(names(data), data, save_flights_dataset, name)
}
# Save one dataset as <name>/data/<dataset_name>.rda, binding the data to
# `dataset_name` first so the .rda stores the object under that name.
save_flights_dataset <- function(dataset_name, data, name) {
  assign(dataset_name, data)
  target_path <- file.path(name, "data", paste0(dataset_name, ".rda"))
  save(list = dataset_name, file = target_path)
}
# Write roxygen documentation stubs for every dataset present in
# <name>/data and run roxygenize to generate the .Rd files.
# NOTE(review): `sysdata` is a package-internal object not visible in this
# file; it presumably maps dataset names to roxygen text -- confirm.
# NOTE(review): ".rda" in str_replace is a regex (the dot matches any
# character); harmless for these fixed names.
write_flights_documentation <- function(name) {
  # check for which datasets are included in the package
  which_data <- dir(paste0(name, "/data")) %>% stringr::str_replace(".rda", "")
  # only write documentation for the relevant datasets
  needed_docs <- sysdata[which_data]
  # create the .R files in R/
  purrr::map(paste0(name, "/R/", names(needed_docs), ".R"), file.create)
  # write the .R data to them
  purrr::map2(needed_docs,
              paste0(name, "/R/", names(needed_docs), ".R"),
              writeLines)
  # generate .Rd documentation files
  roxygen2::roxygenize(name)
  invisible(TRUE)
}
# Validate the arguments to as_flights_package():
#   data - a named list whose names are drawn from flights/weather/
#          airlines/airports/planes; each element's columns are checked
#          against the matching nycflights13 dataset via check_given_data
#   name - the package name to create; must be a syntactically valid
#          R package name
# Stops on the first violation; warns when nycflights13 is unavailable
# for the column-name checks.
check_as_flights_package_arguments <- function(data, name) {
  # check the supplied data
  if (!inherits(data, "list")) {
    stop_glue("The `data` argument to `as_flights_package` must be a named ",
              "list, but you've provided an object with class ",
              "{list(class(data))}.")
  }
  if (is.null(names(data))) {
    stop_glue("The `data` argument must have names.")
  }
  if (any(!names(data) %in% c("flights", "weather", "airlines",
                              "airports", "planes"))) {
    stop_glue('Each of the names of the list for the `data` argument must be ',
              'one of "flights", "weather", "airlines", "airports", ',
              'or "planes".')
  }
  if (!suppressWarnings(requireNamespace("nycflights13", quietly = TRUE))) {
    warning_glue(
      "Some internal checks in as_flights_package make use of the nycflights13 ",
      "package, but nycflights13 is not installed. To avoid warnings in the ",
      'future, please install nycflights13 with `install.packages("nycflights13")`.'
    )
  }
  # per-dataset column-count (and column-name) checks
  if ("flights" %in% names(data)) {
    check_given_data(data[["flights"]], "flights", 19)
  }
  if ("weather" %in% names(data)) {
    check_given_data(data[["weather"]], "weather", 15)
  }
  if ("planes" %in% names(data)) {
    check_given_data(data[["planes"]], "planes", 9)
  }
  if ("airlines" %in% names(data)) {
    check_given_data(data[["airlines"]], "airlines", 2)
  }
  if ("airports" %in% names(data)) {
    check_given_data(data[["airports"]], "airports", 8)
  }
  # if the package name isn't valid, error out
  if (!grepl(.standard_regexps()$valid_package_name, name)) {
    stop_glue("The supplied package name isn't valid. See: \n ",
              "http://r-pkgs.had.co.nz/package.html \n",
              "for more information.")
  }
}
# Validate one user-supplied dataset for as_flights_package().
#   data_ - the candidate data frame
#   name  - dataset name ("flights", "weather", "planes", "airlines",
#           "airports"); must match an nycflights13 export
#   ncols - expected number of columns
# Errors when the column count is wrong or (if nycflights13 is installed)
# when any column name is absent from the reference dataset.
check_given_data <- function(data_, name, ncols) {
  if (ncol(data_) != ncols) {
    stop_glue("There should be {ncols} columns in the {name} data, but the ",
              "supplied {name} data has {ncol(data_)} columns.")
  }
  if (suppressWarnings(requireNamespace("nycflights13", quietly = TRUE))) {
    # Look the reference dataset up directly; this replaces the previous
    # eval(parse(text = ...)) construction, which is fragile and
    # unnecessary. getExportedValue is what `::` itself uses, and it also
    # resolves lazy-loaded package data.
    reference <- getExportedValue("nycflights13", name)
    if (!all(names(data_) %in% names(reference))) {
      stop_glue("The column names in the {name} data don't match the ",
                "expected column names. See names(nycflights13::{name}) ",
                "for expected column names.")
    }
  }
}
# progress updates utility -----------------------------------------
# A wrapper around str_pad for easier defaults
# Right-pads `msg` with spaces to `width` characters; strings already
# longer than `width` come back unchanged.
pad_text <- function(msg, width = 50) {
  stringr::str_pad(msg, width, side = "right")
}
# call tick on pb with an update
# Advances the progress bar one tick, displaying `update` padded to the
# default fixed width so successive messages line up.
write_tick <- function(pb, update) {
  pb$tick(tokens = list(what = paste0(pad_text(update))))
}
# call message on pb with the total elapsed time
# Prints a persistent line above the progress bar: `update`, padded so the
# elapsed time from diff_fn() lands at a fixed column.
# NOTE(review): diff_fn() is called twice, so the width used for padding
# and the time actually printed could differ by a second in rare cases.
write_message <- function(pb, update, diff_fn) {
  pb$message(paste0(pad_text(update,
                             50 - stringr::str_length(diff_fn())),
                    diff_fn()))
}
# create a function that returns the difference in time
# from when the function was created, in seconds
# Returns a closure over the creation time; each call yields the elapsed
# whole seconds as a string like "12s". The closure is now returned
# explicitly (the original relied on the value of an assignment being the
# function's result) and no longer depends on the magrittr pipe.
create_diff_from_start <- function() {
  start <- Sys.time()
  function() {
    elapsed <- as.numeric(difftime(Sys.time(), start, units = "secs"))
    paste0(as.character(round(elapsed)), "s")
  }
}
# convert month numbers to names for progress updates
# Base R ships this exact vector as the built-in constant `month.name`.
months <- month.name
|
# Goal: draw the "ROC curve of a single variable".
# Clear the workspace.
# NOTE(review): rm(list = ls()) and a hard-coded setwd() are fragile
# outside an interactive exercise; avoid in shared code.
rm(list=ls())
gc()
library("data.table")
setwd("E:/UBA/2019-II/DM en Finanzas/Dropbox Prof/datasets")
# Load the data (one month of account records).
dataset <- fread("201902.txt")
# Create a 0/1 class: 1 when clase_ternaria is "BAJA+2", 0 otherwise.
# This simplifies the positive/negative counting below.
dataset[ , clase01:= as.numeric(clase_ternaria=="BAJA+2") ]
# Create a random tie-breaker column, useful for ordering at random the
# records that share the same value of a field, so the ROC segment drawn
# for a run of tied values is a straight line.
dataset[ , azar := runif(nrow(dataset)) ]
# Basic counts: universe size, total positives, total negatives.
universo <- nrow(dataset )
pos_total <- sum(dataset$clase01 )
neg_total <- universo - pos_total
# Two helper functions follow so several cuts of one variable can be
# overlaid on the same plot.
#-------------------------------------------------------------
# Initialize an empty ROC plot in (negatives, positives) space and draw
# the chance diagonal from (0, 0) to (neg_total, pos_total). Reads the
# global `dataset` (needs a clase01 column); called for its plot side
# effect only.
graficar_init = function()
{
  # basic counts, recomputed from the global dataset
  universo <- nrow(dataset )
  pos_total <- sum(dataset$clase01 )
  neg_total <- universo - pos_total
  # the chance diagonal
  azar_neg <- c( 0, neg_total )
  azar_pos <- c( 0, pos_total )
  # empty plot, then the diagonal
  plot( azar_neg,
        azar_pos,
        type="n",
        main=paste( "ROC Curve" ),
        xlab="neg",
        ylab="pos",
        pch=19)
  lines( azar_neg, azar_pos, type="l" , col="black", lwd=2)
}
#----------------------
# Overlay (in green) the two-segment ROC polyline of a single cut that
# predicts "positive" when pcolumna <= pvalor, and return its AUC.
#   dataset  - data.table with a 0/1 clase01 column
#   pcolumna - column name (string) to cut on; resolved via data.table's
#              get() non-standard evaluation
#   pvalor   - cut point
# Assumes graficar_init() has already set up the plot.
pred_graficar = function(dataset, pcolumna, pvalor )
{
  # basic counts
  universo <- nrow(dataset )
  pos_total <- sum(dataset$clase01 )
  neg_total <- universo - pos_total
  # positives and negatives captured below the cut
  pos_pred <- sum( dataset[ get(pcolumna) <= pvalor , clase01] )
  neg_pred <- sum( 1 - dataset[ get(pcolumna) <= pvalor, clase01] )
  # area under the two-segment polyline (trapezoid formula)
  AUC <- (pos_pred*neg_pred + (pos_pred + pos_total)*(neg_total-neg_pred) ) / (2*pos_total*neg_total)
  # the polyline's three points: origin, the cut, and (all neg, all pos)
  vneg <- c( 0, neg_pred, neg_total )
  vpos <- c( 0, pos_pred, pos_total )
  # overlay on the existing plot
  lines( vneg, vpos, type="l" , col="green", lwd=2)
  return( AUC )
}
#----------------------
# Draw the empty ROC canvas, then overlay several cuts of mcuentas_saldo.
graficar_init()
# Run these one at a time and watch the effect of each cut.
pred_graficar( dataset, "mcuentas_saldo", -100000 )
pred_graficar( dataset, "mcuentas_saldo", -10000 )
pred_graficar( dataset, "mcuentas_saldo", -2000 )
pred_graficar( dataset, "mcuentas_saldo", 0 )
pred_graficar( dataset, "mcuentas_saldo", 10000 )# best AUC of this set
pred_graficar( dataset, "mcuentas_saldo", 50000 )
pred_graficar( dataset, "mcuentas_saldo", 100000 )
#----------------------
# Key idea: sort the dataset by one variable and keep a running count of
# positives and negatives.
# Draw (in red) the full ROC curve obtained by sweeping the cut point over
# every value of pcolumna, and return the best single cut as
# list(variable, valor = cut with the highest AUC, AUC_max).
columna_graficar = function(dataset, pcolumna )
{
  # basic counts
  universo <- nrow(dataset )
  pos_total <- sum(dataset$clase01 )
  neg_total <- universo - pos_total
  # sort by <pcolumna, azar>:
  # within records sharing the same pcolumna value, order by the random
  # tie-breaker column `azar` created earlier;
  # NAs of pcolumna go first in the ordering
  univar <- dataset[ order(get(pcolumna), na.last=FALSE, azar), c("clase01", pcolumna), with=FALSE]
  # accumulate positives and negatives -- vectorized
  neg_acum <- cumsum( 1- univar$clase01 )
  pos_acum <- cumsum( univar$clase01 )
  # draw the curve, made up of thousands of points
  lines( neg_acum, pos_acum, type="l" , col="red", lwd=2)
  # AUC for every possible cut point (cutting exactly at each record)
  AUC_vector <- ( pos_acum*neg_acum + (pos_acum+pos_total)*(neg_total-neg_acum) ) / (2*pos_total*neg_total)
  # report the cut that yields the highest AUC
  return( list( "variable"= pcolumna,
                "valor" = univar[ which.max( AUC_vector ), get(pcolumna)],
                "AUC_max" = max( AUC_vector)
               )
        )
}
#----------------------
# Sweep several candidate variables and note their best AUCs.
columna_graficar( dataset, "mcuentas_saldo" )# AUC_max of 0.75
columna_graficar( dataset, "cliente_edad" )
columna_graficar( dataset, "Visa_mconsumototal" )# AUC_max of 0.732
columna_graficar( dataset, "ttarjeta_visa" )
columna_graficar( dataset, "ttarjeta_master" )
columna_graficar( dataset, "Visa_cuenta_estado" )
# Now reuse the optimal value found by the sweep above.
# Cut mcuentas_saldo at 1275.59.
graficar_init()
pred_graficar( dataset, "mcuentas_saldo", 1275.59 )
columna_graficar( dataset, "mcuentas_saldo" )# AUC_max of 0.75
# Cut Visa_mconsumototal at 863.19.
graficar_init()
pred_graficar( dataset, "Visa_mconsumototal", 863.19 )
columna_graficar( dataset, "Visa_mconsumototal" )# AUC_max of 0.732
# Cut cliente_edad at 36.
graficar_init()
pred_graficar( dataset, "cliente_edad", 36 )
columna_graficar( dataset, "cliente_edad" )# AUC_max of 0.527
# Contingency tables for the mcuentas_saldo cut.
ftable(dataset[ mcuentas_saldo <= 1275.59, clase_ternaria])
ftable(dataset[ mcuentas_saldo > 1275.59, clase_ternaria])
# Contingency tables for the Visa_mconsumototal cut.
ftable(dataset[ Visa_mconsumototal <= 863.19, clase_ternaria])
ftable(dataset[ Visa_mconsumototal > 863.19, clase_ternaria])
#-----------------------
# Detour: compute the cumulative gain instead of the AUC.
# Plot the cumulative gain curve obtained by sweeping pcolumna (gain per
# record: +19500 for a positive, -500 for a negative) and return
# list(variable, valor = value at the maximum gain, gan_max, regis = the
# record index where the maximum occurs).
columna_graficar_ganancia = function(dataset, pcolumna )
{
  # basic counts
  universo <- nrow(dataset )
  pos_total <- sum(dataset$clase01 )
  neg_total <- universo - pos_total
  # sort by <pcolumna, azar>:
  # within records sharing the same pcolumna value, order by the random
  # tie-breaker column `azar`; NAs of pcolumna go first
  univar <- dataset[ order(get(pcolumna), na.last=FALSE, azar), c("clase01", pcolumna), with=FALSE]
  # accumulate positives and negatives -- vectorized
  neg_acum <- cumsum( 1- univar$clase01 )
  pos_acum <- cumsum( univar$clase01 )
  gan_acum <- 19500*pos_acum - 500*neg_acum
  # plot
  plot( seq(universo),
        gan_acum,
        type="n",
        main=paste( "Ganancia ordenado por", pcolumna ),
        xlab="registros",
        ylab="Ganancia",
        pch=19)
  lines( seq(universo), gan_acum, type="l" , col="blue", lwd=2)
  return( list( "variable"= pcolumna,
                "valor" = univar[ which.max( gan_acum ), get(pcolumna)],
                "gan_max" = max( gan_acum),
                "regis" = which.max( gan_acum )
               )
        )
}
#---------------------
# Gain curves for the same candidate variables.
columna_graficar_ganancia( dataset, "mcuentas_saldo" )# AUC_max of 0.75
columna_graficar_ganancia( dataset, "cliente_edad" )
columna_graficar_ganancia( dataset, "Visa_mconsumototal" )# AUC_max of 0.732
columna_graficar_ganancia( dataset, "ttarjeta_visa" )
columna_graficar_ganancia( dataset, "ttarjeta_master" )
columna_graficar_ganancia( dataset, "Visa_cuenta_estado" )
# And now plotting only the first n records.
# Same cumulative-gain sweep as columna_graficar_ganancia, but the plot
# shows only the first `pcantidad` records (the maximum returned is still
# taken over the full curve).
columna_graficar_ganancia_n = function(dataset, pcolumna, pcantidad )
{
  # basic counts
  universo <- nrow(dataset )
  pos_total <- sum(dataset$clase01 )
  neg_total <- universo - pos_total
  # sort by <pcolumna, azar>, NAs first
  univar <- dataset[ order(get(pcolumna), na.last=FALSE, azar), c("clase01", pcolumna), with=FALSE]
  # accumulate positives and negatives -- vectorized
  neg_acum <- cumsum( 1- univar$clase01 )
  pos_acum <- cumsum( univar$clase01 )
  gan_acum <- 19500*pos_acum - 500*neg_acum
  # plot only the first pcantidad records
  plot( seq(pcantidad),
        gan_acum[1:pcantidad],
        type="n",
        main=paste( "Ganancia ordenado por", pcolumna ),
        xlab="registros",
        ylab="Ganancia",
        pch=19)
  lines( seq(pcantidad), gan_acum[1:pcantidad], type="l" , col="blue", lwd=2)
  return( list( "variable"= pcolumna,
                "valor" = univar[ which.max( gan_acum ), get(pcolumna)],
                "gan_max" = max( gan_acum),
                "regis" = which.max( gan_acum )
               )
        )
}
#---------------------
# Zoomed gain curves over the first 20k / 40k ordered records.
columna_graficar_ganancia_n( dataset, "mcuentas_saldo", 20000 )
columna_graficar_ganancia_n( dataset, "Visa_mconsumototal", 40000 )
|
/elementary/ROC_02.r
|
permissive
|
ktavo/dm-finanzas-2019
|
R
| false
| false
| 8,010
|
r
|
# Goal: draw the "ROC curve of a single variable".
# Clear the workspace.
# NOTE(review): rm(list = ls()) and a hard-coded setwd() are fragile
# outside an interactive exercise; avoid in shared code.
rm(list=ls())
gc()
library("data.table")
setwd("E:/UBA/2019-II/DM en Finanzas/Dropbox Prof/datasets")
# Load the data (one month of account records).
dataset <- fread("201902.txt")
# Create a 0/1 class: 1 when clase_ternaria is "BAJA+2", 0 otherwise.
# This simplifies the positive/negative counting below.
dataset[ , clase01:= as.numeric(clase_ternaria=="BAJA+2") ]
# Create a random tie-breaker column, useful for ordering at random the
# records that share the same value of a field, so the ROC segment drawn
# for a run of tied values is a straight line.
dataset[ , azar := runif(nrow(dataset)) ]
# Basic counts: universe size, total positives, total negatives.
universo <- nrow(dataset )
pos_total <- sum(dataset$clase01 )
neg_total <- universo - pos_total
# Two helper functions follow so several cuts of one variable can be
# overlaid on the same plot.
#-------------------------------------------------------------
# Initialize an empty ROC plot in (negatives, positives) space and draw
# the chance diagonal from (0, 0) to (neg_total, pos_total). Reads the
# global `dataset` (needs a clase01 column); called for its plot side
# effect only.
graficar_init = function()
{
  # basic counts, recomputed from the global dataset
  universo <- nrow(dataset )
  pos_total <- sum(dataset$clase01 )
  neg_total <- universo - pos_total
  # the chance diagonal
  azar_neg <- c( 0, neg_total )
  azar_pos <- c( 0, pos_total )
  # empty plot, then the diagonal
  plot( azar_neg,
        azar_pos,
        type="n",
        main=paste( "ROC Curve" ),
        xlab="neg",
        ylab="pos",
        pch=19)
  lines( azar_neg, azar_pos, type="l" , col="black", lwd=2)
}
#----------------------
# Overlay (in green) the two-segment ROC polyline of a single cut that
# predicts "positive" when pcolumna <= pvalor, and return its AUC.
#   dataset  - data.table with a 0/1 clase01 column
#   pcolumna - column name (string) to cut on; resolved via data.table's
#              get() non-standard evaluation
#   pvalor   - cut point
# Assumes graficar_init() has already set up the plot.
pred_graficar = function(dataset, pcolumna, pvalor )
{
  # basic counts
  universo <- nrow(dataset )
  pos_total <- sum(dataset$clase01 )
  neg_total <- universo - pos_total
  # positives and negatives captured below the cut
  pos_pred <- sum( dataset[ get(pcolumna) <= pvalor , clase01] )
  neg_pred <- sum( 1 - dataset[ get(pcolumna) <= pvalor, clase01] )
  # area under the two-segment polyline (trapezoid formula)
  AUC <- (pos_pred*neg_pred + (pos_pred + pos_total)*(neg_total-neg_pred) ) / (2*pos_total*neg_total)
  # the polyline's three points: origin, the cut, and (all neg, all pos)
  vneg <- c( 0, neg_pred, neg_total )
  vpos <- c( 0, pos_pred, pos_total )
  # overlay on the existing plot
  lines( vneg, vpos, type="l" , col="green", lwd=2)
  return( AUC )
}
#----------------------
# Draw the empty ROC canvas, then overlay several cuts of mcuentas_saldo.
graficar_init()
# Run these one at a time and watch the effect of each cut.
pred_graficar( dataset, "mcuentas_saldo", -100000 )
pred_graficar( dataset, "mcuentas_saldo", -10000 )
pred_graficar( dataset, "mcuentas_saldo", -2000 )
pred_graficar( dataset, "mcuentas_saldo", 0 )
pred_graficar( dataset, "mcuentas_saldo", 10000 )# best AUC of this set
pred_graficar( dataset, "mcuentas_saldo", 50000 )
pred_graficar( dataset, "mcuentas_saldo", 100000 )
#----------------------
# Key idea: sort the dataset by one variable and keep a running count of
# positives and negatives.
# Draw (in red) the full ROC curve obtained by sweeping the cut point over
# every value of pcolumna, and return the best single cut as
# list(variable, valor = cut with the highest AUC, AUC_max).
columna_graficar = function(dataset, pcolumna )
{
  # basic counts
  universo <- nrow(dataset )
  pos_total <- sum(dataset$clase01 )
  neg_total <- universo - pos_total
  # sort by <pcolumna, azar>:
  # within records sharing the same pcolumna value, order by the random
  # tie-breaker column `azar` created earlier;
  # NAs of pcolumna go first in the ordering
  univar <- dataset[ order(get(pcolumna), na.last=FALSE, azar), c("clase01", pcolumna), with=FALSE]
  # accumulate positives and negatives -- vectorized
  neg_acum <- cumsum( 1- univar$clase01 )
  pos_acum <- cumsum( univar$clase01 )
  # draw the curve, made up of thousands of points
  lines( neg_acum, pos_acum, type="l" , col="red", lwd=2)
  # AUC for every possible cut point (cutting exactly at each record)
  AUC_vector <- ( pos_acum*neg_acum + (pos_acum+pos_total)*(neg_total-neg_acum) ) / (2*pos_total*neg_total)
  # report the cut that yields the highest AUC
  return( list( "variable"= pcolumna,
                "valor" = univar[ which.max( AUC_vector ), get(pcolumna)],
                "AUC_max" = max( AUC_vector)
               )
        )
}
#----------------------
# Sweep several candidate variables and note their best AUCs.
columna_graficar( dataset, "mcuentas_saldo" )# AUC_max of 0.75
columna_graficar( dataset, "cliente_edad" )
columna_graficar( dataset, "Visa_mconsumototal" )# AUC_max of 0.732
columna_graficar( dataset, "ttarjeta_visa" )
columna_graficar( dataset, "ttarjeta_master" )
columna_graficar( dataset, "Visa_cuenta_estado" )
# Now reuse the optimal value found by the sweep above.
# Cut mcuentas_saldo at 1275.59.
graficar_init()
pred_graficar( dataset, "mcuentas_saldo", 1275.59 )
columna_graficar( dataset, "mcuentas_saldo" )# AUC_max of 0.75
# Cut Visa_mconsumototal at 863.19.
graficar_init()
pred_graficar( dataset, "Visa_mconsumototal", 863.19 )
columna_graficar( dataset, "Visa_mconsumototal" )# AUC_max of 0.732
# Cut cliente_edad at 36.
graficar_init()
pred_graficar( dataset, "cliente_edad", 36 )
columna_graficar( dataset, "cliente_edad" )# AUC_max of 0.527
# Contingency tables for the mcuentas_saldo cut.
ftable(dataset[ mcuentas_saldo <= 1275.59, clase_ternaria])
ftable(dataset[ mcuentas_saldo > 1275.59, clase_ternaria])
# Contingency tables for the Visa_mconsumototal cut.
ftable(dataset[ Visa_mconsumototal <= 863.19, clase_ternaria])
ftable(dataset[ Visa_mconsumototal > 863.19, clase_ternaria])
#-----------------------
# Detour: compute the cumulative gain instead of the AUC.
# Plot the cumulative gain curve obtained by sweeping pcolumna (gain per
# record: +19500 for a positive, -500 for a negative) and return
# list(variable, valor = value at the maximum gain, gan_max, regis = the
# record index where the maximum occurs).
columna_graficar_ganancia = function(dataset, pcolumna )
{
  # basic counts
  universo <- nrow(dataset )
  pos_total <- sum(dataset$clase01 )
  neg_total <- universo - pos_total
  # sort by <pcolumna, azar>:
  # within records sharing the same pcolumna value, order by the random
  # tie-breaker column `azar`; NAs of pcolumna go first
  univar <- dataset[ order(get(pcolumna), na.last=FALSE, azar), c("clase01", pcolumna), with=FALSE]
  # accumulate positives and negatives -- vectorized
  neg_acum <- cumsum( 1- univar$clase01 )
  pos_acum <- cumsum( univar$clase01 )
  gan_acum <- 19500*pos_acum - 500*neg_acum
  # plot
  plot( seq(universo),
        gan_acum,
        type="n",
        main=paste( "Ganancia ordenado por", pcolumna ),
        xlab="registros",
        ylab="Ganancia",
        pch=19)
  lines( seq(universo), gan_acum, type="l" , col="blue", lwd=2)
  return( list( "variable"= pcolumna,
                "valor" = univar[ which.max( gan_acum ), get(pcolumna)],
                "gan_max" = max( gan_acum),
                "regis" = which.max( gan_acum )
               )
        )
}
#---------------------
# Gain curves for the same candidate variables.
columna_graficar_ganancia( dataset, "mcuentas_saldo" )# AUC_max of 0.75
columna_graficar_ganancia( dataset, "cliente_edad" )
columna_graficar_ganancia( dataset, "Visa_mconsumototal" )# AUC_max of 0.732
columna_graficar_ganancia( dataset, "ttarjeta_visa" )
columna_graficar_ganancia( dataset, "ttarjeta_master" )
columna_graficar_ganancia( dataset, "Visa_cuenta_estado" )
# And now plotting only the first n records.
# Same cumulative-gain sweep as columna_graficar_ganancia, but the plot
# shows only the first `pcantidad` records (the maximum returned is still
# taken over the full curve).
columna_graficar_ganancia_n = function(dataset, pcolumna, pcantidad )
{
  # basic counts
  universo <- nrow(dataset )
  pos_total <- sum(dataset$clase01 )
  neg_total <- universo - pos_total
  # sort by <pcolumna, azar>, NAs first
  univar <- dataset[ order(get(pcolumna), na.last=FALSE, azar), c("clase01", pcolumna), with=FALSE]
  # accumulate positives and negatives -- vectorized
  neg_acum <- cumsum( 1- univar$clase01 )
  pos_acum <- cumsum( univar$clase01 )
  gan_acum <- 19500*pos_acum - 500*neg_acum
  # plot only the first pcantidad records
  plot( seq(pcantidad),
        gan_acum[1:pcantidad],
        type="n",
        main=paste( "Ganancia ordenado por", pcolumna ),
        xlab="registros",
        ylab="Ganancia",
        pch=19)
  lines( seq(pcantidad), gan_acum[1:pcantidad], type="l" , col="blue", lwd=2)
  return( list( "variable"= pcolumna,
                "valor" = univar[ which.max( gan_acum ), get(pcolumna)],
                "gan_max" = max( gan_acum),
                "regis" = which.max( gan_acum )
               )
        )
}
#---------------------
# Zoom into the first N records for the two strongest variables.
columna_graficar_ganancia_n( dataset, "mcuentas_saldo", 20000 )
columna_graficar_ganancia_n( dataset, "Visa_mconsumototal", 40000 )
|
#####################################
## APPORTIONMENT SOLVER/BUILDER? ##
#####################################
#########################
## HAMILTON'S METHOD ##
#########################
## Apportion `n.seats` seats among the first `n.places` rows of `pop.top`
## using Hamilton's (largest-remainder) method.
##
## pop.top : numeric matrix with one row per place plus a "Total" row, and
##           (at least) columns "Population", "Quota", "Initial", "Final",
##           "(Extra for Hamilton)"; only "Population" needs to be filled in.
## div     : standard divisor (total population / number of seats).
## n.seats : total number of seats to apportion.
## n.places: number of places (rows before the "Total" row).
## Returns the matrix restricted to Population/Quota/Initial/Final.
hamilton <- function(pop.top, div, n.seats, n.places){
  ## Step 1: standard quotas and their floors (the initial allocation).
  for (i in seq_len(n.places)){
    quota <- pop.top[i, "Population"] / div
    pop.top[i, "Quota"] <- quota
    pop.top[i, "Initial"] <- floor(quota)
  }
  pop.top["Total", "Initial"] <- sum(pop.top[seq_len(n.places), "Initial"])
  ## Step 2: the final allocation starts from the lower quotas; the
  ## fractional parts decide who receives the surplus seats.
  pop.top[, "Final"] <- pop.top[, "Initial"]
  for (i in seq_len(n.places)){
    pop.top[i, "(Extra for Hamilton)"] <- pop.top[i, "Quota"] - floor(pop.top[i, "Quota"])
  }
  ## Step 3: hand out one surplus seat at a time to the largest remaining
  ## fractional part until all n.seats are placed.
  while (pop.top["Total", "Final"] < n.seats){
    position <- which.max(pop.top[, "(Extra for Hamilton)"])
    pop.top[position, "Final"] <- pop.top[position, "Final"] + 1
    pop.top["Total", "Final"] <- sum(pop.top[seq_len(n.places), "Final"])
    ## Mark this remainder as used (-1) so the next largest one wins next.
    pop.top[position, "(Extra for Hamilton)"] <- -1
  }
  ## Present only the interesting columns (return pop.top for the full matrix).
  pop.top[, c("Population", "Quota", "Initial", "Final")]
}
##########################
## JEFFERSON'S METHOD ##
##########################
## Apportion `n.seats` seats using Jefferson's (greatest-divisor) method:
## round every quota DOWN, and if the total is wrong, nudge the divisor
## (x0.95 when too few seats, x1.05 when too many) and recurse.
##
## pop.top     : numeric matrix as built by build.apportionment.
## div         : divisor for this pass (standard divisor on first call).
## n.seats     : total number of seats to apportion.
## n.places    : number of places (rows before the "Total" row).
## mod.div.list: accumulator of the modified divisors tried so far.
## Returns a list: the final apportionment table and the divisor that worked
## (zero-length when the standard divisor already worked).
jefferson <- function(pop.top, div, n.seats, n.places, mod.div.list){
  ## Lower quotas with the current divisor.
  for (i in seq_len(n.places)){
    quota <- pop.top[i, "Population"] / div
    pop.top[i, "Quota"] <- quota
    pop.top[i, "Initial"] <- floor(quota)
  }
  pop.top["Total", "Initial"] <- sum(pop.top[seq_len(n.places), "Initial"])
  allocated <- pop.top["Total", "Initial"]
  if (allocated != n.seats){
    ## Too few seats -> shrink the divisor; too many -> grow it.
    ## The factors 0.95 / 1.05 are arbitrary small nudges.
    modified.divisor <- if (allocated < n.seats) div * 0.95 else div * 1.05
    mod.div.list <- c(mod.div.list, modified.divisor)
    ## Recurse with the adjusted divisor; in R the value of this call is
    ## the value of the enclosing call, so no explicit return is needed.
    jefferson(pop.top, modified.divisor, n.seats, n.places, mod.div.list)
  } else {
    ## Divisor works: present the table and the last divisor used
    ## (use pop.top instead of pop.top[, keep] to see the full matrix).
    keep <- c("Population", "Quota", "Initial")
    list("New Apportionment Table" = pop.top[, keep],
         "New Divisor Used" = mod.div.list[length(mod.div.list)])
  }
}
## RETURNS ORIGINAL JEFFERSON SOLUTION
## One Jefferson pass with the UNADJUSTED standard divisor: shows the
## (possibly wrong-sized) allocation the raw divisor produces.
##
## pop.top : numeric matrix as built by build.apportionment.
## div     : standard divisor.
## n.seats : total number of seats (unused by the computation; kept for a
##           uniform interface with the other solvers).
## n.places: number of places (rows before the "Total" row).
## Returns the matrix restricted to Population/Quota/Initial.
original.jefferson <- function(pop.top, div, n.seats, n.places){
  ## Lower quotas with the standard divisor.
  for (i in seq_len(n.places)){
    quota <- pop.top[i, "Population"] / div
    pop.top[i, "Quota"] <- quota
    pop.top[i, "Initial"] <- floor(quota)
  }
  pop.top["Total", "Initial"] <- sum(pop.top[seq_len(n.places), "Initial"])
  pop.top[, c("Population", "Quota", "Initial")]
}
## THIS HELPS IN DETERMINING THE RANGE OF DIVISORS
## USES JEFFERSON METHOD WITHOUT CHANGING DIVISOR
## Probe helper for range.jefferson: run one Jefferson pass with a FIXED
## divisor and report how many seats it allocates (no divisor adjustment).
##
## pop.top : numeric matrix as built by build.apportionment.
## div     : divisor to probe.
## n.seats : unused by the computation; kept for a uniform interface.
## n.places: number of places (rows before the "Total" row).
## Returns the total number of seats the divisor allocates.
test.jefferson <- function(pop.top, div, n.seats, n.places){
  for (i in seq_len(n.places)){
    ## Jefferson: every place gets the floor of its quota.
    pop.top[i, "Initial"] <- floor(pop.top[i, "Population"] / div)
  }
  sum(pop.top[seq_len(n.places), "Initial"])
}
## THIS DETERMINES THE RANGE OF THE DIVISORS FOR JEFFERSON
## Determine the (integer-stepped) range of divisors for which Jefferson's
## method allocates exactly n.seats.
##
## Phase 1 walks the divisor one unit at a time — downwards when too few
## seats are allocated, upwards when too many — until a working divisor is
## found. Phase 2 keeps walking in the same direction until the method
## breaks again; the first and last working divisors bound the range.
## Relies on the seat count being monotone in the divisor.
##
## Returns the sorted two-element range (after printing its title), or the
## string "it's fine" when the starting divisor already works.
range.jefferson <- function(pop.top, div, n.seats, n.places){
  ## Phase 1: find a working divisor, recording the walk direction.
  current <- div
  procedure <- "it's fine"
  repeat {
    seats <- test.jefferson(pop.top, div = current, n.seats, n.places)
    if (seats < n.seats){
      procedure <- "reduce"    # too few seats -> try a smaller divisor
      current <- current - 1
    } else if (seats > n.seats){
      procedure <- "increase"  # too many seats -> try a larger divisor
      current <- current + 1
    } else {
      break                    # `current` allocates exactly n.seats
    }
  }
  ## The starting divisor already worked: no range was searched.
  if (procedure == "it's fine"){
    return (procedure)
  }
  ## Phase 2: continue in the same direction until the allocation breaks;
  ## the last working divisor is the other end of the range.
  begin <- current
  step <- if (procedure == "reduce") -1 else 1
  repeat {
    nxt <- current + step
    if (test.jefferson(pop.top, div = nxt, n.seats, n.places) != n.seats){
      break
    }
    current <- nxt
  }
  range.divisors <- sort(c(begin, current))
  print ("Range of Divisors")
  range.divisors
}
##############################
## HUNTINGTON HILL METHOD ##
##############################
## Apportion `n.seats` seats using the Huntington-Hill method: round each
## quota UP when it exceeds the geometric mean of its two neighbouring
## integers, DOWN otherwise; if the total is wrong, nudge the divisor
## (x0.95 when too few seats, x1.05 when too many) and recurse.
##
## pop.top     : numeric matrix as built by build.apportionment.
## div         : divisor for this pass (standard divisor on first call).
## n.seats     : total number of seats to apportion.
## n.places    : number of places (rows before the "Total" row).
## mod.div.list: accumulator of the modified divisors tried so far.
## Returns a list: the final apportionment table and the divisor that worked
## (zero-length when the standard divisor already worked).
huntington.hill <- function(pop.top, div, n.seats, n.places, mod.div.list){
  for (i in seq_len(n.places)){
    quota <- pop.top[i, "Population"] / div
    pop.top[i, "Quota"] <- quota
    low.quota <- floor(quota)
    pop.top[i, "Lower.Quota"] <- low.quota
    ## Huntington-Hill rounds at the geometric mean sqrt(n * (n + 1)).
    geometric.mean <- sqrt(low.quota * (low.quota + 1))
    pop.top[i, "Geometric.Mean"] <- geometric.mean
    pop.top[i, "Initial"] <- if (quota > geometric.mean) ceiling(quota) else low.quota
  }
  pop.top["Total", "Initial"] <- sum(pop.top[seq_len(n.places), "Initial"])
  allocated <- pop.top["Total", "Initial"]
  if (allocated != n.seats){
    ## Too few seats -> shrink the divisor; too many -> grow it.
    ## The factors 0.95 / 1.05 are arbitrary small nudges.
    modified.divisor <- if (allocated < n.seats) div * 0.95 else div * 1.05
    mod.div.list <- c(mod.div.list, modified.divisor)
    ## Recurse with the adjusted divisor; its value is this call's value.
    huntington.hill(pop.top, modified.divisor, n.seats, n.places, mod.div.list)
  } else {
    ## Divisor works: present the table and the last divisor used
    ## (use pop.top instead of pop.top[, keep] to see the full matrix).
    keep <- c("Population", "Quota", "Lower.Quota", "Geometric.Mean", "Initial")
    list("New Apportionment Table" = pop.top[, keep],
         "New Divisor Used" = mod.div.list[length(mod.div.list)])
  }
}
## RETURNS ORIGINAL HUNTINGTON HILL SOLUTION
## One Huntington-Hill pass with the UNADJUSTED standard divisor: shows the
## (possibly wrong-sized) allocation the raw divisor produces.
##
## pop.top : numeric matrix as built by build.apportionment.
## div     : standard divisor.
## n.seats : unused by the computation; kept for a uniform interface.
## n.places: number of places (rows before the "Total" row).
## Returns the matrix restricted to the Huntington-Hill columns.
original.huntingtonhill <- function(pop.top, div, n.seats, n.places){
  for (i in seq_len(n.places)){
    quota <- pop.top[i, "Population"] / div
    pop.top[i, "Quota"] <- quota
    low.quota <- floor(quota)
    pop.top[i, "Lower.Quota"] <- low.quota
    ## Round up only when the quota exceeds the geometric mean sqrt(n*(n+1)).
    geometric.mean <- sqrt(low.quota * (low.quota + 1))
    pop.top[i, "Geometric.Mean"] <- geometric.mean
    pop.top[i, "Initial"] <- if (quota > geometric.mean) ceiling(quota) else low.quota
  }
  pop.top["Total", "Initial"] <- sum(pop.top[seq_len(n.places), "Initial"])
  pop.top[, c("Population", "Quota", "Lower.Quota", "Geometric.Mean", "Initial")]
}
## THIS HELPS IN DETERMINING THE RANGE OF DIVISORS
## USES HUNTINGTON HILL METHOD WITHOUT CHANGING DIVISOR
## Probe helper for range.huntingtonhill: run one Huntington-Hill pass with
## a FIXED divisor and report how many seats it allocates.
##
## pop.top : numeric matrix as built by build.apportionment.
## div     : divisor to probe.
## n.seats : unused by the computation; kept for a uniform interface.
## n.places: number of places (rows before the "Total" row).
## Returns the total number of seats the divisor allocates.
test.huntingtonhill <- function(pop.top, div, n.seats, n.places){
  for (i in seq_len(n.places)){
    quota <- pop.top[i, "Population"] / div
    low.quota <- floor(quota)
    ## Round up only when the quota exceeds the geometric mean sqrt(n*(n+1)).
    pop.top[i, "Initial"] <- if (quota > sqrt(low.quota * (low.quota + 1))) ceiling(quota) else low.quota
  }
  sum(pop.top[seq_len(n.places), "Initial"])
}
## THIS DETERMINES THE RANGE OF THE DIVISORS FOR HUNTINGTON HILL
## Determine the (integer-stepped) range of divisors for which the
## Huntington-Hill method allocates exactly n.seats. Mirror of
## range.jefferson, probing with test.huntingtonhill.
##
## Phase 1 walks the divisor one unit at a time — downwards when too few
## seats are allocated, upwards when too many — until a working divisor is
## found. Phase 2 keeps walking in the same direction until the method
## breaks again; the first and last working divisors bound the range.
## Relies on the seat count being monotone in the divisor.
##
## Returns the sorted two-element range (after printing its title), or the
## string "it's fine" when the starting divisor already works.
range.huntingtonhill <- function(pop.top, div, n.seats, n.places){
  ## Phase 1: find a working divisor, recording the walk direction.
  current <- div
  procedure <- "it's fine"
  repeat {
    seats <- test.huntingtonhill(pop.top, div = current, n.seats, n.places)
    if (seats < n.seats){
      procedure <- "reduce"    # too few seats -> try a smaller divisor
      current <- current - 1
    } else if (seats > n.seats){
      procedure <- "increase"  # too many seats -> try a larger divisor
      current <- current + 1
    } else {
      break                    # `current` allocates exactly n.seats
    }
  }
  ## The starting divisor already worked: no range was searched.
  if (procedure == "it's fine"){
    return (procedure)
  }
  ## Phase 2: continue in the same direction until the allocation breaks;
  ## the last working divisor is the other end of the range.
  begin <- current
  step <- if (procedure == "reduce") -1 else 1
  repeat {
    nxt <- current + step
    if (test.huntingtonhill(pop.top, div = nxt, n.seats, n.places) != n.seats){
      break
    }
    current <- nxt
  }
  range.divisors <- sort(c(begin, current))
  print ("Range of Divisors")
  range.divisors
}
#############################
## APPORTIONMENT BUILDER ##
#############################
## YOU HAVE TO INPUT THE NUMBER OF PLACES THAT NEED SEATS AND THE METHOD USED
## Build a random apportionment exercise and solve it with the requested
## method. Side effects only: prints the problem data and its solution.
##
## n.places: number of places (states) that receive seats.
## method  : one of "hamilton", "jefferson", "huntington.hill".
build.apportionment <- function(n.places, method){
  ## Seats to hand out: random integer in [50, 100].
  n.seats <- sample(50:100, 1)
  ## Places are labelled A, B, C, ...; replace stuff.names with a custom
  ## character vector of exactly n.places names if letters are not wanted.
  stuff.names <- LETTERS[seq_len(n.places)]
  places.name <- c(stuff.names, "Total")
  ## Working matrix: one row per place plus a "Total" row; the solvers fill
  ## in the non-Population columns they need.
  pop.top <- matrix(nrow = n.places + 1, ncol = 7)
  rownames(pop.top) <- places.name
  colnames(pop.top) <- c("Population", "Quota", "Lower.Quota", "Geometric.Mean",
                         "Initial", "Final", "(Extra for Hamilton)")
  ## Populations are multiples of 500 in [25000, 100000] so the numbers
  ## stay readable.
  nice.numbers <- seq(from = 25000, to = 100000, by = 500)
  populations <- sample(nice.numbers, n.places)
  pop.top[seq_len(n.places), "Population"] <- populations
  pop.top["Total", "Population"] <- sum(pop.top[seq_len(n.places), "Population"])
  ## Standard divisor = total population / number of seats.
  divisor <- pop.top["Total", "Population"] / n.seats
  print("Number of Seats")
  print(n.seats)
  print("Divisor")
  print(divisor)
  mod.div.list <- vector()
  if (method == "hamilton"){
    print(hamilton(pop.top, div = divisor, n.seats = n.seats, n.places = n.places))
  }
  else if (method == "jefferson"){
    print("Original Apportionment Table")
    print(original.jefferson(pop.top, div = divisor, n.seats = n.seats, n.places = n.places))
    print(jefferson(pop.top, div = divisor, n.seats = n.seats, n.places = n.places, mod.div.list = mod.div.list))
    print(range.jefferson(pop.top, div = divisor, n.seats = n.seats, n.places = n.places))
  }
  else if (method == "huntington.hill"){
    print("Original Apportionment Table")
    print(original.huntingtonhill(pop.top, div = divisor, n.seats = n.seats, n.places = n.places))
    print(huntington.hill(pop.top, div = divisor, n.seats = n.seats, n.places = n.places, mod.div.list = mod.div.list))
    print(range.huntingtonhill(pop.top, div = divisor, n.seats = n.seats, n.places = n.places))
  }
  else {
    ## Previously an unknown method failed silently; make it explicit.
    stop("Unknown method: ", method, call. = FALSE)
  }
}
## Automatically generates a seed number.
## However, seed numbers can be input manually:
## comment out sample(100:999, 1) and type in the desired seed number.
## It will look like this: auto <- 123#sample(100:999, 1)
auto <- sample(100:999, 1)
print(auto)
## Solves the apportionment problem using each method.
## Re-seeding with the same value before each call guarantees all three
## methods work on the identical randomly-generated instance.
set.seed(auto)
build.apportionment(n.places = 5, method = "hamilton")
set.seed(auto)
build.apportionment(n.places = 5, method = "jefferson")
set.seed(auto)
build.apportionment(n.places = 5, method = "huntington.hill")
|
/build apportionment.R
|
no_license
|
gkelting/education-creator-solver
|
R
| false
| false
| 30,384
|
r
|
#####################################
## APPORTIONMENT SOLVER/BUILDER? ##
#####################################
#########################
## HAMILTON'S METHOD ##
#########################
## Apportion `n.seats` seats among the first `n.places` rows of `pop.top`
## using Hamilton's (largest-remainder) method.
##
## pop.top : numeric matrix with one row per place plus a "Total" row, and
##           (at least) columns "Population", "Quota", "Initial", "Final",
##           "(Extra for Hamilton)"; only "Population" needs to be filled in.
## div     : standard divisor (total population / number of seats).
## n.seats : total number of seats to apportion.
## n.places: number of places (rows before the "Total" row).
## Returns the matrix restricted to Population/Quota/Initial/Final.
hamilton <- function(pop.top, div, n.seats, n.places){
  ## Step 1: standard quotas and their floors (the initial allocation).
  for (i in seq_len(n.places)){
    quota <- pop.top[i, "Population"] / div
    pop.top[i, "Quota"] <- quota
    pop.top[i, "Initial"] <- floor(quota)
  }
  pop.top["Total", "Initial"] <- sum(pop.top[seq_len(n.places), "Initial"])
  ## Step 2: the final allocation starts from the lower quotas; the
  ## fractional parts decide who receives the surplus seats.
  pop.top[, "Final"] <- pop.top[, "Initial"]
  for (i in seq_len(n.places)){
    pop.top[i, "(Extra for Hamilton)"] <- pop.top[i, "Quota"] - floor(pop.top[i, "Quota"])
  }
  ## Step 3: hand out one surplus seat at a time to the largest remaining
  ## fractional part until all n.seats are placed.
  while (pop.top["Total", "Final"] < n.seats){
    position <- which.max(pop.top[, "(Extra for Hamilton)"])
    pop.top[position, "Final"] <- pop.top[position, "Final"] + 1
    pop.top["Total", "Final"] <- sum(pop.top[seq_len(n.places), "Final"])
    ## Mark this remainder as used (-1) so the next largest one wins next.
    pop.top[position, "(Extra for Hamilton)"] <- -1
  }
  ## Present only the interesting columns (return pop.top for the full matrix).
  pop.top[, c("Population", "Quota", "Initial", "Final")]
}
##########################
## JEFFERSON'S METHOD ##
##########################
## Apportion `n.seats` seats using Jefferson's (greatest-divisor) method:
## round every quota DOWN, and if the total is wrong, nudge the divisor
## (x0.95 when too few seats, x1.05 when too many) and recurse.
##
## pop.top     : numeric matrix as built by build.apportionment.
## div         : divisor for this pass (standard divisor on first call).
## n.seats     : total number of seats to apportion.
## n.places    : number of places (rows before the "Total" row).
## mod.div.list: accumulator of the modified divisors tried so far.
## Returns a list: the final apportionment table and the divisor that worked
## (zero-length when the standard divisor already worked).
jefferson <- function(pop.top, div, n.seats, n.places, mod.div.list){
  ## Lower quotas with the current divisor.
  for (i in seq_len(n.places)){
    quota <- pop.top[i, "Population"] / div
    pop.top[i, "Quota"] <- quota
    pop.top[i, "Initial"] <- floor(quota)
  }
  pop.top["Total", "Initial"] <- sum(pop.top[seq_len(n.places), "Initial"])
  allocated <- pop.top["Total", "Initial"]
  if (allocated != n.seats){
    ## Too few seats -> shrink the divisor; too many -> grow it.
    ## The factors 0.95 / 1.05 are arbitrary small nudges.
    modified.divisor <- if (allocated < n.seats) div * 0.95 else div * 1.05
    mod.div.list <- c(mod.div.list, modified.divisor)
    ## Recurse with the adjusted divisor; in R the value of this call is
    ## the value of the enclosing call, so no explicit return is needed.
    jefferson(pop.top, modified.divisor, n.seats, n.places, mod.div.list)
  } else {
    ## Divisor works: present the table and the last divisor used
    ## (use pop.top instead of pop.top[, keep] to see the full matrix).
    keep <- c("Population", "Quota", "Initial")
    list("New Apportionment Table" = pop.top[, keep],
         "New Divisor Used" = mod.div.list[length(mod.div.list)])
  }
}
## RETURNS ORIGINAL JEFFERSON SOLUTION
## Apportion seats by Jefferson's method using the standard divisor only:
## each place receives the floor of population / divisor, with no divisor
## re-adjustment.  Returns the population matrix restricted to the
## Population, Quota and Initial columns (the "Total" row keeps NA quotas).
original.jefferson <- function(pop.top, div, n.seats, n.places) {
  place.rows <- seq_len(n.places)
  ## quota = population / standard divisor, computed for all places at once
  quotas <- pop.top[place.rows, "Population"] / div
  pop.top[place.rows, "Quota"] <- quotas
  ## Jefferson always rounds the quota down (lower quota)
  pop.top[place.rows, "Initial"] <- floor(quotas)
  ## total seats handed out under the standard divisor
  pop.top["Total", "Initial"] <- sum(pop.top[place.rows, "Initial"])
  ## report only the columns relevant to this method
  pop.top[, c("Population", "Quota", "Initial")]
}
## THIS HELPS IN DETERMINING THE RANGE OF DIVISORS
## USES JEFFERSON METHOD WITHOUT CHANGING DIVISOR
## Given a fixed divisor, return the total number of seats Jefferson's
## method would allocate (sum of the lower quotas).  Pure helper used by
## range.jefferson to probe candidate divisors; n.seats is accepted for
## interface compatibility but is not needed for the computation.
test.jefferson <- function(pop.top, div, n.seats, n.places) {
  ## lower quota per place: population / divisor, rounded down
  lower.quotas <- floor(pop.top[seq_len(n.places), "Population"] / div)
  ## total seats handed out with this divisor
  sum(lower.quotas)
}
## THIS DETERMINES THE RANGE OF THE DIVISORS FOR JEFFERSON
## Find the range of integer-stepped divisors that apportion exactly
## n.seats under Jefferson's method.
##
## Phase 1 walks the divisor in steps of 1 (down when too few seats are
## allocated, up when too many) until a working divisor is found.  Phase 2
## keeps walking in the same direction until the divisor stops working;
## the two endpoints (sorted) form the returned range.  If the initial
## divisor already apportions exactly n.seats, the string "it's fine" is
## returned instead (preserving the original behaviour).
##
## NOTE(review): like the original implementation, this loops forever if
## no divisor reachable in integer steps yields exactly n.seats -- confirm
## the inputs always admit an exact divisor.
range.jefferson <- function(pop.top, div, n.seats, n.places) {
  ## seats allocated by Jefferson's method for a candidate divisor
  ## (the original recomputed this up to three times per step and also made
  ## discarded calls whose results were never used; both are removed here)
  seats.for <- function(d) test.jefferson(pop.top, div = d, n.seats, n.places)
  current <- div
  procedure <- "it's fine"
  ## Phase 1: step the divisor by 1 until it allocates exactly n.seats.
  repeat {
    allocated <- seats.for(current)
    if (allocated < n.seats) {
      ## too few seats -> a smaller divisor gives larger quotas
      procedure <- "reduce"
      current <- current - 1
    } else if (allocated > n.seats) {
      ## too many seats -> a larger divisor gives smaller quotas
      procedure <- "increase"
      current <- current + 1
    } else {
      break
    }
  }
  ## The original returned the bare procedure string when the initial
  ## divisor needed no adjustment; preserved for backward compatibility.
  if (procedure == "it's fine") {
    return(procedure)
  }
  begin <- current
  step <- if (procedure == "reduce") -1 else 1
  ## Phase 2: keep moving in the same direction until the divisor
  ## overshoots (allocates too many seats when reducing, too few when
  ## increasing); the last divisor before the overshoot ends the range.
  repeat {
    trial <- seats.for(current + step)
    overshot <- if (procedure == "reduce") trial > n.seats else trial < n.seats
    if (overshot) {
      break
    }
    current <- current + step
  }
  range.divisors <- sort(c(begin, current))
  print("Range of Divisors")
  range.divisors
}
##############################
##  HUNTINGTON HILL METHOD  ##
##############################
## Huntington-Hill apportionment with automatic divisor adjustment.
## Each place's quota is rounded up when it exceeds the geometric mean of
## its two neighbouring whole numbers, down otherwise.  The divisor is
## rescaled (x0.95 when too few seats, x1.05 when too many) until exactly
## n.seats are allocated; every modified divisor is appended to
## mod.div.list.  Returns a list with the final apportionment table and
## the last divisor used.
## NOTE(review): as in the original, the 5% rescaling can in principle
## oscillate without ever hitting n.seats exactly.
huntington.hill <- function(pop.top, div, n.seats, n.places, mod.div.list) {
  divisor <- div
  place.rows <- seq_len(n.places)
  ## Iteratively rescale the divisor (the original expressed this as
  ## recursion; a loop produces the identical divisor sequence).
  repeat {
    quotas <- pop.top[place.rows, "Population"] / divisor
    lower <- floor(quotas)
    geo.mean <- sqrt(lower * (lower + 1))
    pop.top[place.rows, "Quota"] <- quotas
    pop.top[place.rows, "Lower.Quota"] <- lower
    pop.top[place.rows, "Geometric.Mean"] <- geo.mean
    ## Huntington-Hill rounding rule: up past the geometric mean,
    ## down otherwise.
    pop.top[place.rows, "Initial"] <- ifelse(quotas > geo.mean,
                                             ceiling(quotas), lower)
    total <- sum(pop.top[place.rows, "Initial"])
    pop.top["Total", "Initial"] <- total
    if (total == n.seats) {
      break
    }
    ## shrink by 5% when under-allocated, grow by 5% when over-allocated
    ## (same factors as the original implementation)
    divisor <- divisor * if (total < n.seats) 0.95 else 1.05
    mod.div.list <- c(mod.div.list, divisor)
  }
  keep <- c("Population", "Quota", "Lower.Quota", "Geometric.Mean", "Initial")
  list("New Apportionment Table" = pop.top[, keep],
       "New Divisor Used" = mod.div.list[length(mod.div.list)])
}
## RETURNS ORIGINAL HUNTINGTON HILL SOLUTION
## Apportion seats by the Huntington-Hill method using the standard divisor
## only (no divisor re-adjustment).  Quotas above the geometric mean of
## their neighbouring whole numbers round up, otherwise down.  Returns the
## population matrix restricted to the columns relevant to this method.
original.huntingtonhill <- function(pop.top, div, n.seats, n.places) {
  place.rows <- seq_len(n.places)
  ## quota = population / standard divisor, for all places at once
  quotas <- pop.top[place.rows, "Population"] / div
  lower <- floor(quotas)
  ## geometric mean of the lower and upper quota decides the rounding
  geo.mean <- sqrt(lower * (lower + 1))
  pop.top[place.rows, "Quota"] <- quotas
  pop.top[place.rows, "Lower.Quota"] <- lower
  pop.top[place.rows, "Geometric.Mean"] <- geo.mean
  pop.top[place.rows, "Initial"] <- ifelse(quotas > geo.mean,
                                           ceiling(quotas), lower)
  ## total seats handed out under the standard divisor
  pop.top["Total", "Initial"] <- sum(pop.top[place.rows, "Initial"])
  pop.top[, c("Population", "Quota", "Lower.Quota",
              "Geometric.Mean", "Initial")]
}
## THIS HELPS IN DETERMINING THE RANGE OF DIVISORS
## USES HUNTINGTON HILL METHOD WITHOUT CHANGING DIVISOR
## Given a fixed divisor, return the total number of seats the
## Huntington-Hill rounding rule would allocate.  Pure helper used by
## range.huntingtonhill to probe candidate divisors; n.seats is accepted
## for interface compatibility but is not needed for the computation.
test.huntingtonhill <- function(pop.top, div, n.seats, n.places) {
  quotas <- pop.top[seq_len(n.places), "Population"] / div
  lower <- floor(quotas)
  geo.mean <- sqrt(lower * (lower + 1))
  ## round up past the geometric mean, down otherwise, then total
  sum(ifelse(quotas > geo.mean, ceiling(quotas), lower))
}
## THIS DETERMINES THE RANGE OF THE DIVISORS FOR HUNTINGTON HILL
## Find the range of integer-stepped divisors that apportion exactly
## n.seats under the Huntington-Hill method.
##
## Phase 1 walks the divisor in steps of 1 (down when too few seats are
## allocated, up when too many) until a working divisor is found.  Phase 2
## keeps walking in the same direction until the divisor stops working;
## the two endpoints (sorted) form the returned range.  If the initial
## divisor already apportions exactly n.seats, the string "it's fine" is
## returned instead (preserving the original behaviour).
##
## NOTE(review): like the original implementation, this loops forever if
## no divisor reachable in integer steps yields exactly n.seats -- confirm
## the inputs always admit an exact divisor.
range.huntingtonhill <- function(pop.top, div, n.seats, n.places) {
  ## seats allocated by Huntington-Hill for a candidate divisor
  ## (the original recomputed this up to three times per step and also made
  ## discarded calls whose results were never used; both are removed here)
  seats.for <- function(d) {
    test.huntingtonhill(pop.top, div = d, n.seats, n.places)
  }
  current <- div
  procedure <- "it's fine"
  ## Phase 1: step the divisor by 1 until it allocates exactly n.seats.
  repeat {
    allocated <- seats.for(current)
    if (allocated < n.seats) {
      ## too few seats -> a smaller divisor gives larger quotas
      procedure <- "reduce"
      current <- current - 1
    } else if (allocated > n.seats) {
      ## too many seats -> a larger divisor gives smaller quotas
      procedure <- "increase"
      current <- current + 1
    } else {
      break
    }
  }
  ## The original returned the bare procedure string when the initial
  ## divisor needed no adjustment; preserved for backward compatibility.
  if (procedure == "it's fine") {
    return(procedure)
  }
  begin <- current
  step <- if (procedure == "reduce") -1 else 1
  ## Phase 2: keep moving in the same direction until the divisor
  ## overshoots (allocates too many seats when reducing, too few when
  ## increasing); the last divisor before the overshoot ends the range.
  repeat {
    trial <- seats.for(current + step)
    overshot <- if (procedure == "reduce") trial > n.seats else trial < n.seats
    if (overshot) {
      break
    }
    current <- current + step
  }
  range.divisors <- sort(c(begin, current))
  print("Range of Divisors")
  range.divisors
}
#############################
##   APPORTIONMENT BUILDER ##
#############################
## Build a random apportionment problem and solve it with the chosen
## method.  YOU HAVE TO INPUT THE NUMBER OF PLACES THAT NEED SEATS AND THE
## METHOD USED ("hamilton", "jefferson" or "huntington.hill").
build.apportionment <- function(n.places, method) {
  ## Place names are drawn from LETTERS, so at most 26 places are supported;
  ## fail early instead of producing NA row names.
  stopifnot(n.places >= 1, n.places <= 26)
  ## Randomly select a number between 50 and 100 to be the number of seats
  ## needing allocated.  NOTE: the order of RNG draws (seats first, then
  ## populations) matches the original so seeded runs reproduce the same
  ## problems.
  n.seats <- sample(50:100, 1)
  ## Name the places A, B, ... and append "Total" for the summary row.
  places.name <- c(LETTERS[seq_len(n.places)], "Total")
  ## Create the population matrix.
  pop.top <- matrix(
    nrow = n.places + 1, ncol = 7,
    dimnames = list(places.name,
                    c("Population", "Quota", "Lower.Quota", "Geometric.Mean",
                      "Initial", "Final", "(Extra for Hamilton)")))
  ## Don't want wonky population values to deal with.
  nice.numbers <- seq(from = 25000, to = 100000, by = 500)
  ## Take a sample of the nice.numbers to use as population values.
  populations <- sample(nice.numbers, n.places)
  pop.top[seq_len(n.places), "Population"] <- populations
  ## Calculate the total population and the standard divisor.
  pop.top["Total", "Population"] <- sum(populations)
  divisor <- pop.top["Total", "Population"] / n.seats
  print("Number of Seats")
  print(n.seats)
  print("Divisor")
  print(divisor)
  mod.div.list <- vector()
  ## Compute the apportionment using the requested method.
  if (method == "hamilton") {
    print(hamilton(pop.top, div = divisor, n.seats = n.seats, n.places = n.places))
  } else if (method == "jefferson") {
    print("Original Apportionment Table")
    print(original.jefferson(pop.top, div = divisor, n.seats = n.seats, n.places = n.places))
    print(jefferson(pop.top, div = divisor, n.seats = n.seats, n.places = n.places, mod.div.list = mod.div.list))
    print(range.jefferson(pop.top, div = divisor, n.seats = n.seats, n.places = n.places))
  } else if (method == "huntington.hill") {
    print("Original Apportionment Table")
    print(original.huntingtonhill(pop.top, div = divisor, n.seats = n.seats, n.places = n.places))
    print(huntington.hill(pop.top, div = divisor, n.seats = n.seats, n.places = n.places, mod.div.list = mod.div.list))
    print(range.huntingtonhill(pop.top, div = divisor, n.seats = n.seats, n.places = n.places))
  } else {
    ## The original silently did nothing for an unknown method; fail loudly.
    stop("Unknown method: ", method,
         " (expected \"hamilton\", \"jefferson\" or \"huntington.hill\")",
         call. = FALSE)
  }
}
## Automatically generates a seed number between 100 and 999.
## However, seed numbers can be input manually:
## comment out sample(100:999, 1) and type in the desired seed number.
## It will look like this: auto <- 123  # instead of sample(100:999, 1)
auto <- sample(100:999, 1)
print(auto)
## Solves the same apportionment problem using each method; the seed is
## reset before every call so all three methods see identical random data.
set.seed(auto)
build.apportionment(n.places = 5, method = "hamilton")
set.seed(auto)
build.apportionment(n.places = 5, method = "jefferson")
set.seed(auto)
build.apportionment(n.places = 5, method = "huntington.hill")
|
## Fit a simple linear regression of height on age from an Excel sheet.
library(readxl)
ageandheight <- read_excel("/media/ronn/hd_ubuntu/workspace/my_git/My_R/ageandheight.xls", sheet = "Hoja2") #Upload the data
lmHeight = lm(height~age, data = ageandheight) #Create the linear regression
summary(lmHeight) #Review the results
|
/linear-regression.R
|
no_license
|
RonnyldoSilva/My_R
|
R
| false
| false
| 257
|
r
|
## Fit a simple linear regression of height on age from an Excel sheet.
library(readxl)
ageandheight <- read_excel("/media/ronn/hd_ubuntu/workspace/my_git/My_R/ageandheight.xls", sheet = "Hoja2") #Upload the data
lmHeight = lm(height~age, data = ageandheight) #Create the linear regression
summary(lmHeight) #Review the results
|
#' @title Helper function to retrieve species information from the ENSEMBL API
#' @description This function interfaces with the ENSEMBL API
#' (http://rest.ensembl.org/info/species?content-type=application/json)
#' and internally stores the output to use this information for subsequent
#' retrieval function calls.
#' @param update Re-download the species table even if a cached copy
#' already exists in \code{tempdir()}? Defaults to \code{FALSE}.
#' @return A tibble of ENSEMBL species metadata.
#' @author Hajk-Georg Drost
#' @noRd
get.ensembl.info <- function(update = FALSE) {
    cache_file <- file.path(tempdir(), "ensembl_info.tsv")
    if (file.exists(cache_file) && !update) {
        ## Re-use the per-session cache written by a previous call.
        suppressWarnings(
            ensembl.info <-
                readr::read_tsv(
                    cache_file,
                    col_names = TRUE,
                    col_types = readr::cols(
                        division = readr::col_character(),
                        taxon_id = readr::col_integer(),
                        name = readr::col_character(),
                        release = readr::col_integer(),
                        display_name = readr::col_character(),
                        accession = readr::col_character(),
                        common_name = readr::col_character(),
                        assembly = readr::col_character()
                    )
                )
        )
    } else {
        rest_url <- "http://rest.ensembl.org/info/species?content-type=application/json"
        rest_api_status <- curl::curl_fetch_memory(rest_url)
        if (rest_api_status$status_code != 200) {
            stop(
                "The API 'http://rest.ensembl.org' does not seem to
                work properly. Are you connected to the internet?
                Is the homepage 'http://rest.ensembl.org' currently available?",
                call. = FALSE
            )
        }
        ## Parse the body of the response fetched above instead of letting
        ## jsonlite::fromJSON(rest_url) issue a second, redundant network
        ## request (which could also return a different payload than the
        ## one whose status code was just checked).
        ensembl.info <-
            tibble::as_tibble(
                jsonlite::fromJSON(
                    rawToChar(rest_api_status$content)
                )$species
            )
        ## Drop the aliases/groups columns before caching -- presumably
        ## list-columns that TSV cannot represent; confirm upstream.
        aliases <- groups <- NULL
        ensembl.info <-
            dplyr::select(ensembl.info, -aliases, -groups)
        readr::write_tsv(ensembl.info, cache_file)
    }
    return(ensembl.info)
}
|
/R/get.ensembl.info.R
|
no_license
|
AdrianS85/biomartr
|
R
| false
| false
| 2,236
|
r
|
#' @title Helper function to retrieve species information from the ENSEMBL API
#' @description This function interfaces with the ENSEMBL API
#' (http://rest.ensembl.org/info/species?content-type=application/json)
#' and internally stores the output to use this information for subsequent
#' retrieval function calls.
#' @param update Re-download the species table even if a cached copy
#' already exists in \code{tempdir()}? Defaults to \code{FALSE}.
#' @return A tibble of ENSEMBL species metadata.
#' @author Hajk-Georg Drost
#' @noRd
get.ensembl.info <- function(update = FALSE) {
    cache_file <- file.path(tempdir(), "ensembl_info.tsv")
    if (file.exists(cache_file) && !update) {
        ## Re-use the per-session cache written by a previous call.
        suppressWarnings(
            ensembl.info <-
                readr::read_tsv(
                    cache_file,
                    col_names = TRUE,
                    col_types = readr::cols(
                        division = readr::col_character(),
                        taxon_id = readr::col_integer(),
                        name = readr::col_character(),
                        release = readr::col_integer(),
                        display_name = readr::col_character(),
                        accession = readr::col_character(),
                        common_name = readr::col_character(),
                        assembly = readr::col_character()
                    )
                )
        )
    } else {
        rest_url <- "http://rest.ensembl.org/info/species?content-type=application/json"
        rest_api_status <- curl::curl_fetch_memory(rest_url)
        if (rest_api_status$status_code != 200) {
            stop(
                "The API 'http://rest.ensembl.org' does not seem to
                work properly. Are you connected to the internet?
                Is the homepage 'http://rest.ensembl.org' currently available?",
                call. = FALSE
            )
        }
        ## Parse the body of the response fetched above instead of letting
        ## jsonlite::fromJSON(rest_url) issue a second, redundant network
        ## request (which could also return a different payload than the
        ## one whose status code was just checked).
        ensembl.info <-
            tibble::as_tibble(
                jsonlite::fromJSON(
                    rawToChar(rest_api_status$content)
                )$species
            )
        ## Drop the aliases/groups columns before caching -- presumably
        ## list-columns that TSV cannot represent; confirm upstream.
        aliases <- groups <- NULL
        ensembl.info <-
            dplyr::select(ensembl.info, -aliases, -groups)
        readr::write_tsv(ensembl.info, cache_file)
    }
    return(ensembl.info)
}
|
## Auto-extracted example script for INDperform::model_gam (runs the
## examples from the package's Rd file; requires the INDperform package
## and its bundled demo data ind_ex / press_ex).
library(INDperform)
### Name: model_gam
### Title: Modeling of indicator responses to single pressures with GAMs
### Aliases: model_gam
### ** Examples
# Using the Baltic Sea demo data in this package
dat_init <- ind_init(
  ind_tbl = ind_ex[, c("Sprat", "Cod")],
  press_tbl = press_ex[, c("Tsum", "Swin", "Fcod", "Fher")],
  time = ind_ex[ ,1])
gam_tbl <- model_gam(dat_init)
# Any outlier?
gam_tbl$pres_outlier
# Exclude outliers by passing this list as input:
gam_tbl_out <- model_gam(dat_init, excl_outlier = gam_tbl$pres_outlier)
## No test:
# Using another error distribution
ind_sub <- round(exp(ind_ex[ ,c(2,8,9)]),0) # to unlog data and convert to integers
ind_tbl2 <- ind_init(ind_sub, press_ex, time = ind_ex$Year)
model_gam(ind_tbl2, family = poisson(link="log"))
## End(No test)
|
/data/genthat_extracted_code/INDperform/examples/model_gam.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 806
|
r
|
## Auto-extracted example script for INDperform::model_gam (runs the
## examples from the package's Rd file; requires the INDperform package
## and its bundled demo data ind_ex / press_ex).
library(INDperform)
### Name: model_gam
### Title: Modeling of indicator responses to single pressures with GAMs
### Aliases: model_gam
### ** Examples
# Using the Baltic Sea demo data in this package
dat_init <- ind_init(
  ind_tbl = ind_ex[, c("Sprat", "Cod")],
  press_tbl = press_ex[, c("Tsum", "Swin", "Fcod", "Fher")],
  time = ind_ex[ ,1])
gam_tbl <- model_gam(dat_init)
# Any outlier?
gam_tbl$pres_outlier
# Exclude outliers by passing this list as input:
gam_tbl_out <- model_gam(dat_init, excl_outlier = gam_tbl$pres_outlier)
## No test:
# Using another error distribution
ind_sub <- round(exp(ind_ex[ ,c(2,8,9)]),0) # to unlog data and convert to integers
ind_tbl2 <- ind_init(ind_sub, press_ex, time = ind_ex$Year)
model_gam(ind_tbl2, family = poisson(link="log"))
## End(No test)
|
## Plot the bias/variance decomposition of the reducible error for the
## ridge (per-lambda) and LARS simulation results.
## NOTE(review): rm(list = ls()) wipes the caller's global environment;
## discouraged in scripts.
rm(list = ls())
## Loads the simulation objects (ridge_fixed, lars_fixed, ridge_random,
## lars_random and, presumably, lambdas -- confirm) into the environment.
load("simulation.RData")
library(tidyverse)
## Assemble a long-format tibble: one row per model (each ridge lambda,
## then "lars") and per index ("bias" / "variance").
get_data <- function(ridge, lars,
                     names = c(map_chr(lambdas, ~ paste0("λ=", .x)), "lars"))
    tibble(
        value = c(ridge$bias, lars$"bias^2",
                  ridge$variance, lars$variance),
        index_name = rep(c("bias", "variance"), each = length(names)),
        model_name = factor(rep(names, times = 2), levels = names))
## Stacked bar chart of bias + variance per model.
barplot_simulation <- function(ridge, lars)
    ggplot(get_data(ridge, lars),
           aes(x = model_name, y = value, fill = index_name)) +
    geom_bar(stat = "identity", position = "stack") +
    xlab("Model") + ylab("Value") +
    ggtitle("Reducible Error scomposition") +
    guides(fill = guide_legend(title = "")) +
    theme_minimal()
## Render and save the two bar plots (fixed and random design).
barplot_simulation(ridge_fixed, lars_fixed) %>%
    ggsave(filename = "fixed_barplot.png",
           height = 9, width = 16, dpi = 500)
barplot_simulation(ridge_random, lars_random) %>%
    ggsave(filename = "random_barplot.png",
           height = 9, width = 16, dpi = 500)
## Line-plot variant of the same data (defined but not rendered here).
lineplot_simulation <- function(ridge, lars)
    ggplot(get_data(ridge, lars),
           aes(x = model_name, y = value,
               group = index_name, colour = index_name)) +
    geom_line() +
    geom_point()
|
/plot_fixed.R
|
no_license
|
moiraghif/LARS
|
R
| false
| false
| 1,290
|
r
|
## Plot the bias/variance decomposition of the reducible error for the
## ridge (per-lambda) and LARS simulation results.
## NOTE(review): rm(list = ls()) wipes the caller's global environment;
## discouraged in scripts.
rm(list = ls())
## Loads the simulation objects (ridge_fixed, lars_fixed, ridge_random,
## lars_random and, presumably, lambdas -- confirm) into the environment.
load("simulation.RData")
library(tidyverse)
## Assemble a long-format tibble: one row per model (each ridge lambda,
## then "lars") and per index ("bias" / "variance").
get_data <- function(ridge, lars,
                     names = c(map_chr(lambdas, ~ paste0("λ=", .x)), "lars"))
    tibble(
        value = c(ridge$bias, lars$"bias^2",
                  ridge$variance, lars$variance),
        index_name = rep(c("bias", "variance"), each = length(names)),
        model_name = factor(rep(names, times = 2), levels = names))
## Stacked bar chart of bias + variance per model.
barplot_simulation <- function(ridge, lars)
    ggplot(get_data(ridge, lars),
           aes(x = model_name, y = value, fill = index_name)) +
    geom_bar(stat = "identity", position = "stack") +
    xlab("Model") + ylab("Value") +
    ggtitle("Reducible Error scomposition") +
    guides(fill = guide_legend(title = "")) +
    theme_minimal()
## Render and save the two bar plots (fixed and random design).
barplot_simulation(ridge_fixed, lars_fixed) %>%
    ggsave(filename = "fixed_barplot.png",
           height = 9, width = 16, dpi = 500)
barplot_simulation(ridge_random, lars_random) %>%
    ggsave(filename = "random_barplot.png",
           height = 9, width = 16, dpi = 500)
## Line-plot variant of the same data (defined but not rendered here).
lineplot_simulation <- function(ridge, lars)
    ggplot(get_data(ridge, lars),
           aes(x = model_name, y = value,
               group = index_name, colour = index_name)) +
    geom_line() +
    geom_point()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataproc_functions.R
\name{operations.list}
\alias{operations.list}
\title{Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.}
\usage{
operations.list(name, filter = NULL, pageSize = NULL, pageToken = NULL)
}
\arguments{
\item{name}{The operation collection name}
\item{filter}{The standard List filter}
\item{pageSize}{The standard List page size}
\item{pageToken}{The standard List page token}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/dataproc/}{Google Documentation}
}
|
/googledataprocv1alpha1.auto/man/operations.list.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false
| true
| 1,039
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataproc_functions.R
\name{operations.list}
\alias{operations.list}
\title{Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.}
\usage{
operations.list(name, filter = NULL, pageSize = NULL, pageToken = NULL)
}
\arguments{
\item{name}{The operation collection name}
\item{filter}{The standard List filter}
\item{pageSize}{The standard List page size}
\item{pageToken}{The standard List page token}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/dataproc/}{Google Documentation}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/encodeString.R
\name{encodeString}
\alias{encodeString}
\title{fun_name}
\usage{
encodeString(params)
}
\arguments{
\item{param}{fun_name}
}
\description{
Another substituted function (placeholder description).
}
|
/man/encodeString.Rd
|
no_license
|
granatb/RapeR
|
R
| false
| true
| 263
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/encodeString.R
\name{encodeString}
\alias{encodeString}
\title{fun_name}
\usage{
encodeString(params)
}
\arguments{
\item{param}{fun_name}
}
\description{
kolejna funkcja podmieniona
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleBypasses.R
\name{setEdgeSourceArrowShapeBypass}
\alias{setEdgeSourceArrowShapeBypass}
\title{Set Edge Source Arrow Shape Bypass}
\usage{
setEdgeSourceArrowShapeBypass(edge.names, new.shapes, network = NULL,
base.url = .defaultBaseUrl)
}
\arguments{
\item{edge.names}{List of edge names}
\item{new.shapes}{List of shapes, or single value. See \link{getArrowShapes}.}
\item{network}{(optional) Name or SUID of the network. Default is the
"current" network active in Cytoscape.}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
None
}
\description{
Override the source arrow shape for particular edges.
}
\details{
This method permanently overrides any default values or mappings
defined for this visual property of the edge or edges specified. This method
ultimately calls the generic function, \link{setEdgePropertyBypass}, which
can be used to set any visual property. To restore defaults and mappings, use
\link{clearEdgePropertyBypass}.
}
\examples{
\donttest{
setEdgeSourceArrowShapeBypass()
}
}
\seealso{
{
\link{setEdgePropertyBypass},
\link{clearEdgePropertyBypass}
}
}
|
/man/setEdgeSourceArrowShapeBypass.Rd
|
permissive
|
shraddhapai/RCy3
|
R
| false
| true
| 1,363
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleBypasses.R
\name{setEdgeSourceArrowShapeBypass}
\alias{setEdgeSourceArrowShapeBypass}
\title{Set Edge Source Arrow Shape Bypass}
\usage{
setEdgeSourceArrowShapeBypass(edge.names, new.shapes, network = NULL,
base.url = .defaultBaseUrl)
}
\arguments{
\item{edge.names}{List of edge names}
\item{new.shapes}{List of shapes, or single value. See \link{getArrowShapes}.}
\item{network}{(optional) Name or SUID of the network. Default is the
"current" network active in Cytoscape.}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
None
}
\description{
Override the source arrow shape for particular edges.
}
\details{
This method permanently overrides any default values or mappings
defined for this visual property of the edge or edges specified. This method
ultimately calls the generic function, \link{setEdgePropertyBypass}, which
can be used to set any visual property. To restore defaults and mappings, use
\link{clearEdgePropertyBypass}.
}
\examples{
\donttest{
setEdgeSourceArrowShapeBypass()
}
}
\seealso{
{
\link{setEdgePropertyBypass},
\link{clearEdgePropertyBypass}
}
}
|
testlist <- list(hi = 5.84121309954418e+199, lo = 7.25785896681726e+193, mu = 6.86702135111641e+180, sig = 9.70158285081333e+189)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result)
|
/gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610046724-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 191
|
r
|
testlist <- list(hi = 5.84121309954418e+199, lo = 7.25785896681726e+193, mu = 6.86702135111641e+180, sig = 9.70158285081333e+189)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result)
|
my.mvrnorm<-function(n){
y1<-rnorm(n)
y2<-rnorm(n)
x1<-y1
x2<-0.5 * y1 + sqrt(3)/2 * y2
return(data.frame(x1,x2))
}
y<-my.mvrnorm(100000)
plot(y,pch=16,cex=0.3)
Sigma = matrix(c(1,0.5,0.5,1),2,2)
A= t(chol(Sigma))
A%*%t(A)
|
/R-class problems/演習6.1.3.R
|
no_license
|
brishtiteveja/R-Statistical-Analysis-Projects
|
R
| false
| false
| 230
|
r
|
my.mvrnorm<-function(n){
y1<-rnorm(n)
y2<-rnorm(n)
x1<-y1
x2<-0.5 * y1 + sqrt(3)/2 * y2
return(data.frame(x1,x2))
}
y<-my.mvrnorm(100000)
plot(y,pch=16,cex=0.3)
Sigma = matrix(c(1,0.5,0.5,1),2,2)
A= t(chol(Sigma))
A%*%t(A)
|
library(BinNonNor)
### Name: validation.corr
### Title: Validates the specified correlation matrix
### Aliases: validation.corr
### ** Examples
n.BB=2
n.NN=4
corr.vec=NULL
corr.mat=matrix(c(1.0,-0.3,-0.3,-0.3,-0.3,-0.3,
-0.3,1.0,-0.3,-0.3,-0.3,-0.3,
-0.3,-0.3,1.0,0.4,0.5,0.6,
-0.3,-0.3,0.4,1.0,0.7,0.8,
-0.3,-0.3,0.5,0.7,1.0,0.9,
-0.3,-0.3,0.6,0.8,0.9,1.0),6,byrow=TRUE)
validation.corr(n.BB,n.NN,corr.vec=NULL,corr.mat)
n.BB=2
n.NN=4
corr.vec=c(-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,0.4,0.5,0.6,0.7,0.8,0.9)
validation.corr(n.BB,n.NN,corr.vec,corr.mat=NULL)
## Not run:
##D n.BB=0
##D n.NN=4
##D validation.corr(n.BB,n.NN,corr.vec=NULL,corr.mat)
##D
##D n.BB=2
##D n.NN=0
##D validation.corr(n.BB,n.NN=0,corr.vec=NULL,corr.mat)
##D
##D corr.matc=corr.mat[3:6,3:6]
##D validation.corr(n.BB=0,n.NN=4,corr.vec=NULL,corr.mat=corr.matc)
##D
##D corr.mat[2,1]=0.5
##D validation.corr(n.BB,n.NN,corr.vec=NULL,corr.mat)
##D
##D corr.mat[1,2]=0.5
##D corr.mat[3,1]=1.5
##D corr.mat[1,3]=1.5
##D validation.corr(n.BB,n.NN,corr.vec=NULL,corr.mat)
##D
##D npd<-matrix(c(1, 0.477, 0.644, 0.478, 0.651, 0.826,
##D 0.477, 1, 0.516, 0.233, 0.682, 0.75,
##D 0.644, 0.516, 1, 0.599, 0.581, 0.742,
##D 0.478, 0.233, 0.599, 1, 0.741, 0.8,
##D 0.651, 0.682, 0.581, 0.741, 1, 0.798,
##D 0.826, 0.75, 0.742, 0.8, 0.798, 1),
##D nrow = 6, ncol = 6)
##D
##D validation.corr(n.BB,n.NN,corr.vec=NULL,corr.mat=npd)
##D
##D n.BB=1
##D n.NN=0
##D corr.mat<-diag(1)
##D validation.corr(n.BB,n.NN,corr.vec=NULL,corr.mat)
##D
## End(Not run)
|
/data/genthat_extracted_code/BinNonNor/examples/validation.corr.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,650
|
r
|
library(BinNonNor)
### Name: validation.corr
### Title: Validates the specified correlation matrix
### Aliases: validation.corr
### ** Examples
n.BB=2
n.NN=4
corr.vec=NULL
corr.mat=matrix(c(1.0,-0.3,-0.3,-0.3,-0.3,-0.3,
-0.3,1.0,-0.3,-0.3,-0.3,-0.3,
-0.3,-0.3,1.0,0.4,0.5,0.6,
-0.3,-0.3,0.4,1.0,0.7,0.8,
-0.3,-0.3,0.5,0.7,1.0,0.9,
-0.3,-0.3,0.6,0.8,0.9,1.0),6,byrow=TRUE)
validation.corr(n.BB,n.NN,corr.vec=NULL,corr.mat)
n.BB=2
n.NN=4
corr.vec=c(-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,0.4,0.5,0.6,0.7,0.8,0.9)
validation.corr(n.BB,n.NN,corr.vec,corr.mat=NULL)
## Not run:
##D n.BB=0
##D n.NN=4
##D validation.corr(n.BB,n.NN,corr.vec=NULL,corr.mat)
##D
##D n.BB=2
##D n.NN=0
##D validation.corr(n.BB,n.NN=0,corr.vec=NULL,corr.mat)
##D
##D corr.matc=corr.mat[3:6,3:6]
##D validation.corr(n.BB=0,n.NN=4,corr.vec=NULL,corr.mat=corr.matc)
##D
##D corr.mat[2,1]=0.5
##D validation.corr(n.BB,n.NN,corr.vec=NULL,corr.mat)
##D
##D corr.mat[1,2]=0.5
##D corr.mat[3,1]=1.5
##D corr.mat[1,3]=1.5
##D validation.corr(n.BB,n.NN,corr.vec=NULL,corr.mat)
##D
##D npd<-matrix(c(1, 0.477, 0.644, 0.478, 0.651, 0.826,
##D 0.477, 1, 0.516, 0.233, 0.682, 0.75,
##D 0.644, 0.516, 1, 0.599, 0.581, 0.742,
##D 0.478, 0.233, 0.599, 1, 0.741, 0.8,
##D 0.651, 0.682, 0.581, 0.741, 1, 0.798,
##D 0.826, 0.75, 0.742, 0.8, 0.798, 1),
##D nrow = 6, ncol = 6)
##D
##D validation.corr(n.BB,n.NN,corr.vec=NULL,corr.mat=npd)
##D
##D n.BB=1
##D n.NN=0
##D corr.mat<-diag(1)
##D validation.corr(n.BB,n.NN,corr.vec=NULL,corr.mat)
##D
## End(Not run)
|
\docType{methods}
\name{plot}
\alias{plot}
\alias{plot,kMeansResult}
\alias{plot,linearRegressionResult,ANY-method}
\alias{plot,logisticRegressionResult,ANY-method}
\title{plot method}
\arguments{
\item{x}{A \code{kMeans} instance}
\item{trace}{tracing centroids when algorithm progress}
\item{object}{A \code{linearRegressionResult} instance.}
\item{x}{A \code{svmResult} instance}
\item{X}{data matrix}
\item{y}{class label, 0 or 1}
\item{type}{one of linear or nonlinear}
\item{title}{title}
\item{xlab}{xlab}
\item{ylab}{ylab}
\item{object}{A \code{logisticRegressionResult}
instance.}
}
\value{
graph
ggplot object
ggplot2 graph object
ggplot object
}
\description{
plot method for \code{kMeansResult} instance
plot method for \code{linearRegressionResult} instance
plot method for \code{svmResult} instance
plot method for \code{logisticRegressionResult} instance
}
\author{
Guangchuang Yu \url{http://ygc.name}
Guangchuang Yu \url{http://ygc.name}
Guangchuang Yu \url{http://ygc.name}
Guangchuang Yu \url{http://ygc.name}
}
|
/man/plot-methods.Rd
|
no_license
|
ameenzhao/mlass
|
R
| false
| false
| 1,099
|
rd
|
\docType{methods}
\name{plot}
\alias{plot}
\alias{plot,kMeansResult}
\alias{plot,linearRegressionResult,ANY-method}
\alias{plot,logisticRegressionResult,ANY-method}
\title{plot method}
\arguments{
\item{x}{A \code{kMeans} instance}
\item{trace}{tracing centroids when algorithm progress}
\item{object}{A \code{linearRegressionResult} instance.}
\item{x}{A \code{svmResult} instance}
\item{X}{data matrix}
\item{y}{class label, 0 or 1}
\item{type}{one of linear or nonlinear}
\item{title}{title}
\item{xlab}{xlab}
\item{ylab}{ylab}
\item{object}{A \code{logisticRegressionResult}
instance.}
}
\value{
graph
ggplot object
ggplot2 graph object
ggplot object
}
\description{
plot method for \code{kMeansResult} instance
plot method for \code{linearRegressionResult} instance
plot method for \code{svmResult} instance
plot method for \code{logisticRegressionResult} instance
}
\author{
Guangchuang Yu \url{http://ygc.name}
Guangchuang Yu \url{http://ygc.name}
Guangchuang Yu \url{http://ygc.name}
Guangchuang Yu \url{http://ygc.name}
}
|
################################################################################
## Article Evolving changes in mortality of 13,301 critically ill adult
## patients with COVID-19 over eight months
##
## Random-effects Cox model (mortality model - main analysis)
## Leonardo S.L. Bastos (lslbastos), Pedro Kurtz
##
################################################################################
# Library -----------------------------------------------------------------
library(tidyverse)
library(tidylog)
library(WeightIt)
library(coxme)
library(broom)
library(ehahelper)
# Obtaining main data frame of COVID-19 patients (with preparations)
source("code/Data_Analysis/descriptive_stats_covid.R")
################################################################################
# Data preparation --------------------------------------------------------
## Defining data for modeling (Cox Mortality)
## Sample for model development: patients that required advanced respiratory support
# N = 4,188
df_model <-
df_covid_admissions %>%
filter(
VentSupport != "none"
) %>%
droplevels() %>%
mutate(
first_resp_support = case_when(
VentSupport %in% c("only_niv", "niv_to_mv") ~ "niv_first",
VentSupport %in% c("only_mv") ~ "imv_first",
),
first_resp_support = factor(first_resp_support,
levels = c("imv_first", "niv_first")),
emergency = if_else(AdmissionSource == "Emergency", 1, 0),
period = factor(period, levels = c(2, 1, 3, 4)),
Saps3Q = cut_number(Saps3Points, n = 4) # SAPS3 quartiles
) %>%
select(
outcome_follow_up,
hosp_los_follow_up,
first_resp_support,
Age,
Gender,
Saps3Points,
Saps3Q,
SofaScore,
idade_grupo,
MFI_level,
MFIpoints,
VentSupport,
emergency,
hypertension,
diabetes,
cardio_disease,
imunossupression,
cerebro_disease,
obesity,
copd_asthma,
malignancy,
chronic_kidney,
tobacco,
liver_cirrhosis,
PaO2FiO2,
PaO2FiO21h_level,
IsVasopressors,
IsRenalReplacementTherapy,
ResourceIsVasopressors,
ResourceIsRenalReplacementTherapy,
period,
HospitalCode
) %>%
mutate_at(
c("emergency",
"hypertension",
"diabetes",
"cardio_disease",
"imunossupression",
"hypertension",
"diabetes",
"cardio_disease",
"imunossupression",
"cerebro_disease",
"obesity",
"copd_asthma",
"malignancy",
"chronic_kidney",
"tobacco",
"liver_cirrhosis",
"IsVasopressors",
"IsRenalReplacementTherapy",
"ResourceIsVasopressors",
"ResourceIsRenalReplacementTherapy"),
function(x) { return(as.factor(if_else(x == 1, "yes", "no"))) }
)
################################################################################
# Propensity score estimation (final model in 'propensity_score_mode.R')
model_no_var <-
first_resp_support ~
Gender +
idade_grupo +
MFI_level +
SofaScore +
period +
emergency +
cardio_disease +
IsVasopressors
## Estimating Propensity scores and weights - "ATE" estimand
ps_values <-
weightit(
model_no_var,
family = "binomial",
estimand = "ATE",
data = df_model,
method = "ps"
)
## Obtaining propensity scores and IPT weights for each patients
df_cox_model_ps_ate <-
df_model %>%
bind_cols(
ps_ate = ps_values$ps,
ps_w_ate = ps_values$weights
)
### Propensity Score model evaluation
## Results from the propensity score model fitting and choice are in
## "code/2_Data_Analysis/data_model_propensity_main.R"
################################################################################
# Random-effects cox model (Frailty model) --------------------------------
## "Full model": demographics, selected comorbidities and complications
cox_model_full <-
Surv(hosp_los_follow_up, outcome_follow_up) ~
Gender +
idade_grupo +
MFI_level +
SofaScore +
Saps3Q +
emergency +
hypertension +
diabetes +
obesity +
copd_asthma +
cardio_disease +
ResourceIsVasopressors +
ResourceIsRenalReplacementTherapy +
first_resp_support +
period +
(1 | HospitalCode)
df_cox_model_full <-
coxme(
cox_model_full,
weights = ps_w_ate,
data = df_cox_model_ps_ate
)
df_cox_model_full_results <-
df_cox_model_full %>%
tidy(exponentiate = TRUE) %>%
mutate(
p_adj = ifelse(p.value < 0.001, "<0.001", round(p.value, 3)),
HR_ci = paste0(round(estimate, 2),
" (", round(conf.low, 2), " - ", round(conf.high, 2), ")")
) %>%
select(term, HR_ci, p_adj)
## Model w/ non-significant comorbidities: demographics and complications
cox_model_no_comorb <-
Surv(hosp_los_follow_up, outcome_follow_up) ~
Gender +
idade_grupo +
MFI_level +
SofaScore +
Saps3Q +
emergency +
hypertension +
# diabetes +
# obesity +
# copd_asthma +
cardio_disease +
ResourceIsVasopressors +
ResourceIsRenalReplacementTherapy +
first_resp_support +
period +
(1 | HospitalCode)
df_cox_model_no_comorb <-
coxme(
cox_model_no_comorb,
weights = ps_w_ate,
data = df_cox_model_ps_ate
)
df_cox_model_no_comorb_results <-
df_cox_model_no_comorb %>%
tidy(exponentiate = TRUE) %>%
mutate(
p_adj = ifelse(p.value < 0.001, "<0.001", round(p.value, 3)),
HR_ci = paste0(round(estimate, 3), " (",
round(conf.low, 2), " - ", round(conf.high, 2), ")")
) %>%
select(term, HR_ci, p_adj)
## Comparison of estiamted models (all admissions, no sign comorb, no sign var)
writexl::write_xlsx(
left_join(
df_cox_model_full_results,
df_cox_model_no_comorb_results %>%
rename(HR_ci_comorb = HR_ci, p_adj_comorb = p_adj)
, by = c("term" = "term")
)
, "output/main/model/table_model_backward_comparison.xlsx")
## Comparison of models using estimated AIC and BIC
df_models_aic_bic <-
tibble(
model = c("cox_model_full",
"cox_model_no_comorb"),
AIC = c(
2 * (df_cox_model_full$loglik[2] - df_cox_model_full$loglik[1]) - 2 * df_cox_model_full$df[1],
2 * (df_cox_model_no_comorb$loglik[2] - df_cox_model_no_comorb$loglik[1]) - 2 * df_cox_model_no_comorb$df[1]
),
BIC = c(
2 * (df_cox_model_full$loglik[2] - df_cox_model_full$loglik[1]) - log(df_cox_model_full$n[1]) * df_cox_model_full$df[1],
2 * (df_cox_model_no_comorb$loglik[2] - df_cox_model_no_comorb$loglik[1]) - log(df_cox_model_no_comorb$n[1]) * df_cox_model_no_comorb$df[1]
)
)
writexl::write_xlsx(
df_models_aic_bic
, "output/main/model/table_model_backward_aic_bic.xlsx"
)
## The model with lowest AIC/BIC was 'cox_model_full' (all variables of interest)
# Finished
################################################################################
# Sensitivity Analysis: Propensity Scores - SMR-weighted -------
## Estimating Propensity scores and weights - "ATT" estimand or SMR-weighted
ps_values_att <- weightit(model_no_var,
family = "binomial",
estimand = "ATT",
data = df_model,
method = "ps")
df_cox_model_ps_att <-
df_model %>%
bind_cols(
ps_att = ps_values_att$ps,
ps_w_att = ps_values_att$weights
)
## "Full model": demographics, selected comorbidities and complications
df_cox_model_full_att <-
coxme(
cox_model_full,
weights = ps_w_att,
data = df_cox_model_ps_att
)
df_cox_model_full_att_results <-
df_cox_model_full_att %>%
tidy(exponentiate = TRUE) %>%
mutate(
p_adj = ifelse(p.value < 0.001, "<0.001", round(p.value, 3)),
HR_ci = paste0(round(estimate, 3), " (", round(conf.low, 3), " - ", round(conf.high, 3), ")")
) %>%
select(term, HR_ci, p_adj)
writexl::write_xlsx(
df_cox_model_full_att_results
, "output/main/model/table_cox_model_full_ATT.xlsx"
)
# Sensitivity Analysis: Propensity Scores - Trim upper 5% --------
## Estimating Propensity scores - "ATT" estimand or SMR-weighted
df_cox_model_ps_ate_trim <-
df_model %>%
bind_cols(
ps_ate = ps_values$ps,
ps_w_ate = ps_values$weights
) %>%
filter(
ps_ate <= quantile(ps_ate, probs = 0.95)
)
## "Full model": demographics, selected comorbidities and complications - All admissions N = 13,301
df_cox_model_full_trim <-
coxme(
cox_model_full,
weights = ps_w_ate,
data = df_cox_model_ps_ate_trim
)
df_cox_model_full_trim_results <-
df_cox_model_full_trim %>%
tidy(exponentiate = TRUE) %>%
mutate(
p_adj = ifelse(p.value < 0.001, "<0.001", round(p.value, 3)),
HR_ci = paste0(round(estimate, 3), " (", round(conf.low, 3), " - ", round(conf.high, 3), ")")
) %>%
select(term, HR_ci, p_adj)
writexl::write_xlsx(
df_cox_model_full_trim_results
, "output/main/model/table_cox_model_full_ATE_trim.xlsx"
)
# Analysis of quantitative variables for Cox model ------------------------
## Table Age groups x 60-day in-hospital mortality
df_age_mortality <-
df_model %>%
group_by(idade_grupo) %>%
summarise(
deaths = sum(outcome_follow_up),
total = n(),
mortal = sum(outcome_follow_up) / n(),
mortal_ratio = paste0(sum(outcome_follow_up), "/", n())
) %>%
ungroup()
## Age x 60-day in-hospital mortality
plot_age_mortality <-
df_age_mortality %>%
ggplot() +
geom_col(aes(x = idade_grupo, y = mortal)) +
scale_y_continuous(labels = scales::percent_format()) +
labs(x = "Age (years)", y = "60-day in-hospital mortality") +
theme_bw()
## Table SOFA x 60-day in-hospital mortality
df_sofa_mortality <-
df_model %>%
group_by(SofaScore) %>%
summarise(
deaths = sum(outcome_follow_up),
total = n(),
mortal = sum(outcome_follow_up) / n(),
mortal_ratio = paste0(sum(outcome_follow_up), "/", n())
) %>%
ungroup()
## SOFA x Mortality
plot_sofa_mortality <-
df_sofa_mortality %>%
ggplot() +
geom_col(aes(x = SofaScore, y = mortal)) +
scale_y_continuous(labels = scales::percent_format()) +
labs(x = "SOFA score", y = "60-day in-hospital mortality") +
theme_bw()
## Table SAPS3 x in-hospital mortality
df_saps_mortality <-
df_model %>%
mutate(Saps3decil = cut_number(Saps3Points, n = 4)) %>%
group_by(Saps3decil) %>%
summarise(
deaths = sum(outcome_follow_up),
total = n(),
mortal = sum(outcome_follow_up) / n(),
mortal_ratio = paste0(sum(outcome_follow_up), "/", n())
) %>%
ungroup()
## SAPS3 x Mortality
plot_saps_mortality <-
df_saps_mortality %>%
ggplot() +
geom_col(aes(x = Saps3decil, y = mortal)) +
scale_y_continuous(labels = scales::percent_format()) +
scale_x_discrete(labels = c("<= 42", "43 to 50", "51 to 61", "> 61")) +
labs(x = "SAPS-3 (quartiles)", y = "60-day in-hospital mortality") +
theme_bw()
ggsave("output/supplementary/figures_supplementary/figure_Age_SOFA_SAPS_60day_mortality.png",
ggpubr::ggarrange(
plot_age_mortality,
plot_saps_mortality,
plot_sofa_mortality,
nrow = 3, ncol = 1
),
width = 4, height = 8, dpi = 800)
## Martingale's residuals for continuous variables
survminer::ggcoxfunctional(
coxph(Surv(hosp_los_follow_up, outcome_follow_up) ~
Age +
SofaScore +
Saps3Points,
data = df_cox_model_ps_ate)
)
# Finished
|
/Data_Analysis/model_cox_mortality_adv_resp_support.R
|
no_license
|
lslbastos/COVID19_ICU_Changes_Mortality
|
R
| false
| false
| 12,362
|
r
|
################################################################################
## Article Evolving changes in mortality of 13,301 critically ill adult
## patients with COVID-19 over eight months
##
## Random-effects Cox model (mortality model - main analysis)
## Leonardo S.L. Bastos (lslbastos), Pedro Kurtz
##
################################################################################
# Library -----------------------------------------------------------------
library(tidyverse)
library(tidylog)
library(WeightIt)
library(coxme)
library(broom)
library(ehahelper)
# Obtaining main data frame of COVID-19 patients (with preparations)
source("code/Data_Analysis/descriptive_stats_covid.R")
################################################################################
# Data preparation --------------------------------------------------------
## Defining data for modeling (Cox Mortality)
## Sample for model development: patients that required advanced respiratory support
# N = 4,188
df_model <-
df_covid_admissions %>%
filter(
VentSupport != "none"
) %>%
droplevels() %>%
mutate(
first_resp_support = case_when(
VentSupport %in% c("only_niv", "niv_to_mv") ~ "niv_first",
VentSupport %in% c("only_mv") ~ "imv_first",
),
first_resp_support = factor(first_resp_support,
levels = c("imv_first", "niv_first")),
emergency = if_else(AdmissionSource == "Emergency", 1, 0),
period = factor(period, levels = c(2, 1, 3, 4)),
Saps3Q = cut_number(Saps3Points, n = 4) # SAPS3 quartiles
) %>%
select(
outcome_follow_up,
hosp_los_follow_up,
first_resp_support,
Age,
Gender,
Saps3Points,
Saps3Q,
SofaScore,
idade_grupo,
MFI_level,
MFIpoints,
VentSupport,
emergency,
hypertension,
diabetes,
cardio_disease,
imunossupression,
cerebro_disease,
obesity,
copd_asthma,
malignancy,
chronic_kidney,
tobacco,
liver_cirrhosis,
PaO2FiO2,
PaO2FiO21h_level,
IsVasopressors,
IsRenalReplacementTherapy,
ResourceIsVasopressors,
ResourceIsRenalReplacementTherapy,
period,
HospitalCode
) %>%
mutate_at(
c("emergency",
"hypertension",
"diabetes",
"cardio_disease",
"imunossupression",
"hypertension",
"diabetes",
"cardio_disease",
"imunossupression",
"cerebro_disease",
"obesity",
"copd_asthma",
"malignancy",
"chronic_kidney",
"tobacco",
"liver_cirrhosis",
"IsVasopressors",
"IsRenalReplacementTherapy",
"ResourceIsVasopressors",
"ResourceIsRenalReplacementTherapy"),
function(x) { return(as.factor(if_else(x == 1, "yes", "no"))) }
)
################################################################################
# Propensity score estimation (final model in 'propensity_score_mode.R')
model_no_var <-
first_resp_support ~
Gender +
idade_grupo +
MFI_level +
SofaScore +
period +
emergency +
cardio_disease +
IsVasopressors
## Estimating Propensity scores and weights - "ATE" estimand
ps_values <-
weightit(
model_no_var,
family = "binomial",
estimand = "ATE",
data = df_model,
method = "ps"
)
## Obtaining propensity scores and IPT weights for each patients
df_cox_model_ps_ate <-
df_model %>%
bind_cols(
ps_ate = ps_values$ps,
ps_w_ate = ps_values$weights
)
### Propensity Score model evaluation
## Results from the propensity score model fitting and choice are in
## "code/2_Data_Analysis/data_model_propensity_main.R"
################################################################################
# Random-effects cox model (Frailty model) --------------------------------
## "Full model": demographics, selected comorbidities and complications
cox_model_full <-
Surv(hosp_los_follow_up, outcome_follow_up) ~
Gender +
idade_grupo +
MFI_level +
SofaScore +
Saps3Q +
emergency +
hypertension +
diabetes +
obesity +
copd_asthma +
cardio_disease +
ResourceIsVasopressors +
ResourceIsRenalReplacementTherapy +
first_resp_support +
period +
(1 | HospitalCode)
df_cox_model_full <-
coxme(
cox_model_full,
weights = ps_w_ate,
data = df_cox_model_ps_ate
)
df_cox_model_full_results <-
df_cox_model_full %>%
tidy(exponentiate = TRUE) %>%
mutate(
p_adj = ifelse(p.value < 0.001, "<0.001", round(p.value, 3)),
HR_ci = paste0(round(estimate, 2),
" (", round(conf.low, 2), " - ", round(conf.high, 2), ")")
) %>%
select(term, HR_ci, p_adj)
## Model w/ non-significant comorbidities: demographics and complications
cox_model_no_comorb <-
Surv(hosp_los_follow_up, outcome_follow_up) ~
Gender +
idade_grupo +
MFI_level +
SofaScore +
Saps3Q +
emergency +
hypertension +
# diabetes +
# obesity +
# copd_asthma +
cardio_disease +
ResourceIsVasopressors +
ResourceIsRenalReplacementTherapy +
first_resp_support +
period +
(1 | HospitalCode)
df_cox_model_no_comorb <-
coxme(
cox_model_no_comorb,
weights = ps_w_ate,
data = df_cox_model_ps_ate
)
df_cox_model_no_comorb_results <-
df_cox_model_no_comorb %>%
tidy(exponentiate = TRUE) %>%
mutate(
p_adj = ifelse(p.value < 0.001, "<0.001", round(p.value, 3)),
HR_ci = paste0(round(estimate, 3), " (",
round(conf.low, 2), " - ", round(conf.high, 2), ")")
) %>%
select(term, HR_ci, p_adj)
## Comparison of estiamted models (all admissions, no sign comorb, no sign var)
writexl::write_xlsx(
left_join(
df_cox_model_full_results,
df_cox_model_no_comorb_results %>%
rename(HR_ci_comorb = HR_ci, p_adj_comorb = p_adj)
, by = c("term" = "term")
)
, "output/main/model/table_model_backward_comparison.xlsx")
## Comparison of models using estimated AIC and BIC
df_models_aic_bic <-
tibble(
model = c("cox_model_full",
"cox_model_no_comorb"),
AIC = c(
2 * (df_cox_model_full$loglik[2] - df_cox_model_full$loglik[1]) - 2 * df_cox_model_full$df[1],
2 * (df_cox_model_no_comorb$loglik[2] - df_cox_model_no_comorb$loglik[1]) - 2 * df_cox_model_no_comorb$df[1]
),
BIC = c(
2 * (df_cox_model_full$loglik[2] - df_cox_model_full$loglik[1]) - log(df_cox_model_full$n[1]) * df_cox_model_full$df[1],
2 * (df_cox_model_no_comorb$loglik[2] - df_cox_model_no_comorb$loglik[1]) - log(df_cox_model_no_comorb$n[1]) * df_cox_model_no_comorb$df[1]
)
)
writexl::write_xlsx(
df_models_aic_bic
, "output/main/model/table_model_backward_aic_bic.xlsx"
)
## The model with lowest AIC/BIC was 'cox_model_full' (all variables of interest)
# Finished
################################################################################
# Sensitivity Analysis: Propensity Scores - SMR-weighted -------
## Estimating Propensity scores and weights - "ATT" estimand or SMR-weighted
ps_values_att <- weightit(model_no_var,
family = "binomial",
estimand = "ATT",
data = df_model,
method = "ps")
df_cox_model_ps_att <-
df_model %>%
bind_cols(
ps_att = ps_values_att$ps,
ps_w_att = ps_values_att$weights
)
## "Full model": demographics, selected comorbidities and complications
df_cox_model_full_att <-
coxme(
cox_model_full,
weights = ps_w_att,
data = df_cox_model_ps_att
)
df_cox_model_full_att_results <-
df_cox_model_full_att %>%
tidy(exponentiate = TRUE) %>%
mutate(
p_adj = ifelse(p.value < 0.001, "<0.001", round(p.value, 3)),
HR_ci = paste0(round(estimate, 3), " (", round(conf.low, 3), " - ", round(conf.high, 3), ")")
) %>%
select(term, HR_ci, p_adj)
writexl::write_xlsx(
df_cox_model_full_att_results
, "output/main/model/table_cox_model_full_ATT.xlsx"
)
# Sensitivity Analysis: Propensity Scores - Trim upper 5% --------
## Estimating Propensity scores - "ATT" estimand or SMR-weighted
df_cox_model_ps_ate_trim <-
df_model %>%
bind_cols(
ps_ate = ps_values$ps,
ps_w_ate = ps_values$weights
) %>%
filter(
ps_ate <= quantile(ps_ate, probs = 0.95)
)
## "Full model": demographics, selected comorbidities and complications - All admissions N = 13,301
df_cox_model_full_trim <-
coxme(
cox_model_full,
weights = ps_w_ate,
data = df_cox_model_ps_ate_trim
)
df_cox_model_full_trim_results <-
df_cox_model_full_trim %>%
tidy(exponentiate = TRUE) %>%
mutate(
p_adj = ifelse(p.value < 0.001, "<0.001", round(p.value, 3)),
HR_ci = paste0(round(estimate, 3), " (", round(conf.low, 3), " - ", round(conf.high, 3), ")")
) %>%
select(term, HR_ci, p_adj)
writexl::write_xlsx(
df_cox_model_full_trim_results
, "output/main/model/table_cox_model_full_ATE_trim.xlsx"
)
# Analysis of quantitative variables for Cox model ------------------------
## Table Age groups x 60-day in-hospital mortality
df_age_mortality <-
df_model %>%
group_by(idade_grupo) %>%
summarise(
deaths = sum(outcome_follow_up),
total = n(),
mortal = sum(outcome_follow_up) / n(),
mortal_ratio = paste0(sum(outcome_follow_up), "/", n())
) %>%
ungroup()
## Age x 60-day in-hospital mortality
plot_age_mortality <-
df_age_mortality %>%
ggplot() +
geom_col(aes(x = idade_grupo, y = mortal)) +
scale_y_continuous(labels = scales::percent_format()) +
labs(x = "Age (years)", y = "60-day in-hospital mortality") +
theme_bw()
## Table SOFA x 60-day in-hospital mortality
df_sofa_mortality <-
df_model %>%
group_by(SofaScore) %>%
summarise(
deaths = sum(outcome_follow_up),
total = n(),
mortal = sum(outcome_follow_up) / n(),
mortal_ratio = paste0(sum(outcome_follow_up), "/", n())
) %>%
ungroup()
## SOFA x Mortality
plot_sofa_mortality <-
df_sofa_mortality %>%
ggplot() +
geom_col(aes(x = SofaScore, y = mortal)) +
scale_y_continuous(labels = scales::percent_format()) +
labs(x = "SOFA score", y = "60-day in-hospital mortality") +
theme_bw()
## Table SAPS3 x in-hospital mortality
df_saps_mortality <-
df_model %>%
mutate(Saps3decil = cut_number(Saps3Points, n = 4)) %>%
group_by(Saps3decil) %>%
summarise(
deaths = sum(outcome_follow_up),
total = n(),
mortal = sum(outcome_follow_up) / n(),
mortal_ratio = paste0(sum(outcome_follow_up), "/", n())
) %>%
ungroup()
## SAPS3 x Mortality
plot_saps_mortality <-
df_saps_mortality %>%
ggplot() +
geom_col(aes(x = Saps3decil, y = mortal)) +
scale_y_continuous(labels = scales::percent_format()) +
scale_x_discrete(labels = c("<= 42", "43 to 50", "51 to 61", "> 61")) +
labs(x = "SAPS-3 (quartiles)", y = "60-day in-hospital mortality") +
theme_bw()
ggsave("output/supplementary/figures_supplementary/figure_Age_SOFA_SAPS_60day_mortality.png",
ggpubr::ggarrange(
plot_age_mortality,
plot_saps_mortality,
plot_sofa_mortality,
nrow = 3, ncol = 1
),
width = 4, height = 8, dpi = 800)
## Martingale's residuals for continuous variables
survminer::ggcoxfunctional(
coxph(Surv(hosp_los_follow_up, outcome_follow_up) ~
Age +
SofaScore +
Saps3Points,
data = df_cox_model_ps_ate)
)
# Finished
|
#' Remove/Replace/Extract All Caps
#'
#' Remove/replace/extract 'all caps' words containing 2 or more consecutive
#' upper case letters from a string.
#'
#' @param text.var The text variable.
#' @param trim logical. If \code{TRUE} removes leading and trailing white
#' spaces.
#' @param clean trim logical. If \code{TRUE} extra white spaces and escaped
#' character will be removed.
#' @param pattern A character string containing a regular expression (or
#' character string for \code{fixed = TRUE}) to be matched in the given
#' character vector. Default, \code{@@rm_caps} uses the
#' \code{rm_caps} regex from the regular expression dictionary from
#' the \code{dictionary} argument.
#' @param replacement Replacement for matched \code{pattern}.
#' @param extract logical. If \code{TRUE} the all caps strings are extracted
#' into a list of vectors.
#' @param dictionary A dictionary of canned regular expressions to search within
#' if \code{pattern} begins with \code{"@@rm_"}.
#' @param \dots Other arguments passed to \code{\link[base]{gsub}}.
#' @return Returns a character string with "all caps" removed.
#' @keywords caps capital
#' @family rm_ functions
#' @include utils.R
#' @export
#' @rdname rm_caps
#' @seealso \code{\link[base]{gsub}},
#' \code{\link[stringi]{stri_extract_all_regex}}
#' @examples
#' x <- c("UGGG! When I use caps I am YELLING!")
#' rm_caps(x)
#' rm_caps(x, replacement="\\L\\1")
#' ex_caps(x)
rm_caps <- hijack(rm_default, pattern = "@rm_caps")
#' @export
#' @rdname rm_caps
ex_caps <- hijack(rm_caps, extract=TRUE)
|
/R/rm_caps.R
|
no_license
|
cran/qdapRegex
|
R
| false
| false
| 1,609
|
r
|
#' Remove/Replace/Extract All Caps
#'
#' Remove/replace/extract 'all caps' words containing 2 or more consecutive
#' upper case letters from a string.
#'
#' @param text.var The text variable.
#' @param trim logical. If \code{TRUE} removes leading and trailing white
#' spaces.
#' @param clean logical. If \code{TRUE} extra white spaces and escaped
#' character will be removed.
#' @param pattern A character string containing a regular expression (or
#' character string for \code{fixed = TRUE}) to be matched in the given
#' character vector. Default, \code{@@rm_caps} uses the
#' \code{rm_caps} regex from the regular expression dictionary from
#' the \code{dictionary} argument.
#' @param replacement Replacement for matched \code{pattern}.
#' @param extract logical. If \code{TRUE} the all caps strings are extracted
#' into a list of vectors.
#' @param dictionary A dictionary of canned regular expressions to search within
#' if \code{pattern} begins with \code{"@@rm_"}.
#' @param \dots Other arguments passed to \code{\link[base]{gsub}}.
#' @return Returns a character string with "all caps" removed.
#' @keywords caps capital
#' @family rm_ functions
#' @include utils.R
#' @export
#' @rdname rm_caps
#' @seealso \code{\link[base]{gsub}},
#' \code{\link[stringi]{stri_extract_all_regex}}
#' @examples
#' x <- c("UGGG! When I use caps I am YELLING!")
#' rm_caps(x)
#' rm_caps(x, replacement="\\L\\1")
#' ex_caps(x)
rm_caps <- hijack(rm_default, pattern = "@rm_caps") # rm_default with the "@rm_caps" pattern preset (hijack() is defined in utils.R)
#' @export
#' @rdname rm_caps
ex_caps <- hijack(rm_caps, extract=TRUE) # rm_caps with extract = TRUE preset: returns the matched all-caps strings
|
#########################
# MODEL FITTING for DAS #
#########################
# Author: Giovanni Colavizza
#
# End-to-end analysis script: loads the DAS dataset, derives log-transformed
# variables, then fits a sequence of models (OLS, robust OLS, ANOVA, Tobit,
# logistic/multinomial, negative binomial and related GLMs) of citation
# counts on Data Availability Statement (DAS) classes plus controls, and
# finally exports a per-journal DAS summary CSV.
#
# NOTE(review): besides ggplot2, this script calls functions from GGally
# (ggpairs), MASS (rlm, glm.nb), DMwR (regr.eval), stargazer, VGAM (vglm),
# nnet (multinom), gamlss, dplyr and forcats. GGally is never attached in
# this file -- confirm it is loaded elsewhere before ggpairs() is reached.
require(ggplot2)
# load the dataset and make transformations
df <- read.csv("dataset/export_full.csv", sep = ";")
# Categorical columns are converted to factors so the model formulas below
# treat them as discrete predictors.
df$has_das <- factor(df$has_das)
df$is_plos <- factor(df$is_plos)
df$is_bmc <- factor(df$is_bmc)
df$has_month <- factor(df$has_month)
df$das_class <- factor(as.integer(df$das_class))
df$j_lower <- factor(df$j_lower)
df$journal_domain <- factor(df$journal_domain)
df$journal_field <- factor(df$journal_field)
df$journal_subfield <- factor(df$journal_subfield)
# filter for NaN and by time (assuming to use 3y citation window: exclude 2016 to 2018)
df_filtered <- df[(!is.na(df$h_index_mean))&(df$p_year<2016),]
# log-transform (add 1 to bound between zero and infinity)
# (sapply(x, log) is equivalent to the vectorized log(x) here)
df_filtered$n_cit_2_log <- df_filtered$n_cit_2 + 1
df_filtered$n_cit_2_log <- sapply(df_filtered$n_cit_2_log,log)
df_filtered$n_cit_3_log <- df_filtered$n_cit_3 + 1
df_filtered$n_cit_3_log <- sapply(df_filtered$n_cit_3_log,log)
df_filtered$n_cit_5_log <- df_filtered$n_cit_5 + 1
df_filtered$n_cit_5_log <- sapply(df_filtered$n_cit_5_log,log)
# log-transform of other variables (optional, but better fitting due to outliers)
df_filtered$n_authors <- sapply(df_filtered$n_authors,log)
df_filtered$h_index_mean <- df_filtered$h_index_mean + 1
df_filtered$h_index_mean <- sapply(df_filtered$h_index_mean,log)
df_filtered$n_references_tot <- df_filtered$n_references_tot + 1
df_filtered$n_references_tot <- sapply(df_filtered$n_references_tot,log)
# create the dataset only with cited papers and the boolean variable if a paper is cited
df_filtered_negative <- df_filtered[df_filtered$n_cit_3 > 0,]
df_filtered$is_cited <- factor(df_filtered$n_cit_3 > 0)
set.seed(101) # Set Seed so that same sample can be reproduced
# Now Selecting 75% of data as train and 25% as test
sample <- sample.int(n = nrow(df_filtered), size = floor(.75*nrow(df_filtered)), replace = F)
train <- df_filtered[sample, ]
test <- df_filtered[-sample, ]
# select the dataset which will be used in regressions
DATASET <- df_filtered
# exploratory stats and plots (cf. Tables 4 and 5 in the paper)
summary(DATASET)
# Pearson correlation matrix of the main numeric variables; the upper
# triangle (incl. diagonal) is blanked for display.
corr <- round(cor(DATASET[, c("n_cit_3_log", "n_authors", "p_year", "p_month", "h_index_mean", "h_index_median", "n_references_tot")], method = "pearson"), 2)
upper <- corr
upper[upper.tri(corr, diag = TRUE)] <- ""
upper <- as.data.frame(upper)
upper
# NOTE(review): ggpairs() belongs to the GGally package, which is not loaded
# in this script.
ggpairs(DATASET[, c("n_cit_3_log", "n_authors", "p_year", "h_index_mean", "h_index_median", "n_references_tot")])
# DAS class frequencies
par(mfcol = c(2, 2))
par(mar=c(4,4,4,4))
barplot(table(df$das_class), main="Full dataset")
barplot(table(df[df$das_class != 0,]$das_class))
barplot(table(DATASET$das_class), main="Filtered (before 2016)")
barplot(table(DATASET[DATASET$das_class != 0,]$das_class))
# check for lognormal distribution (and compare vs Pareto): it looks more like the former.
qqnorm(DATASET$n_cit_3_log)
# Exponential theoretical quantiles (plotted against log counts as the
# Pareto comparison).
qex <- function(x) qexp((rank(x)-.375)/(length(x)+.25))
plot(qex(DATASET$n_cit_3),DATASET$n_cit_3_log)
##################
# BASELINES: OLS #
##################
# https://stats.idre.ucla.edu/r/dae/robust-regression/
require(MASS)
require(DMwR)
# OLS
summary(m_ols <- lm(n_cit_3_log ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = DATASET))
# controlling for journal too
#summary(m_ols <- lm(n_cit_3_log ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos + C(j_lower), data = DATASET))
# check residuals
opar <- par(mfrow = c(2,2), oma = c(0, 0, 1.1, 0))
plot(m_ols, las = 1)
# Robust OLS
summary(m_rols <- rlm(n_cit_3_log ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = DATASET))
# ANOVA
summary(m_aov <- aov(n_cit_3_log ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = DATASET))
# Compare: OLS is fine
# (in-sample error metrics for the three baselines)
DMwR::regr.eval(DATASET$n_cit_3_log, m_ols$fitted.values)
DMwR::regr.eval(DATASET$n_cit_3_log, m_rols$fitted.values)
DMwR::regr.eval(DATASET$n_cit_3_log, m_aov$fitted.values)
# Output in LaTeX (Table 6)
require(stargazer)
stargazer(m_ols, m_rols, title="Results", align=TRUE, mean.sd = FALSE)
#########
# TOBIT #
#########
# https://stats.idre.ucla.edu/r/dae/tobit-models/
# Also see: http://www.stat.columbia.edu/~madigan/G6101/notes/logisticTobit.pdf
require(VGAM)
# Tobit model: log-citations treated as left-censored at zero.
summary(m <- vglm(n_cit_3_log ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, tobit(Lower = 0), data = DATASET))
ctable <- coef(summary(m))
# Two-sided p-values from the z statistics, using a t reference distribution
# with the model's residual degrees of freedom.
pvals <- 2 * pt(abs(ctable[, "z value"]), df.residual(m), lower.tail = FALSE)
t <- cbind(ctable, pvals)
t
# significance of das_class via loglikelihood ratio test
m2 <- vglm(n_cit_3_log ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(journal_field) + das_required + das_encouraged + is_plos, tobit(Lower = 0), data = DATASET)
(p <- pchisq(2 * (logLik(m) - logLik(m2)), df = 2, lower.tail = FALSE))
# check residuals
DATASET$yhat <- fitted(m)[,1]
DATASET$rr <- resid(m, type = "response")
DATASET$rp <- resid(m, type = "pearson")[,1]
par(mfcol = c(2, 3))
par(mar=c(4,4,4,4))
with(DATASET, {
plot(yhat, rr, main = "Fitted vs Residuals")
qqnorm(rr)
plot(yhat, rp, main = "Fitted vs Pearson Residuals")
qqnorm(rp)
plot(n_cit_3_log, rp, main = "Actual vs Pearson Residuals")
plot(n_cit_3_log, yhat, main = "Actual vs Fitted")
})
# correlation predicted vs data
(r <- with(DATASET, cor(yhat, n_cit_3_log)))
############
# LOGISTIC #
############
# https://www.r-bloggers.com/how-to-perform-a-logistic-regression-in-r/
# Really good results, yet the strong predictors here are year and PLoS due to policy timing effects.
require(nnet)
# Predicting if has_das or das_class
summary(m_logistic <- glm(has_das ~ n_cit_3_log + n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(journal_field) + das_required + das_encouraged + is_plos, data = DATASET, family = binomial))
summary(m_logistic <- multinom(C(das_class) ~ n_cit_3_log + n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(journal_field) + das_required + das_encouraged + is_plos, data = DATASET))
head(pp <- fitted(m_logistic)) # see probabilities
# Predicting if is_cited or not
summary(m_logistic <- glm(is_cited ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = DATASET, family=binomial(link='logit')))
anova(m_logistic, test="Chisq")
###################################
# GLM: NEGATIVE BINOMIAL and more #
###################################
# https://stats.idre.ucla.edu/r/dae/zinb/
require(MASS)
library(gamlss)
# standard negative binomial
summary(m_neg <- gamlss(n_cit_3 ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = na.omit(DATASET), family=NBF()))
# continuous lognormal
summary(m_log <- gamlss(n_cit_3 +1 ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = na.omit(DATASET), family=LOGNO()))
# Pareto type 2
summary(m_par <- gamlss(n_cit_3 +1 ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = na.omit(DATASET), family=PARETO2()))
# zero-inflated negative binomial
summary(m_zero_neg <- gamlss(n_cit_3 ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = na.omit(DATASET), family=ZINBF()))
# just with cited publications
summary(m_neg_neg <- glm.nb(n_cit_3 ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = df_filtered_negative))
ctable <- coef(summary(m_neg_neg))
pvals <- 2 * pt(abs(ctable[, "z value"]), df.residual(m_neg_neg), lower.tail = FALSE)
t2 <- cbind(ctable, pvals)
t2
# CITATIONS to packages
basecit <- system.file("CITATION", package="base")
source(basecit, echo=TRUE)
readCitationFile(basecit)
basecit <- system.file("CITATION", package="nnet")
source(basecit, echo=TRUE)
readCitationFile(basecit)
# Export BMC Journal DAS summary
library("dplyr")
# NOTE(review): `df` is re-read here without the factor conversions applied
# at the top, so the as.integer()-1 offsets below depend on read.csv turning
# these columns into factors -- verify under R >= 4.0 where
# stringsAsFactors defaults to FALSE.
df <- read.csv("dataset/export_full.csv", sep = ";")
j_df <- df %>%
filter(!is.na(j_lower)) %>%
dplyr::select(j_lower,das_class,has_das,das_encouraged,das_required) %>%
mutate(
j_lower = forcats::fct_explicit_na(j_lower),
das_class = as.integer(das_class),
has_das = as.integer(has_das)-1,
das_encouraged = as.integer(das_encouraged)-1,
das_required = as.integer(das_required)-1
) %>%
group_by(j_lower,das_class) %>%
summarise(N = n(),has_das = sum(has_das),das_encouraged = sum(das_encouraged) - sum(das_required),das_required = sum(das_required)) %>%
as.data.frame()
write.csv(j_df, file = "dataset/journal_das_summary.csv",row.names=FALSE)
|
/analysis/r_models.R
|
permissive
|
KirstieJane/das-public
|
R
| false
| false
| 9,828
|
r
|
#########################
# MODEL FITTING for DAS #
#########################
# Author: Giovanni Colavizza
#
# End-to-end analysis script: loads the DAS dataset, derives log-transformed
# variables, then fits a sequence of models (OLS, robust OLS, ANOVA, Tobit,
# logistic/multinomial, negative binomial and related GLMs) of citation
# counts on Data Availability Statement (DAS) classes plus controls, and
# finally exports a per-journal DAS summary CSV.
#
# NOTE(review): besides ggplot2, this script calls functions from GGally
# (ggpairs), MASS (rlm, glm.nb), DMwR (regr.eval), stargazer, VGAM (vglm),
# nnet (multinom), gamlss, dplyr and forcats. GGally is never attached in
# this file -- confirm it is loaded elsewhere before ggpairs() is reached.
require(ggplot2)
# load the dataset and make transformations
df <- read.csv("dataset/export_full.csv", sep = ";")
# Categorical columns are converted to factors so the model formulas below
# treat them as discrete predictors.
df$has_das <- factor(df$has_das)
df$is_plos <- factor(df$is_plos)
df$is_bmc <- factor(df$is_bmc)
df$has_month <- factor(df$has_month)
df$das_class <- factor(as.integer(df$das_class))
df$j_lower <- factor(df$j_lower)
df$journal_domain <- factor(df$journal_domain)
df$journal_field <- factor(df$journal_field)
df$journal_subfield <- factor(df$journal_subfield)
# filter for NaN and by time (assuming to use 3y citation window: exclude 2016 to 2018)
df_filtered <- df[(!is.na(df$h_index_mean))&(df$p_year<2016),]
# log-transform (add 1 to bound between zero and infinity)
# (sapply(x, log) is equivalent to the vectorized log(x) here)
df_filtered$n_cit_2_log <- df_filtered$n_cit_2 + 1
df_filtered$n_cit_2_log <- sapply(df_filtered$n_cit_2_log,log)
df_filtered$n_cit_3_log <- df_filtered$n_cit_3 + 1
df_filtered$n_cit_3_log <- sapply(df_filtered$n_cit_3_log,log)
df_filtered$n_cit_5_log <- df_filtered$n_cit_5 + 1
df_filtered$n_cit_5_log <- sapply(df_filtered$n_cit_5_log,log)
# log-transform of other variables (optional, but better fitting due to outliers)
df_filtered$n_authors <- sapply(df_filtered$n_authors,log)
df_filtered$h_index_mean <- df_filtered$h_index_mean + 1
df_filtered$h_index_mean <- sapply(df_filtered$h_index_mean,log)
df_filtered$n_references_tot <- df_filtered$n_references_tot + 1
df_filtered$n_references_tot <- sapply(df_filtered$n_references_tot,log)
# create the dataset only with cited papers and the boolean variable if a paper is cited
df_filtered_negative <- df_filtered[df_filtered$n_cit_3 > 0,]
df_filtered$is_cited <- factor(df_filtered$n_cit_3 > 0)
set.seed(101) # Set Seed so that same sample can be reproduced
# Now Selecting 75% of data as train and 25% as test
sample <- sample.int(n = nrow(df_filtered), size = floor(.75*nrow(df_filtered)), replace = F)
train <- df_filtered[sample, ]
test <- df_filtered[-sample, ]
# select the dataset which will be used in regressions
DATASET <- df_filtered
# exploratory stats and plots (cf. Tables 4 and 5 in the paper)
summary(DATASET)
# Pearson correlation matrix of the main numeric variables; the upper
# triangle (incl. diagonal) is blanked for display.
corr <- round(cor(DATASET[, c("n_cit_3_log", "n_authors", "p_year", "p_month", "h_index_mean", "h_index_median", "n_references_tot")], method = "pearson"), 2)
upper <- corr
upper[upper.tri(corr, diag = TRUE)] <- ""
upper <- as.data.frame(upper)
upper
# NOTE(review): ggpairs() belongs to the GGally package, which is not loaded
# in this script.
ggpairs(DATASET[, c("n_cit_3_log", "n_authors", "p_year", "h_index_mean", "h_index_median", "n_references_tot")])
# DAS class frequencies
par(mfcol = c(2, 2))
par(mar=c(4,4,4,4))
barplot(table(df$das_class), main="Full dataset")
barplot(table(df[df$das_class != 0,]$das_class))
barplot(table(DATASET$das_class), main="Filtered (before 2016)")
barplot(table(DATASET[DATASET$das_class != 0,]$das_class))
# check for lognormal distribution (and compare vs Pareto): it looks more like the former.
qqnorm(DATASET$n_cit_3_log)
# Exponential theoretical quantiles (plotted against log counts as the
# Pareto comparison).
qex <- function(x) qexp((rank(x)-.375)/(length(x)+.25))
plot(qex(DATASET$n_cit_3),DATASET$n_cit_3_log)
##################
# BASELINES: OLS #
##################
# https://stats.idre.ucla.edu/r/dae/robust-regression/
require(MASS)
require(DMwR)
# OLS
summary(m_ols <- lm(n_cit_3_log ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = DATASET))
# controlling for journal too
#summary(m_ols <- lm(n_cit_3_log ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos + C(j_lower), data = DATASET))
# check residuals
opar <- par(mfrow = c(2,2), oma = c(0, 0, 1.1, 0))
plot(m_ols, las = 1)
# Robust OLS
summary(m_rols <- rlm(n_cit_3_log ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = DATASET))
# ANOVA
summary(m_aov <- aov(n_cit_3_log ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = DATASET))
# Compare: OLS is fine
# (in-sample error metrics for the three baselines)
DMwR::regr.eval(DATASET$n_cit_3_log, m_ols$fitted.values)
DMwR::regr.eval(DATASET$n_cit_3_log, m_rols$fitted.values)
DMwR::regr.eval(DATASET$n_cit_3_log, m_aov$fitted.values)
# Output in LaTeX (Table 6)
require(stargazer)
stargazer(m_ols, m_rols, title="Results", align=TRUE, mean.sd = FALSE)
#########
# TOBIT #
#########
# https://stats.idre.ucla.edu/r/dae/tobit-models/
# Also see: http://www.stat.columbia.edu/~madigan/G6101/notes/logisticTobit.pdf
require(VGAM)
# Tobit model: log-citations treated as left-censored at zero.
summary(m <- vglm(n_cit_3_log ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, tobit(Lower = 0), data = DATASET))
ctable <- coef(summary(m))
# Two-sided p-values from the z statistics, using a t reference distribution
# with the model's residual degrees of freedom.
pvals <- 2 * pt(abs(ctable[, "z value"]), df.residual(m), lower.tail = FALSE)
t <- cbind(ctable, pvals)
t
# significance of das_class via loglikelihood ratio test
m2 <- vglm(n_cit_3_log ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(journal_field) + das_required + das_encouraged + is_plos, tobit(Lower = 0), data = DATASET)
(p <- pchisq(2 * (logLik(m) - logLik(m2)), df = 2, lower.tail = FALSE))
# check residuals
DATASET$yhat <- fitted(m)[,1]
DATASET$rr <- resid(m, type = "response")
DATASET$rp <- resid(m, type = "pearson")[,1]
par(mfcol = c(2, 3))
par(mar=c(4,4,4,4))
with(DATASET, {
plot(yhat, rr, main = "Fitted vs Residuals")
qqnorm(rr)
plot(yhat, rp, main = "Fitted vs Pearson Residuals")
qqnorm(rp)
plot(n_cit_3_log, rp, main = "Actual vs Pearson Residuals")
plot(n_cit_3_log, yhat, main = "Actual vs Fitted")
})
# correlation predicted vs data
(r <- with(DATASET, cor(yhat, n_cit_3_log)))
############
# LOGISTIC #
############
# https://www.r-bloggers.com/how-to-perform-a-logistic-regression-in-r/
# Really good results, yet the strong predictors here are year and PLoS due to policy timing effects.
require(nnet)
# Predicting if has_das or das_class
summary(m_logistic <- glm(has_das ~ n_cit_3_log + n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(journal_field) + das_required + das_encouraged + is_plos, data = DATASET, family = binomial))
summary(m_logistic <- multinom(C(das_class) ~ n_cit_3_log + n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(journal_field) + das_required + das_encouraged + is_plos, data = DATASET))
head(pp <- fitted(m_logistic)) # see probabilities
# Predicting if is_cited or not
summary(m_logistic <- glm(is_cited ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = DATASET, family=binomial(link='logit')))
anova(m_logistic, test="Chisq")
###################################
# GLM: NEGATIVE BINOMIAL and more #
###################################
# https://stats.idre.ucla.edu/r/dae/zinb/
require(MASS)
library(gamlss)
# standard negative binomial
summary(m_neg <- gamlss(n_cit_3 ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = na.omit(DATASET), family=NBF()))
# continuous lognormal
summary(m_log <- gamlss(n_cit_3 +1 ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = na.omit(DATASET), family=LOGNO()))
# Pareto type 2
summary(m_par <- gamlss(n_cit_3 +1 ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = na.omit(DATASET), family=PARETO2()))
# zero-inflated negative binomial
summary(m_zero_neg <- gamlss(n_cit_3 ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = na.omit(DATASET), family=ZINBF()))
# just with cited publications
summary(m_neg_neg <- glm.nb(n_cit_3 ~ n_authors + n_references_tot + p_year + p_month + h_index_mean + h_index_median + C(das_class) + C(journal_field) + das_required + das_encouraged + is_plos + C(das_class)*is_plos, data = df_filtered_negative))
ctable <- coef(summary(m_neg_neg))
pvals <- 2 * pt(abs(ctable[, "z value"]), df.residual(m_neg_neg), lower.tail = FALSE)
t2 <- cbind(ctable, pvals)
t2
# CITATIONS to packages
basecit <- system.file("CITATION", package="base")
source(basecit, echo=TRUE)
readCitationFile(basecit)
basecit <- system.file("CITATION", package="nnet")
source(basecit, echo=TRUE)
readCitationFile(basecit)
# Export BMC Journal DAS summary
library("dplyr")
# NOTE(review): `df` is re-read here without the factor conversions applied
# at the top, so the as.integer()-1 offsets below depend on read.csv turning
# these columns into factors -- verify under R >= 4.0 where
# stringsAsFactors defaults to FALSE.
df <- read.csv("dataset/export_full.csv", sep = ";")
j_df <- df %>%
filter(!is.na(j_lower)) %>%
dplyr::select(j_lower,das_class,has_das,das_encouraged,das_required) %>%
mutate(
j_lower = forcats::fct_explicit_na(j_lower),
das_class = as.integer(das_class),
has_das = as.integer(has_das)-1,
das_encouraged = as.integer(das_encouraged)-1,
das_required = as.integer(das_required)-1
) %>%
group_by(j_lower,das_class) %>%
summarise(N = n(),has_das = sum(has_das),das_encouraged = sum(das_encouraged) - sum(das_required),das_required = sum(das_required)) %>%
as.data.frame()
write.csv(j_df, file = "dataset/journal_das_summary.csv",row.names=FALSE)
|
### Do some analysis on the traffic experiments
#
# Exploratory plots for the traffic experiments:
# (1) mean +/- SD of the cross-validation objective across Lambda / C / deg,
# (2) fitted cost functions per polynomial degree,
# (3) histograms of relative residuals and flow prediction errors,
# (4) fitted curve vs. truth with a confidence ribbon.
#
# NOTE(review): setwd() below uses a hard-coded absolute path, so this
# script only runs on the original author's machine; all read.csv() paths
# are relative to that directory.
library(ggplot2)
setwd("/Users/VGupta/Documents/Research/Julia Stuff/Traffic/")
dat = read.csv(file="trafficCVal.csv")
names(dat)<- c("Lambda", "C", "deg", "Mean", "SD")
# Horizontal dodge so overlapping points/error bars stay readable.
pos = position_dodge(.5)
#The absolute minimum occurs at row 36:
# 1e05, 3.7625, deg 5
ggplot(aes(x=Lambda, y=Mean, color=factor(C), group=factor(C)),
data=subset(dat, deg == 6 & C > 1 & C < 4.1)) +
geom_point(position=pos) + geom_line(position=pos) +
geom_errorbar(aes(ymin=Mean - SD, ymax=Mean+SD), position=pos) +
scale_x_log10()
ggplot(aes(x=C, y=Mean, color=factor(Lambda), group=factor(Lambda)),
data=subset(dat, Lambda >=.1 & deg==5)) +
geom_point(position=pos) + geom_line(position=pos) +
geom_errorbar(aes(ymin=Mean - SD, ymax=Mean+SD), position=pos)
unique(dat$C)
### Plotting the various fits for different d
library(reshape)
dat.fits = read.csv("fittedFuncs.csv")
str(dat.fits)
# melt() here is reshape::melt; reshapes to one row per (Flow, Degree) pair.
dat.fits.melt = melt(dat.fits, id="Flow", variable_name="Degree")
ggplot(aes(x=Flow, color=Degree, linetype=Degree, y=value),
data=dat.fits.melt) +
geom_line() +
theme_bw(base_size=18) +
xlab("Scaled Flow") + ylab("") +
theme(legend.title=element_blank(),
legend.position = c(.2, .7))
### Plotting the residuals from traffic
dat.resids = read.csv("resids.csv")
ggplot(aes(x=ResidsRel), data = dat.resids) +
geom_histogram(aes(y=..density..), fill="grey") +
theme_bw(base_size=18) +
ylab("") +
xlab("Relative Residuals")
dat.flows = read.csv("FlowDiffs.csv")
ggplot(aes(x=Rel), data=dat.flows) +
geom_histogram(aes(y=..density..), fill="grey") +
theme_bw(base_size=18) +
ylab("") +
xlab("Rel. Prediction Error")
dat.fits2 = read.csv("amgSetsTraffic.csv")
str(dat.fits2)
# Dashed line: true values; solid line + squares: fit; grey ribbon: LB/UB.
ggplot(aes(x=Flows, y=True), data =dat.fits2) +
geom_line(size=.5, linetype="dashed") +
geom_point(aes(y=Fit), color="black", size=3, shape=15) +
geom_line(aes(y=Fit), color="black", size=.5) +
ylab("") +
geom_ribbon(aes(ymin=LB, ymax=UB), fill="grey", alpha=.3) +
coord_cartesian(ylim=c(0, 4)) +
theme_bw(base_size=18)
|
/archive/genGraphs.r
|
permissive
|
jingzbu/InverseVIsTraffic
|
R
| false
| false
| 2,110
|
r
|
### Do some analysis on the traffic experiments
#
# Exploratory plots for the traffic experiments:
# (1) mean +/- SD of the cross-validation objective across Lambda / C / deg,
# (2) fitted cost functions per polynomial degree,
# (3) histograms of relative residuals and flow prediction errors,
# (4) fitted curve vs. truth with a confidence ribbon.
#
# NOTE(review): setwd() below uses a hard-coded absolute path, so this
# script only runs on the original author's machine; all read.csv() paths
# are relative to that directory.
library(ggplot2)
setwd("/Users/VGupta/Documents/Research/Julia Stuff/Traffic/")
dat = read.csv(file="trafficCVal.csv")
names(dat)<- c("Lambda", "C", "deg", "Mean", "SD")
# Horizontal dodge so overlapping points/error bars stay readable.
pos = position_dodge(.5)
#The absolute minimum occurs at row 36:
# 1e05, 3.7625, deg 5
ggplot(aes(x=Lambda, y=Mean, color=factor(C), group=factor(C)),
data=subset(dat, deg == 6 & C > 1 & C < 4.1)) +
geom_point(position=pos) + geom_line(position=pos) +
geom_errorbar(aes(ymin=Mean - SD, ymax=Mean+SD), position=pos) +
scale_x_log10()
ggplot(aes(x=C, y=Mean, color=factor(Lambda), group=factor(Lambda)),
data=subset(dat, Lambda >=.1 & deg==5)) +
geom_point(position=pos) + geom_line(position=pos) +
geom_errorbar(aes(ymin=Mean - SD, ymax=Mean+SD), position=pos)
unique(dat$C)
### Plotting the various fits for different d
library(reshape)
dat.fits = read.csv("fittedFuncs.csv")
str(dat.fits)
# melt() here is reshape::melt; reshapes to one row per (Flow, Degree) pair.
dat.fits.melt = melt(dat.fits, id="Flow", variable_name="Degree")
ggplot(aes(x=Flow, color=Degree, linetype=Degree, y=value),
data=dat.fits.melt) +
geom_line() +
theme_bw(base_size=18) +
xlab("Scaled Flow") + ylab("") +
theme(legend.title=element_blank(),
legend.position = c(.2, .7))
### Plotting the residuals from traffic
dat.resids = read.csv("resids.csv")
ggplot(aes(x=ResidsRel), data = dat.resids) +
geom_histogram(aes(y=..density..), fill="grey") +
theme_bw(base_size=18) +
ylab("") +
xlab("Relative Residuals")
dat.flows = read.csv("FlowDiffs.csv")
ggplot(aes(x=Rel), data=dat.flows) +
geom_histogram(aes(y=..density..), fill="grey") +
theme_bw(base_size=18) +
ylab("") +
xlab("Rel. Prediction Error")
dat.fits2 = read.csv("amgSetsTraffic.csv")
str(dat.fits2)
# Dashed line: true values; solid line + squares: fit; grey ribbon: LB/UB.
ggplot(aes(x=Flows, y=True), data =dat.fits2) +
geom_line(size=.5, linetype="dashed") +
geom_point(aes(y=Fit), color="black", size=3, shape=15) +
geom_line(aes(y=Fit), color="black", size=.5) +
ylab("") +
geom_ribbon(aes(ymin=LB, ymax=UB), fill="grey", alpha=.3) +
coord_cartesian(ylim=c(0, 4)) +
theme_bw(base_size=18)
|
#'A S3 class to represent the address for a geocoding request.
#'
#'Creates an object of the class \code{adrs}. The \code{adrs} object contains a
#'single string representing the address of a place.
#'
#'@param a A string with the address. The address should be specified in the
#'  format used by the national postal service of the country concerned.
#'
#'@field address Address (string).
#'
#'@examples
#'x <- adrs('Linköpings Universitet, 58183 Linköping')
#'print(x)
#'
#'@seealso
#'\href{https://developers.google.com/maps/documentation/#'geocoding/
#'intro?hl=de}{Google Geo API Description}
#'
#'@export
adrs <- function(a) {
  'Function for creating objects of class address.'
  # Fail early if the address argument was not supplied.
  if(missing(a)) {
    stop('Please specify all arguments.')
  }
  # The address must be a character vector (typically of length one).
  if(!is.character(a)) {
    stop('Please specify all arguments as a string.')
  }
  structure(
    list(
      address = a
    ),
    class = 'adrs'
  )
}
#'@export
print.adrs <- function(x, ...) {
  # Human-readable display of the stored address.
  cat('Adress:\n')
  cat('---\n')
  cat(x$address)
  # Return the object invisibly, as is conventional for S3 print methods,
  # so the method composes cleanly in pipelines.
  invisible(x)
}
|
/R/classAddress.R
|
no_license
|
joshu107/GoogleGeoAPI
|
R
| false
| false
| 1,081
|
r
|
#'A S3 class to represent the address for a geocoding request.
#'
#'Creates an object of the class \code{adrs}. The \code{adrs} object contains a
#'single string representing the address of a place.
#'
#'@param a A string with the address. The address should be specified in the
#'  format used by the national postal service of the country concerned.
#'
#'@field address Address (string).
#'
#'@examples
#'x <- adrs('Linköpings Universitet, 58183 Linköping')
#'print(x)
#'
#'@seealso
#'\href{https://developers.google.com/maps/documentation/geocoding/intro?hl=de}{Google Geo API Description}
#'
#'@export
adrs <- function(a) {
  # Validate that the address argument was supplied.
  if(missing(a)) {
    stop('Please specify all arguments.')
  }
  # Validate that the address is a character string.
  if(!is.character(a)) {
    stop('Please specify all arguments as a string.')
  }
  # Build a list carrying the address and tag it with class 'adrs' so that
  # S3 methods (e.g. print.adrs) dispatch on it.
  structure(
    list(
      address = a
    ),
    class = 'adrs'
  )
}
#'@export
print.adrs <- function(x, ...) {
  # Print a human-readable representation of an 'adrs' object.
  cat('Address:\n')  # bug fix: was misspelled 'Adress'
  cat('---\n')
  # Terminate the output with a newline so the console prompt is not glued
  # to the address (the original omitted it).
  cat(x$address, '\n', sep = '')
  # Standard contract for S3 print methods: return the object invisibly.
  invisible(x)
}
|
test_that("meteo_noaa_hourly", {
  # Download hourly records for one NOAA station and check that every
  # returned row belongs to the requested year.
  hourly_data <- meteo_noaa_hourly(station = "123300-99999", year = 2019)
  expect_true(all(hourly_data$year == 2019))
})
|
/tests/testthat/test-meteo_noaa_hourly.R
|
no_license
|
cran/climate
|
R
| false
| false
| 170
|
r
|
test_that("meteo_noaa_hourly", {
  # Download hourly records for one NOAA station and check that every
  # returned row belongs to the requested year.
  hourly_data <- meteo_noaa_hourly(station = "123300-99999", year = 2019)
  expect_true(all(hourly_data$year == 2019))
})
|
structure(list(record_id = 1:5, name_first = c("Nutmeg", "Tumtum",
"Marcus", "Trudy", "John Lee"), name_last = c("Nutmouse", "Nutmouse",
"Wood", "DAG", "Walker"), address = c("14 Rose Cottage St.\nKenning UK, 323232",
"14 Rose Cottage Blvd.\nKenning UK 34243", "243 Hill St.\nGuthrie OK 73402",
"342 Elm\nDuncanville TX, 75116", "Hotel Suite\nNew Orleans LA, 70115"
), telephone = c("(405) 321-1111", "(405) 321-2222", "(405) 321-3333",
"(405) 321-4444", "(405) 321-5555"), email = c("nutty@mouse.com",
"tummy@mouse.comm", "mw@mwood.net", "peroxide@blonde.com", "left@hippocket.com"
), dob = c("2003-08-30", "2003-03-10", "1934-04-09", "1952-11-02",
"1955-04-15"), age = c(11L, 11L, 80L, 61L, 59L), sex = c("Female",
"Male", "Male", "Female", "Male"), height = c(7, 6, 180, 165,
193.04), weight = c(1L, 1L, 80L, 54L, 104L), bmi = c(204.1, 277.8,
24.7, 19.8, 27.9), comments = c("Character in a book, with some guessing",
"A mouse character from a good book", "completely made up", "This record doesn't have a DAG assigned\n\nSo call up Trudy on the telephone\nSend her a letter in the mail",
"Had a hand for trouble and a eye for cash\n\nHe had a gold watch chain and a black mustache"
), mugshot = c(197977L, 197978L, 197979L, 198002L, 198021L),
race___1 = c(FALSE, FALSE, FALSE, FALSE, FALSE), race___2 = c(FALSE,
FALSE, FALSE, FALSE, FALSE), race___3 = c(FALSE, FALSE, FALSE,
FALSE, FALSE), race___4 = c(FALSE, FALSE, FALSE, FALSE, FALSE
), race___5 = c(FALSE, FALSE, FALSE, FALSE, FALSE), race___6 = c(FALSE,
FALSE, FALSE, FALSE, FALSE), ethnicity = c("NOT Hispanic or Latino",
"NOT Hispanic or Latino", "Unknown / Not Reported", "NOT Hispanic or Latino",
"Hispanic or Latino"), interpreter_needed = c("False", "False",
"True", NA, "False"), demographics_complete = c("Complete",
"Complete", "Complete", "Complete", "Complete"), health_complete = c("Unverified",
"Incomplete", "Complete", "Complete", "Incomplete"), race_and_ethnicity_complete = c("Complete",
"Incomplete", "Complete", "Complete", "Complete")), row.names = c(NA,
-5L), class = c("tbl_df", "tbl", "data.frame"))
|
/inst/test-data/specific-redcapr/read-oneshot-eav/label-and-dag.R
|
permissive
|
OuhscBbmc/REDCapR
|
R
| false
| false
| 2,149
|
r
|
structure(list(record_id = 1:5, name_first = c("Nutmeg", "Tumtum",
"Marcus", "Trudy", "John Lee"), name_last = c("Nutmouse", "Nutmouse",
"Wood", "DAG", "Walker"), address = c("14 Rose Cottage St.\nKenning UK, 323232",
"14 Rose Cottage Blvd.\nKenning UK 34243", "243 Hill St.\nGuthrie OK 73402",
"342 Elm\nDuncanville TX, 75116", "Hotel Suite\nNew Orleans LA, 70115"
), telephone = c("(405) 321-1111", "(405) 321-2222", "(405) 321-3333",
"(405) 321-4444", "(405) 321-5555"), email = c("nutty@mouse.com",
"tummy@mouse.comm", "mw@mwood.net", "peroxide@blonde.com", "left@hippocket.com"
), dob = c("2003-08-30", "2003-03-10", "1934-04-09", "1952-11-02",
"1955-04-15"), age = c(11L, 11L, 80L, 61L, 59L), sex = c("Female",
"Male", "Male", "Female", "Male"), height = c(7, 6, 180, 165,
193.04), weight = c(1L, 1L, 80L, 54L, 104L), bmi = c(204.1, 277.8,
24.7, 19.8, 27.9), comments = c("Character in a book, with some guessing",
"A mouse character from a good book", "completely made up", "This record doesn't have a DAG assigned\n\nSo call up Trudy on the telephone\nSend her a letter in the mail",
"Had a hand for trouble and a eye for cash\n\nHe had a gold watch chain and a black mustache"
), mugshot = c(197977L, 197978L, 197979L, 198002L, 198021L),
race___1 = c(FALSE, FALSE, FALSE, FALSE, FALSE), race___2 = c(FALSE,
FALSE, FALSE, FALSE, FALSE), race___3 = c(FALSE, FALSE, FALSE,
FALSE, FALSE), race___4 = c(FALSE, FALSE, FALSE, FALSE, FALSE
), race___5 = c(FALSE, FALSE, FALSE, FALSE, FALSE), race___6 = c(FALSE,
FALSE, FALSE, FALSE, FALSE), ethnicity = c("NOT Hispanic or Latino",
"NOT Hispanic or Latino", "Unknown / Not Reported", "NOT Hispanic or Latino",
"Hispanic or Latino"), interpreter_needed = c("False", "False",
"True", NA, "False"), demographics_complete = c("Complete",
"Complete", "Complete", "Complete", "Complete"), health_complete = c("Unverified",
"Incomplete", "Complete", "Complete", "Incomplete"), race_and_ethnicity_complete = c("Complete",
"Incomplete", "Complete", "Complete", "Complete")), row.names = c(NA,
-5L), class = c("tbl_df", "tbl", "data.frame"))
|
.Random.seed <-
c(403L, 10L, 126536518L, -1580181087L, -2067257293L, -1377737310L,
421945492L, 1048037143L, -517607155L, -1360242652L, 148076554L,
1245080949L, 315874511L, 731516886L, 202870656L, -302586941L,
-1569454751L, -1494369328L, -1314850002L, -1146312999L, 1712551147L,
250164570L, -277797092L, -860241377L, 972497525L, 1494052620L,
441336546L, -280832067L, 580513847L, -1076970786L, -774420968L,
2043881851L, 424059161L, -611250776L, 1376055638L, -245411375L,
-193719933L, -459378670L, -598105180L, 1307120487L, 861022397L,
-59191244L, -1191828582L, 1637068773L, 1069775743L, -1413977338L,
1316930704L, -585886413L, 874164945L, 464599168L, 1841193310L,
1363465897L, -2000977573L, 896682858L, -1238195700L, 286859855L,
930074373L, 605521852L, -190242158L, 1840826765L, 901476807L,
-2127539538L, -1637582072L, -493442805L, 1508210665L, 1493307576L,
-132868250L, -1540951039L, -649657709L, -1479187134L, -1724011788L,
-1756752137L, -1132136723L, -177864316L, 1587285930L, -1429777131L,
985194415L, 1416687542L, 1731106272L, 1437199651L, -1346059711L,
170241328L, -2057197746L, -1485787719L, -1039364149L, 1497941114L,
1205556604L, 417410495L, 1578109589L, 1099954348L, -1886322046L,
1972013661L, -1702609577L, 1756790846L, 956961912L, 949878619L,
1055607481L, 566970120L, -1579237962L, -1576983823L, -50551517L,
-856588430L, 1771097540L, 1768035719L, 1389254493L, -1859110572L,
1854520186L, 1919630021L, -488772577L, 376552358L, -1337541328L,
1346699219L, 834694641L, -973300960L, 1159255486L, 1174957513L,
831615355L, 173225802L, 1095559084L, 1995583151L, 1369386085L,
30170140L, -884755854L, 1346520301L, -247800537L, 1680279886L,
1701063080L, 979332523L, 722265801L, 1849392856L, 451339142L,
2112845409L, -491622413L, 1435304546L, 6141140L, 1456543191L,
606576589L, 418863972L, -699072694L, 769599157L, -361349617L,
-1110216938L, 358234560L, -1053455997L, 55553953L, 365625232L,
1983308910L, 613175833L, 160897707L, -1953210726L, 1259141724L,
1891256159L, -1247831755L, -1601718068L, 264045090L, 1684774013L,
1049347447L, -605263202L, 233564504L, 566005947L, 925126233L,
-2133189144L, -693098474L, -509127791L, 387537987L, 314397522L,
2029123556L, 1790363175L, -877035907L, 2030534516L, 1837716314L,
-1565817435L, 330867135L, -1134441914L, -1597503280L, -1427823885L,
-373804015L, -182934976L, -87706082L, -1472831255L, -1830919653L,
-149044694L, 1312376140L, -624261361L, -885183931L, 576625660L,
23749202L, 50216909L, -1933133433L, -2035471250L, -164867000L,
100691531L, 624903081L, 368036600L, -198975962L, 955438913L,
1475517651L, 152830082L, -1398292812L, -445007305L, -1771110099L,
-224931260L, -1482109078L, 628030421L, 95092079L, -476714890L,
1907737504L, -799837085L, -1612488447L, -1050301328L, 1899808782L,
-1873872775L, 1367799819L, 838670650L, -306542532L, -1599578241L,
403051733L, -74422036L, 1765122626L, 2084382109L, 1736720407L,
-445778562L, 135285560L, 1362490395L, -976736903L, 1708964808L,
-1406031882L, 1549691441L, -414188445L, 2017161484L, 2059994684L,
-2079502094L, 720218864L, 445100868L, -749249960L, -584539590L,
1626900208L, 1773489340L, -150622700L, 787022018L, -423010208L,
1531202940L, 1158397136L, -1935741918L, 1457339048L, -1081327732L,
114538940L, 292406322L, -2019154112L, -535299916L, 1003869000L,
497478330L, -2055565552L, 1956600044L, 60355668L, 2093602578L,
429339552L, -1672568884L, -1362318496L, 1906999490L, 640910888L,
1857033644L, 1495396156L, -603947150L, 1078383408L, -117915100L,
-61919176L, 2006453466L, 460259728L, -1065748740L, -1762586732L,
565560130L, 370768384L, 1419174460L, 2023036496L, 1480110754L,
-754271416L, 345111052L, -652677924L, -19680686L, -479483776L,
-209945772L, -1835789848L, 1063267066L, -1804134576L, -425802708L,
-527886988L, 2098953874L, 1081113184L, -410496372L, 841149280L,
246641378L, -752600920L, -352915252L, 2064966140L, -2059632718L,
1870870000L, -1007587900L, 1829394264L, -610763910L, 2127281584L,
35832252L, 815251540L, 933589954L, -1551252704L, 530446268L,
-358534576L, -864719390L, -1711607448L, -686592692L, 1515700476L,
-1004322062L, -1364439296L, 168033908L, -2083405816L, 525963706L,
-1199116336L, -1846867476L, -1992325228L, -1037749550L, -2013913504L,
698434892L, -65608416L, 39176834L, -376874520L, -1338441748L,
1913792060L, -610662798L, 859122544L, 1996540196L, -1105599944L,
1721686426L, 45218256L, 1972807100L, -2145240940L, -823765886L,
-1548302912L, -1897411780L, -652579696L, 1772460834L, 855012872L,
297436428L, -876251748L, 636489234L, -676340864L, 2101130004L,
-1056722648L, 634766394L, -1897722928L, -528467348L, -925797836L,
-999949678L, -993770144L, 2141681996L, -919566368L, 1711109538L,
-1201031768L, 92968588L, -2077033028L, -1150054798L, -692874256L,
1577758276L, -579850152L, 1200994362L, -933797776L, -1064814532L,
-122347244L, -810884414L, 20702944L, 1707366524L, 1136391888L,
1883601314L, -833828824L, 1717357324L, -984282692L, 1562268466L,
1725520576L, 294464564L, -2114908472L, -557820742L, 1597626000L,
670916332L, -529667500L, 1970115218L, 1395972384L, 449561036L,
-137354528L, 513765570L, 1919462440L, -1584343124L, 1787615932L,
1560308722L, -570198480L, 109051812L, -1512460872L, -650893478L,
-1043230576L, 615160700L, 290717972L, 2041952578L, 1255645440L,
-1470222916L, 1449386320L, -1412138718L, 1372093000L, -692858868L,
1965979228L, -756201006L, 1139357184L, 692090196L, 1884720360L,
-1645649670L, -505633072L, 427223852L, -794431244L, -2088494702L,
1698028896L, -1937667060L, -1233290016L, 735625826L, -31308632L,
-1634976692L, -1640245380L, 51001650L, -1048645008L, -184061116L,
39523288L, -1841224838L, 2023327792L, -427233348L, -715300268L,
1802670146L, 345697952L, 1203831484L, 53660624L, -1531626142L,
1532042984L, 340671692L, 1098617340L, 1198382578L, -1715362688L,
-1250366988L, 1841797768L, 1425165114L, 182857040L, 1018276972L,
-661641836L, 520372306L, 1118698976L, 944248012L, -609911136L,
-477466238L, -502515480L, -839599380L, -281393860L, -73150350L,
-228661426L, 87190940L, 1902751501L, 185411959L, -915329664L,
-1331000354L, 1128957099L, 1353867181L, -1561644006L, -685532456L,
-739044543L, -564370445L, -1504072172L, 1258317730L, 1619253431L,
148898433L, 1837990534L, 1595593412L, -2074258443L, -844486049L,
1282366648L, 729233814L, -2014379101L, -2083457179L, -1702719870L,
445860848L, -1022972519L, 1677779851L, 487116412L, -188742486L,
-666749217L, 743197801L, -664541570L, 1630015084L, 1151513277L,
985051015L, 1146116976L, -1868043762L, -850278597L, 723475485L,
87643338L, -1217949720L, -231056175L, 1827485891L, -765587516L,
913837170L, -1721522873L, -1296049903L, -1693578090L, -756899340L,
2103038853L, -754124081L, 2118985672L, -583987610L, -284499821L,
-356966411L, 1971599826L, -1422204384L, -569140215L, 147038523L,
1258661004L, 1427140890L, -1737742449L, -551831335L, 2066667374L,
1160564476L, 5532781L, 724121623L, 1982923232L, 486846L, 2028671883L,
161432589L, -2107775942L, 1009149048L, -1116770271L, -892740909L,
-1525917196L, -1939635582L, 1221173015L, -1423616543L, 1927375654L,
1553384996L, 568885461L, -419536769L, -692898536L, -311989194L,
-1659620669L, -747610363L, 616092322L, -603815792L, 957370425L,
-426483477L, -1971416612L, -822855670L, -112376065L, -1086449847L,
660171998L, 920679052L, 462976605L, 1768692839L, -161730160L,
-175173202L, -1706834853L, 297096253L, -1994433878L, -381573304L,
1143982961L, -202224413L, 487908324L, -215813358L, -27361561L,
1538361521L, -33723658L, -1672680172L, -762400219L, -675840209L,
-1661746968L, -520085050L, -554151821L, -1848411435L, 419711154L,
-264004480L, 789643369L, -2085880613L, -1139871700L, -168169414L,
-592392465L, -602242695L, -1572680178L, -1332301860L, 1885333581L,
-335909961L, -920881856L, -1732103138L, -1063783701L, -539223443L,
1937885914L, -1926409576L, 1255993601L, 603354803L, -1550900780L,
1078005986L, -1165940361L, -331958079L, -68860858L, -2070606972L,
-1392476875L, 182446879L, -1027187080L, -1438203050L, 586566115L,
817799077L, -910705086L, -1899215824L, -674064295L, 1939035595L,
-151144900L, -1388165014L, -425356641L, 127377193L, 493585342L,
20378284L, -1541259267L, -1966397625L, -1116469200L, -1957445298L,
2037066107L, -551235875L, -1557051638L, 311518391L)
|
/R/ellipsefit-internal.R
|
no_license
|
sail622/hysteresis
|
R
| false
| false
| 8,236
|
r
|
.Random.seed <-
c(403L, 10L, 126536518L, -1580181087L, -2067257293L, -1377737310L,
421945492L, 1048037143L, -517607155L, -1360242652L, 148076554L,
1245080949L, 315874511L, 731516886L, 202870656L, -302586941L,
-1569454751L, -1494369328L, -1314850002L, -1146312999L, 1712551147L,
250164570L, -277797092L, -860241377L, 972497525L, 1494052620L,
441336546L, -280832067L, 580513847L, -1076970786L, -774420968L,
2043881851L, 424059161L, -611250776L, 1376055638L, -245411375L,
-193719933L, -459378670L, -598105180L, 1307120487L, 861022397L,
-59191244L, -1191828582L, 1637068773L, 1069775743L, -1413977338L,
1316930704L, -585886413L, 874164945L, 464599168L, 1841193310L,
1363465897L, -2000977573L, 896682858L, -1238195700L, 286859855L,
930074373L, 605521852L, -190242158L, 1840826765L, 901476807L,
-2127539538L, -1637582072L, -493442805L, 1508210665L, 1493307576L,
-132868250L, -1540951039L, -649657709L, -1479187134L, -1724011788L,
-1756752137L, -1132136723L, -177864316L, 1587285930L, -1429777131L,
985194415L, 1416687542L, 1731106272L, 1437199651L, -1346059711L,
170241328L, -2057197746L, -1485787719L, -1039364149L, 1497941114L,
1205556604L, 417410495L, 1578109589L, 1099954348L, -1886322046L,
1972013661L, -1702609577L, 1756790846L, 956961912L, 949878619L,
1055607481L, 566970120L, -1579237962L, -1576983823L, -50551517L,
-856588430L, 1771097540L, 1768035719L, 1389254493L, -1859110572L,
1854520186L, 1919630021L, -488772577L, 376552358L, -1337541328L,
1346699219L, 834694641L, -973300960L, 1159255486L, 1174957513L,
831615355L, 173225802L, 1095559084L, 1995583151L, 1369386085L,
30170140L, -884755854L, 1346520301L, -247800537L, 1680279886L,
1701063080L, 979332523L, 722265801L, 1849392856L, 451339142L,
2112845409L, -491622413L, 1435304546L, 6141140L, 1456543191L,
606576589L, 418863972L, -699072694L, 769599157L, -361349617L,
-1110216938L, 358234560L, -1053455997L, 55553953L, 365625232L,
1983308910L, 613175833L, 160897707L, -1953210726L, 1259141724L,
1891256159L, -1247831755L, -1601718068L, 264045090L, 1684774013L,
1049347447L, -605263202L, 233564504L, 566005947L, 925126233L,
-2133189144L, -693098474L, -509127791L, 387537987L, 314397522L,
2029123556L, 1790363175L, -877035907L, 2030534516L, 1837716314L,
-1565817435L, 330867135L, -1134441914L, -1597503280L, -1427823885L,
-373804015L, -182934976L, -87706082L, -1472831255L, -1830919653L,
-149044694L, 1312376140L, -624261361L, -885183931L, 576625660L,
23749202L, 50216909L, -1933133433L, -2035471250L, -164867000L,
100691531L, 624903081L, 368036600L, -198975962L, 955438913L,
1475517651L, 152830082L, -1398292812L, -445007305L, -1771110099L,
-224931260L, -1482109078L, 628030421L, 95092079L, -476714890L,
1907737504L, -799837085L, -1612488447L, -1050301328L, 1899808782L,
-1873872775L, 1367799819L, 838670650L, -306542532L, -1599578241L,
403051733L, -74422036L, 1765122626L, 2084382109L, 1736720407L,
-445778562L, 135285560L, 1362490395L, -976736903L, 1708964808L,
-1406031882L, 1549691441L, -414188445L, 2017161484L, 2059994684L,
-2079502094L, 720218864L, 445100868L, -749249960L, -584539590L,
1626900208L, 1773489340L, -150622700L, 787022018L, -423010208L,
1531202940L, 1158397136L, -1935741918L, 1457339048L, -1081327732L,
114538940L, 292406322L, -2019154112L, -535299916L, 1003869000L,
497478330L, -2055565552L, 1956600044L, 60355668L, 2093602578L,
429339552L, -1672568884L, -1362318496L, 1906999490L, 640910888L,
1857033644L, 1495396156L, -603947150L, 1078383408L, -117915100L,
-61919176L, 2006453466L, 460259728L, -1065748740L, -1762586732L,
565560130L, 370768384L, 1419174460L, 2023036496L, 1480110754L,
-754271416L, 345111052L, -652677924L, -19680686L, -479483776L,
-209945772L, -1835789848L, 1063267066L, -1804134576L, -425802708L,
-527886988L, 2098953874L, 1081113184L, -410496372L, 841149280L,
246641378L, -752600920L, -352915252L, 2064966140L, -2059632718L,
1870870000L, -1007587900L, 1829394264L, -610763910L, 2127281584L,
35832252L, 815251540L, 933589954L, -1551252704L, 530446268L,
-358534576L, -864719390L, -1711607448L, -686592692L, 1515700476L,
-1004322062L, -1364439296L, 168033908L, -2083405816L, 525963706L,
-1199116336L, -1846867476L, -1992325228L, -1037749550L, -2013913504L,
698434892L, -65608416L, 39176834L, -376874520L, -1338441748L,
1913792060L, -610662798L, 859122544L, 1996540196L, -1105599944L,
1721686426L, 45218256L, 1972807100L, -2145240940L, -823765886L,
-1548302912L, -1897411780L, -652579696L, 1772460834L, 855012872L,
297436428L, -876251748L, 636489234L, -676340864L, 2101130004L,
-1056722648L, 634766394L, -1897722928L, -528467348L, -925797836L,
-999949678L, -993770144L, 2141681996L, -919566368L, 1711109538L,
-1201031768L, 92968588L, -2077033028L, -1150054798L, -692874256L,
1577758276L, -579850152L, 1200994362L, -933797776L, -1064814532L,
-122347244L, -810884414L, 20702944L, 1707366524L, 1136391888L,
1883601314L, -833828824L, 1717357324L, -984282692L, 1562268466L,
1725520576L, 294464564L, -2114908472L, -557820742L, 1597626000L,
670916332L, -529667500L, 1970115218L, 1395972384L, 449561036L,
-137354528L, 513765570L, 1919462440L, -1584343124L, 1787615932L,
1560308722L, -570198480L, 109051812L, -1512460872L, -650893478L,
-1043230576L, 615160700L, 290717972L, 2041952578L, 1255645440L,
-1470222916L, 1449386320L, -1412138718L, 1372093000L, -692858868L,
1965979228L, -756201006L, 1139357184L, 692090196L, 1884720360L,
-1645649670L, -505633072L, 427223852L, -794431244L, -2088494702L,
1698028896L, -1937667060L, -1233290016L, 735625826L, -31308632L,
-1634976692L, -1640245380L, 51001650L, -1048645008L, -184061116L,
39523288L, -1841224838L, 2023327792L, -427233348L, -715300268L,
1802670146L, 345697952L, 1203831484L, 53660624L, -1531626142L,
1532042984L, 340671692L, 1098617340L, 1198382578L, -1715362688L,
-1250366988L, 1841797768L, 1425165114L, 182857040L, 1018276972L,
-661641836L, 520372306L, 1118698976L, 944248012L, -609911136L,
-477466238L, -502515480L, -839599380L, -281393860L, -73150350L,
-228661426L, 87190940L, 1902751501L, 185411959L, -915329664L,
-1331000354L, 1128957099L, 1353867181L, -1561644006L, -685532456L,
-739044543L, -564370445L, -1504072172L, 1258317730L, 1619253431L,
148898433L, 1837990534L, 1595593412L, -2074258443L, -844486049L,
1282366648L, 729233814L, -2014379101L, -2083457179L, -1702719870L,
445860848L, -1022972519L, 1677779851L, 487116412L, -188742486L,
-666749217L, 743197801L, -664541570L, 1630015084L, 1151513277L,
985051015L, 1146116976L, -1868043762L, -850278597L, 723475485L,
87643338L, -1217949720L, -231056175L, 1827485891L, -765587516L,
913837170L, -1721522873L, -1296049903L, -1693578090L, -756899340L,
2103038853L, -754124081L, 2118985672L, -583987610L, -284499821L,
-356966411L, 1971599826L, -1422204384L, -569140215L, 147038523L,
1258661004L, 1427140890L, -1737742449L, -551831335L, 2066667374L,
1160564476L, 5532781L, 724121623L, 1982923232L, 486846L, 2028671883L,
161432589L, -2107775942L, 1009149048L, -1116770271L, -892740909L,
-1525917196L, -1939635582L, 1221173015L, -1423616543L, 1927375654L,
1553384996L, 568885461L, -419536769L, -692898536L, -311989194L,
-1659620669L, -747610363L, 616092322L, -603815792L, 957370425L,
-426483477L, -1971416612L, -822855670L, -112376065L, -1086449847L,
660171998L, 920679052L, 462976605L, 1768692839L, -161730160L,
-175173202L, -1706834853L, 297096253L, -1994433878L, -381573304L,
1143982961L, -202224413L, 487908324L, -215813358L, -27361561L,
1538361521L, -33723658L, -1672680172L, -762400219L, -675840209L,
-1661746968L, -520085050L, -554151821L, -1848411435L, 419711154L,
-264004480L, 789643369L, -2085880613L, -1139871700L, -168169414L,
-592392465L, -602242695L, -1572680178L, -1332301860L, 1885333581L,
-335909961L, -920881856L, -1732103138L, -1063783701L, -539223443L,
1937885914L, -1926409576L, 1255993601L, 603354803L, -1550900780L,
1078005986L, -1165940361L, -331958079L, -68860858L, -2070606972L,
-1392476875L, 182446879L, -1027187080L, -1438203050L, 586566115L,
817799077L, -910705086L, -1899215824L, -674064295L, 1939035595L,
-151144900L, -1388165014L, -425356641L, 127377193L, 493585342L,
20378284L, -1541259267L, -1966397625L, -1116469200L, -1957445298L,
2037066107L, -551235875L, -1557051638L, 311518391L)
|
# TX run forest...
# splits the data, runs two forests sequentially, then combines them
# scp "/Users/austinbean/Desktop/drgml/split_run_forest.R" beanaus@hsrdcsub2.pmacs.upenn.edu:/project/Lorch_project2018/bean/
# NOTE(review): assumes `traind` (training data with outcome ADMN_NICU) and
# `tree_num` (number of trees) are defined in the environment before this
# script runs -- confirm with the caller.
# Bug fix: load the package BEFORE randomForest() is called (it was
# originally loaded at the bottom, after both forests had been trained).
library(randomForest)
# subset training since it may be too large:
dim1 = nrow(traind)
traind1 <- traind[1:floor(dim1/2),]
traind2 <- traind[(floor(dim1/2)+1):dim1,]
# train two forests, one on each half of the training data:
forest_tst1 <- randomForest(ADMN_NICU~ .,
                            data=traind1,
                            ntree = tree_num,
                            do.trace=TRUE,
                            na.action=na.omit,
                            proximity=FALSE,
                            nodesize=100,
                            importance=TRUE)
forest_tst2 <- randomForest(ADMN_NICU~ .,
                            data=traind2,
                            ntree = tree_num,
                            do.trace=TRUE,
                            na.action=na.omit,
                            proximity=FALSE,
                            nodesize=100,
                            importance=TRUE)
# combine them:
# Bug fix: `forest_combine()` does not exist; the randomForest package's
# function for merging ensembles is `combine()`.
forest_tst <- randomForest::combine(forest_tst1, forest_tst2)
|
/split_run_forest.R
|
no_license
|
austinbean/drgml
|
R
| false
| false
| 1,186
|
r
|
# TX run forest...
# splits the data, runs two forests sequentially, then combines them
# scp "/Users/austinbean/Desktop/drgml/split_run_forest.R" beanaus@hsrdcsub2.pmacs.upenn.edu:/project/Lorch_project2018/bean/
# NOTE(review): assumes `traind` (training data with outcome ADMN_NICU) and
# `tree_num` (number of trees) are defined in the environment before this
# script runs -- confirm with the caller.
# Bug fix: load the package BEFORE randomForest() is called (it was
# originally loaded at the bottom, after both forests had been trained).
library(randomForest)
# subset training since it may be too large:
dim1 = nrow(traind)
traind1 <- traind[1:floor(dim1/2),]
traind2 <- traind[(floor(dim1/2)+1):dim1,]
# train two forests, one on each half of the training data:
forest_tst1 <- randomForest(ADMN_NICU~ .,
                            data=traind1,
                            ntree = tree_num,
                            do.trace=TRUE,
                            na.action=na.omit,
                            proximity=FALSE,
                            nodesize=100,
                            importance=TRUE)
forest_tst2 <- randomForest(ADMN_NICU~ .,
                            data=traind2,
                            ntree = tree_num,
                            do.trace=TRUE,
                            na.action=na.omit,
                            proximity=FALSE,
                            nodesize=100,
                            importance=TRUE)
# combine them:
# Bug fix: `forest_combine()` does not exist; the randomForest package's
# function for merging ensembles is `combine()`.
forest_tst <- randomForest::combine(forest_tst1, forest_tst2)
|
# =====================================================================================
# Objective
# -
#
# -------------------------------------------------------------------------------------
# Load libraries and parameters
#
options(stringsAsFactors = FALSE)
options(contrasts=c(unordered="contr.treatment", ordered="contr.treatment"))
library(parallel)
mc.cores = detectCores()
source('./lib/node.r')
source('./lib/series.r')
source('./lib/trajectory.structure.r')
source('./lib/trajectory.likelihood.r')
# -------------------------------------------------------------------------------------
# Load the proposed trajectories (provides `trajs.proposed.a_pp.mc`).
#
load('./data/trajectory.proposed.a_pp.mc.rdata')
# -------------------------------------------------------------------------------------
# Load the trajectory ranking (provides `rank.proposed.a_pp.mc`) and keep
# only the 20 best-ranked trajectories, reordered by rank; after the
# reordering the rank of the retained trajectories is simply 1:20.
#
load('./data/rank.proposed.a_pp.mc.rdata')
rank.proposed.a_pp.mc = rank.proposed.a_pp.mc$order[1:20]
trajs.proposed.a_pp.mc = trajs.proposed.a_pp.mc[ rank.proposed.a_pp.mc ]
rank.proposed.a_pp.mc = 1:20
# -------------------------------------------------------------------------------------
# Load data, transform data, and initialize parameters.
#
# Mayo data
load('./data/preprocess.progression_pairs.3_year.mc.rdata')
# Rename.
d.t0.mc = d_prev_2005_2007
d.t1.mc = d_prev_2012_2014
rm(d_prev_2005_2007, d_prev_2012_2014)
# Convert a data to a set of prefixes.
# One column per retained trajectory (t1..t20); `as.ordinal` is defined in
# the sourced ./lib files -- presumably it maps each patient to a position
# along the trajectory (TODO confirm against the library).
f.t0.mc = mclapply(trajs.proposed.a_pp.mc, as.ordinal, data=d.t0.mc, exclusion=TRUE, mc.cores=mc.cores)
f.t1.mc = mclapply(trajs.proposed.a_pp.mc, as.ordinal, data=d.t1.mc, exclusion=TRUE, mc.cores=mc.cores)
f.t0.mc = cbind.data.frame(f.t0.mc)
f.t1.mc = cbind.data.frame(f.t1.mc)
colnames(f.t0.mc) = paste('t', 1:length(trajs.proposed.a_pp.mc), sep='')
colnames(f.t1.mc) = paste('t', 1:length(trajs.proposed.a_pp.mc), sep='')
# Construct
# all 2^11 TRUE/FALSE combinations of the 11 indicator variables, give each
# combination a unique index, and map every patient row of d.t0.mc/d.t1.mc
# to the index of its combination (order restored via the helper column `i`).
bool = c(FALSE, TRUE)
combn.vs = expand.grid(
ob = bool,
hld = bool,
htn = bool,
pred = bool,
dm = bool,
renal = bool,
pvd = bool,
cad = bool,
mi = bool,
brain = bool,
chf = bool
)
combn.vs.index = cbind(combn.vs, index=1:nrow(combn.vs))
i.t0.mc = merge(x=cbind(i=1:nrow(d.t0.mc), d.t0.mc), y=combn.vs.index, all.x=TRUE)
i.t0.mc = i.t0.mc[order(i.t0.mc$i), ]$index
i.t1.mc = merge(x=cbind(i=1:nrow(d.t1.mc), d.t1.mc), y=combn.vs.index, all.x=TRUE)
i.t1.mc = i.t1.mc[order(i.t1.mc$i), ]$index
# Fairview data
load('./data/preprocess.progression_pairs.3_year.fv.rdata')
# Rename.
d.t0.fv = d_prev_2005_2007
d.t1.fv = d_prev_2012_2014
rm(d_prev_2005_2007, d_prev_2012_2014)
# Convert a data to a set of prefixes.
# Same transformation as for the Mayo data, using the SAME (Mayo-derived)
# trajectories so the Fairview cohort can serve as external validation.
f.t0.fv = mclapply(trajs.proposed.a_pp.mc, as.ordinal, data=d.t0.fv, exclusion=TRUE, mc.cores=mc.cores)
f.t1.fv = mclapply(trajs.proposed.a_pp.mc, as.ordinal, data=d.t1.fv, exclusion=TRUE, mc.cores=mc.cores)
f.t0.fv = cbind.data.frame(f.t0.fv)
f.t1.fv = cbind.data.frame(f.t1.fv)
colnames(f.t0.fv) = paste('t', 1:length(trajs.proposed.a_pp.mc), sep='')
colnames(f.t1.fv) = paste('t', 1:length(trajs.proposed.a_pp.mc), sep='')
# Construct
# the same 2^11 combination index as for the Mayo data (`bool` and
# `combn.vs` are rebuilt identically) and map each Fairview row to it.
bool = c(FALSE, TRUE)
combn.vs = expand.grid(
ob = bool,
hld = bool,
htn = bool,
pred = bool,
dm = bool,
renal = bool,
pvd = bool,
cad = bool,
mi = bool,
brain = bool,
chf = bool
)
combn.vs.index = cbind(combn.vs, index=1:nrow(combn.vs))
i.t0.fv = merge(x=cbind(i=1:nrow(d.t0.fv), d.t0.fv), y=combn.vs.index, all.x=TRUE)
i.t0.fv = i.t0.fv[order(i.t0.fv$i), ]$index
i.t1.fv = merge(x=cbind(i=1:nrow(d.t1.fv), d.t1.fv), y=combn.vs.index, all.x=TRUE)
i.t1.fv = i.t1.fv[order(i.t1.fv$i), ]$index
items = colnames(d.t0.mc)
# Documentation error.
# - For now, we are not going to allow documentation error.
# (Per-item error probabilities are all 0; the prepended first element is
# 1 - sum(errors), i.e. all probability mass on "no documentation error".)
errors.proposed.a_pp = .000
errors.proposed.a_pp = c(errors.proposed.a_pp, rep(0, length(items)-length(errors.proposed.a_pp)))
errors.proposed.a_pp = c(1-sum(errors.proposed.a_pp), errors.proposed.a_pp)
# -------------------------------------------------------------------------------------
# Internal validation on the Mayo cohort: for each of B bootstrap
# replicates, train on a bootstrap sample and compute the mean per-patient
# negative log-likelihood on the out-of-bag rows, for the top-1 .. top-20
# ranked trajectories.
#
B = 250
nlls.rank.proposed.a_pp.mc = mclapply(1:B, function(b, trajs, rank, d0.mc, d1.mc, f0.mc, f1.mc, i1.mc, items, combn, errors) {
cat('.')
# Train
set.seed(b)
tr = sample(x=1:nrow(d0.mc), size=nrow(d0.mc), replace=TRUE)
ts = setdiff(x=1:nrow(d0.mc), y=tr)
# One logistic regression per item, fit on the bootstrap sample and
# restricted (via `subset`) to rows without that item at time 0.
logits = lapply(items,
function(dx, d0, d1) {
data = data.frame(y=d1[, dx], d0)
fit = glm(y ~ ., data=data, family=binomial, subset=(d0[, dx]==0))
return(fit)
}, d0=d0.mc[tr, , drop=FALSE], d1=d1.mc[tr, , drop=FALSE])
names(logits) = items
following = count.following(trajs=trajs, f0=f0.mc[tr, , drop=FALSE], f1=f1.mc[tr, , drop=FALSE])
fit = lapply(1:length(trajs),
function(k, trajs, items, logits, combn) {
itm(k=k, trajs=trajs, items=items, logits=logits, combn=combn)
}, trajs=trajs, items=items, logits=logits, combn=combn)
# Test
# Evaluate with the i best-ranked trajectories (i = 1..20) on the
# out-of-bag rows `ts`; `predict.tm`/`itm`/`count.following` come from the
# sourced ./lib files.
nlls = lapply(1:20, function(i, rank, f0, i1, fit, following, errors) {
yhat = predict.tm(object=fit, data=f0, ks=rank[1:i], following=following, errors=errors)
yhat = yhat[cbind(seq_along(i1), i1)]
# Guard against NA/NaN/zero probabilities before taking logs.
yhat[is.na (yhat)] = .Machine$double.eps
yhat[is.nan(yhat)] = .Machine$double.eps
yhat[yhat==0 ] = .Machine$double.eps
nll = -sum(log(yhat))
nll = nll / nrow(f0)
return( nll )
}, rank=rank, f0=f0.mc[ts, , drop=FALSE], i1=i1.mc[ts], fit=fit, following=following, errors=errors)
nlls = unlist(nlls)
return( nlls )
}, trajs=trajs.proposed.a_pp.mc, rank=rank.proposed.a_pp.mc, d0.mc=d.t0.mc, d1.mc=d.t1.mc, f0.mc=f.t0.mc, f1.mc=f.t1.mc, i1.mc=i.t1.mc, items=items, combn=combn.vs, errors=errors.proposed.a_pp, mc.cores=mc.cores)
B = 250
# External validation: train on a bootstrap sample of the Mayo (mc) cohort,
# then compute the mean per-patient negative log-likelihood on a bootstrap
# split of the Fairview (fv) cohort, for the top-1 .. top-20 ranked
# trajectories.
nlls.rank.proposed.a_pp.fv = mclapply(1:B, function(b, trajs, rank, d0.mc, d1.mc, f0.mc, f1.mc, f0.fv, i1.fv, items, combn, errors) {
cat('.')
# Train
set.seed(b)
tr = sample(x=1:nrow(d0.mc), size=nrow(d0.mc), replace=TRUE)
ts = setdiff(x=1:nrow(d0.mc), y=tr)
# One logistic regression per item, fit on the bootstrap sample and
# restricted (via `subset`) to rows without that item at time 0.
logits = lapply(items,
function(dx, d0, d1) {
data = data.frame(y=d1[, dx], d0)
# Bug fix: the subset mask must come from the bootstrap sample `d0` (as in
# the mc version of this block), not from the full data `d0.mc`; the rows
# of `data` are the bootstrap rows, so a mask built from `d0.mc` has the
# same length but is misaligned with them.
fit = glm(y ~ ., data=data, family=binomial, subset=(d0[, dx]==0))
return(fit)
}, d0=d0.mc[tr, , drop=FALSE], d1=d1.mc[tr, , drop=FALSE])
names(logits) = items
following = count.following(trajs=trajs, f0=f0.mc[tr, , drop=FALSE], f1=f1.mc[tr, , drop=FALSE])
fit = lapply(1:length(trajs),
function(k, trajs, items, logits, combn) {
itm(k=k, trajs=trajs, items=items, logits=logits, combn=combn)
}, trajs=trajs, items=items, logits=logits, combn=combn)
# Test
# Re-seed and bootstrap the Fairview cohort; evaluate on its out-of-bag
# rows `ts` with the i best-ranked trajectories, i = 1..20.
set.seed(b)
tr = sample(x=1:nrow(f0.fv), size=nrow(f0.fv), replace=TRUE)
ts = setdiff(x=1:nrow(f0.fv), y=tr)
nlls = lapply(1:20, function(i, rank, f0, i1, fit, following, errors) {
yhat = predict.tm(object=fit, data=f0, ks=rank[1:i], following=following, errors=errors)
yhat = yhat[cbind(seq_along(i1), i1)]
# Guard against NA/NaN/zero probabilities before taking logs.
yhat[is.na (yhat)] = .Machine$double.eps
yhat[is.nan(yhat)] = .Machine$double.eps
yhat[yhat==0 ] = .Machine$double.eps
nll = -sum(log(yhat))
nll = nll / nrow(f0)
return( nll )
}, rank=rank, f0=f0.fv[ts, , drop=FALSE], i1=i1.fv[ts], fit=fit, following=following, errors=errors)
nlls = unlist(nlls)
return( nlls )
}, trajs=trajs.proposed.a_pp.mc, rank=rank.proposed.a_pp.mc, d0.mc=d.t0.mc, d1.mc=d.t1.mc, f0.mc=f.t0.mc, f1.mc=f.t1.mc, f0.fv=f.t0.fv, i1.fv=i.t1.fv, items=items, combn=combn.vs, errors=errors.proposed.a_pp, mc.cores=mc.cores)
# -------------------------------------------------------------------------------------
# Save the likelihood.
# Persists both bootstrap result sets (mc internal validation and fv
# external validation) for downstream analysis.
#
save(nlls.rank.proposed.a_pp.mc, nlls.rank.proposed.a_pp.fv, file='./data/likelihood.rank.proposed.a_pp.mc.rdata')
|
/likelihood.rank.proposed.a_pp.mc.r
|
no_license
|
wonsuk-oh/trajectories
|
R
| false
| false
| 7,901
|
r
|
# =====================================================================================
# Objective
# -
#
# -------------------------------------------------------------------------------------
# Load libraries and parameters
#
options(stringsAsFactors = FALSE)
options(contrasts=c(unordered="contr.treatment", ordered="contr.treatment"))
library(parallel)
mc.cores = detectCores()
source('./lib/node.r')
source('./lib/series.r')
source('./lib/trajectory.structure.r')
source('./lib/trajectory.likelihood.r')
# -------------------------------------------------------------------------------------
# Load the proposed trajectories discovered on the Mayo cohort.
#
load('./data/trajectory.proposed.a_pp.mc.rdata')
# -------------------------------------------------------------------------------------
# Load the trajectory ranking and keep only the top 20 ranked trajectories.
#
load('./data/rank.proposed.a_pp.mc.rdata')
rank.proposed.a_pp.mc = rank.proposed.a_pp.mc$order[1:20]
trajs.proposed.a_pp.mc = trajs.proposed.a_pp.mc[ rank.proposed.a_pp.mc ]
# After subsetting, the retained trajectories are ranked 1..20 in order.
rank.proposed.a_pp.mc = 1:20
# -------------------------------------------------------------------------------------
# Load data, transform data, and initialize parameters.
#
# Mayo data
load('./data/preprocess.progression_pairs.3_year.mc.rdata')
# Rename: t0 = 2005-2007 prevalence data, t1 = 2012-2014 prevalence data.
d.t0.mc = d_prev_2005_2007
d.t1.mc = d_prev_2012_2014
rm(d_prev_2005_2007, d_prev_2012_2014)
# Convert a data to a set of prefixes (one ordinal column per trajectory),
# computed in parallel across the 20 trajectories.
f.t0.mc = mclapply(trajs.proposed.a_pp.mc, as.ordinal, data=d.t0.mc, exclusion=TRUE, mc.cores=mc.cores)
f.t1.mc = mclapply(trajs.proposed.a_pp.mc, as.ordinal, data=d.t1.mc, exclusion=TRUE, mc.cores=mc.cores)
f.t0.mc = cbind.data.frame(f.t0.mc)
f.t1.mc = cbind.data.frame(f.t1.mc)
colnames(f.t0.mc) = paste('t', 1:length(trajs.proposed.a_pp.mc), sep='')
colnames(f.t1.mc) = paste('t', 1:length(trajs.proposed.a_pp.mc), sep='')
# Construct an index over all 2^11 = 2048 combinations of the 11 diagnoses.
bool = c(FALSE, TRUE)
combn.vs = expand.grid(
ob = bool,
hld = bool,
htn = bool,
pred = bool,
dm = bool,
renal = bool,
pvd = bool,
cad = bool,
mi = bool,
brain = bool,
chf = bool
)
combn.vs.index = cbind(combn.vs, index=1:nrow(combn.vs))
# Map each patient's diagnosis profile to its combination index; merge()
# reorders rows, so restore the original order via the temporary `i` column.
i.t0.mc = merge(x=cbind(i=1:nrow(d.t0.mc), d.t0.mc), y=combn.vs.index, all.x=TRUE)
i.t0.mc = i.t0.mc[order(i.t0.mc$i), ]$index
i.t1.mc = merge(x=cbind(i=1:nrow(d.t1.mc), d.t1.mc), y=combn.vs.index, all.x=TRUE)
i.t1.mc = i.t1.mc[order(i.t1.mc$i), ]$index
# Fairview data (external cohort; same preprocessing as the Mayo data above)
load('./data/preprocess.progression_pairs.3_year.fv.rdata')
# Rename.
d.t0.fv = d_prev_2005_2007
d.t1.fv = d_prev_2012_2014
rm(d_prev_2005_2007, d_prev_2012_2014)
# Convert a data to a set of prefixes.
f.t0.fv = mclapply(trajs.proposed.a_pp.mc, as.ordinal, data=d.t0.fv, exclusion=TRUE, mc.cores=mc.cores)
f.t1.fv = mclapply(trajs.proposed.a_pp.mc, as.ordinal, data=d.t1.fv, exclusion=TRUE, mc.cores=mc.cores)
f.t0.fv = cbind.data.frame(f.t0.fv)
f.t1.fv = cbind.data.frame(f.t1.fv)
colnames(f.t0.fv) = paste('t', 1:length(trajs.proposed.a_pp.mc), sep='')
colnames(f.t1.fv) = paste('t', 1:length(trajs.proposed.a_pp.mc), sep='')
# Construct the same 2^11 combination index for the Fairview cohort.
bool = c(FALSE, TRUE)
combn.vs = expand.grid(
ob = bool,
hld = bool,
htn = bool,
pred = bool,
dm = bool,
renal = bool,
pvd = bool,
cad = bool,
mi = bool,
brain = bool,
chf = bool
)
combn.vs.index = cbind(combn.vs, index=1:nrow(combn.vs))
i.t0.fv = merge(x=cbind(i=1:nrow(d.t0.fv), d.t0.fv), y=combn.vs.index, all.x=TRUE)
i.t0.fv = i.t0.fv[order(i.t0.fv$i), ]$index
i.t1.fv = merge(x=cbind(i=1:nrow(d.t1.fv), d.t1.fv), y=combn.vs.index, all.x=TRUE)
i.t1.fv = i.t1.fv[order(i.t1.fv$i), ]$index
items = colnames(d.t0.mc)
# Documentation error.
# - For now, we are not going to allow documentation error (rate fixed at 0).
errors.proposed.a_pp = .000
errors.proposed.a_pp = c(errors.proposed.a_pp, rep(0, length(items)-length(errors.proposed.a_pp)))
errors.proposed.a_pp = c(1-sum(errors.proposed.a_pp), errors.proposed.a_pp)
# -------------------------------------------------------------------------------------
# Bootstrap (B replicates) the held-out negative log-likelihood (NLL) of the
# top-i trajectory models (i = 1..20) on the Mayo cohort.
B = 250
nlls.rank.proposed.a_pp.mc = mclapply(1:B, function(b, trajs, rank, d0.mc, d1.mc, f0.mc, f1.mc, i1.mc, items, combn, errors) {
cat('.')
# Train: draw an in-bag bootstrap resample (tr); the out-of-bag rows (ts)
# are held out for evaluation below.
set.seed(b)
tr = sample(x=1:nrow(d0.mc), size=nrow(d0.mc), replace=TRUE)
ts = setdiff(x=1:nrow(d0.mc), y=tr)
# One logistic regression per diagnosis, fit only on resampled patients who
# do not already have that diagnosis at t0.
logits = lapply(items,
function(dx, d0, d1) {
data = data.frame(y=d1[, dx], d0)
fit = glm(y ~ ., data=data, family=binomial, subset=(d0[, dx]==0))
return(fit)
}, d0=d0.mc[tr, , drop=FALSE], d1=d1.mc[tr, , drop=FALSE])
names(logits) = items
following = count.following(trajs=trajs, f0=f0.mc[tr, , drop=FALSE], f1=f1.mc[tr, , drop=FALSE])
fit = lapply(1:length(trajs),
function(k, trajs, items, logits, combn) {
itm(k=k, trajs=trajs, items=items, logits=logits, combn=combn)
}, trajs=trajs, items=items, logits=logits, combn=combn)
# Test: per-observation NLL on the out-of-bag rows, using the top-i ranked
# trajectories for each i in 1..20.
nlls = lapply(1:20, function(i, rank, f0, i1, fit, following, errors) {
yhat = predict.tm(object=fit, data=f0, ks=rank[1:i], following=following, errors=errors)
yhat = yhat[cbind(seq_along(i1), i1)]
# Floor undefined / zero predicted probabilities at machine epsilon so
# that log() stays finite.
yhat[is.na (yhat)] = .Machine$double.eps
yhat[is.nan(yhat)] = .Machine$double.eps
yhat[yhat==0 ] = .Machine$double.eps
nll = -sum(log(yhat))
nll = nll / nrow(f0)
return( nll )
}, rank=rank, f0=f0.mc[ts, , drop=FALSE], i1=i1.mc[ts], fit=fit, following=following, errors=errors)
nlls = unlist(nlls)
return( nlls )
}, trajs=trajs.proposed.a_pp.mc, rank=rank.proposed.a_pp.mc, d0.mc=d.t0.mc, d1.mc=d.t1.mc, f0.mc=f.t0.mc, f1.mc=f.t1.mc, i1.mc=i.t1.mc, items=items, combn=combn.vs, errors=errors.proposed.a_pp, mc.cores=mc.cores)
B = 250
# Bootstrap (B replicates) the held-out negative log-likelihood on the external
# Fairview cohort: models are trained on a Mayo bootstrap resample and
# evaluated on a Fairview out-of-bag sample drawn with the same seed.
nlls.rank.proposed.a_pp.fv = mclapply(1:B, function(b, trajs, rank, d0.mc, d1.mc, f0.mc, f1.mc, f0.fv, i1.fv, items, combn, errors) {
cat('.')
# Train on an in-bag bootstrap resample of the Mayo data. (The unused
# out-of-bag `ts` computed here in the original was removed; it was
# overwritten before use by the Fairview `ts` below.)
set.seed(b)
tr = sample(x=1:nrow(d0.mc), size=nrow(d0.mc), replace=TRUE)
# One logistic regression per diagnosis, fit only on resampled patients who
# do not already have that diagnosis at t0.
logits = lapply(items,
function(dx, d0, d1) {
data = data.frame(y=d1[, dx], d0)
# BUG FIX: subset on the resampled training data `d0`, not the full
# cohort `d0.mc` captured from the enclosing scope. Both have the same
# number of rows, so the old mask was silently misaligned with the
# bootstrap resample. This now matches the Mayo bootstrap above.
fit = glm(y ~ ., data=data, family=binomial, subset=(d0[, dx]==0))
return(fit)
}, d0=d0.mc[tr, , drop=FALSE], d1=d1.mc[tr, , drop=FALSE])
names(logits) = items
following = count.following(trajs=trajs, f0=f0.mc[tr, , drop=FALSE], f1=f1.mc[tr, , drop=FALSE])
fit = lapply(1:length(trajs),
function(k, trajs, items, logits, combn) {
itm(k=k, trajs=trajs, items=items, logits=logits, combn=combn)
}, trajs=trajs, items=items, logits=logits, combn=combn)
# Test: re-seed so the Fairview out-of-bag rows are paired with this
# bootstrap replicate, then compute the per-observation NLL for the top-i
# trajectories, i = 1..20.
set.seed(b)
tr = sample(x=1:nrow(f0.fv), size=nrow(f0.fv), replace=TRUE)
ts = setdiff(x=1:nrow(f0.fv), y=tr)
nlls = lapply(1:20, function(i, rank, f0, i1, fit, following, errors) {
yhat = predict.tm(object=fit, data=f0, ks=rank[1:i], following=following, errors=errors)
yhat = yhat[cbind(seq_along(i1), i1)]
# Floor undefined / zero predicted probabilities at machine epsilon so
# that log() stays finite.
yhat[is.na (yhat)] = .Machine$double.eps
yhat[is.nan(yhat)] = .Machine$double.eps
yhat[yhat==0 ] = .Machine$double.eps
nll = -sum(log(yhat))
nll = nll / nrow(f0)
return( nll )
}, rank=rank, f0=f0.fv[ts, , drop=FALSE], i1=i1.fv[ts], fit=fit, following=following, errors=errors)
nlls = unlist(nlls)
return( nlls )
}, trajs=trajs.proposed.a_pp.mc, rank=rank.proposed.a_pp.mc, d0.mc=d.t0.mc, d1.mc=d.t1.mc, f0.mc=f.t0.mc, f1.mc=f.t1.mc, f0.fv=f.t0.fv, i1.fv=i.t1.fv, items=items, combn=combn.vs, errors=errors.proposed.a_pp, mc.cores=mc.cores)
# -------------------------------------------------------------------------------------
# Save the likelihood.
#
save(nlls.rank.proposed.a_pp.mc, nlls.rank.proposed.a_pp.fv, file='./data/likelihood.rank.proposed.a_pp.mc.rdata')
|
gset <- function(g) {
  # Apply the default vertex styling used when plotting the graph.
  V(g)$label.color <- "black"
  V(g)$size <- 30

  # Apply the default edge styling.
  E(g)$color <- "black"
  E(g)$width <- 2
  E(g)$arrow.width <- 0.25
  E(g)$label.cex <- 1.4
  E(g)$label.color <- "black"

  g
}
|
/gset.R
|
no_license
|
OliverMount/AutocorrfMRI
|
R
| false
| false
| 239
|
r
|
gset <- function(g) {
  # Apply the default vertex styling used when plotting the graph.
  V(g)$label.color <- "black"
  V(g)$size <- 30

  # Apply the default edge styling.
  E(g)$color <- "black"
  E(g)$width <- 2
  E(g)$arrow.width <- 0.25
  E(g)$label.cex <- 1.4
  E(g)$label.color <- "black"

  g
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{tfd_multivariate_student_t_linear_operator}
\alias{tfd_multivariate_student_t_linear_operator}
\title{Multivariate Student's t-distribution on \code{R^k}}
\usage{
tfd_multivariate_student_t_linear_operator(df, loc, scale,
validate_args = FALSE, allow_nan_stats = TRUE,
name = "MultivariateStudentTLinearOperator")
}
\arguments{
\item{df}{A positive floating-point \code{Tensor}. Has shape \code{[B1, ..., Bb]} where \code{b >= 0}.}
\item{loc}{Floating-point \code{Tensor}. Has shape \code{[B1, ..., Bb, k]} where \code{k} is
the event size.}
\item{scale}{Instance of \code{LinearOperator} with a floating \code{dtype} and shape
\code{[B1, ..., Bb, k, k]}.}
\item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
silently render incorrect outputs. Default value: FALSE.}
\item{allow_nan_stats}{Logical, default TRUE. When TRUE, statistics (e.g., mean, mode, variance)
use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
one or more of the statistic's batch members are undefined.}
\item{name}{name prefixed to Ops created by this class.}
}
\description{
Mathematical Details
}
\details{
The probability density function (pdf) is,\preformatted{pdf(x; df, loc, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + k)) / Z
where,
y = inv(Sigma) (x - loc)
Z = abs(det(Sigma)) sqrt(df pi)**k Gamma(0.5 df) / Gamma(0.5 (df + k))
}
where:
\itemize{
\item \code{df} is a positive scalar.
\item \code{loc} is a vector in \code{R^k},
\item \code{Sigma} is a positive definite \code{shape} matrix in \code{R^{k x k}}, parameterized
as \code{scale @ scale.T} in this class,
\item \code{Z} denotes the normalization constant, and,
\item \code{||y||**2} denotes the squared Euclidean norm of \code{y}.
}
The Multivariate Student's t-distribution is a member of the
\href{https://en.wikipedia.org/wiki/Location-scale_family}{location-scale family}, i.e., it can be
constructed as,\preformatted{X ~ MultivariateT(loc=0, scale=1) # Identity scale, zero shift.
Y = scale @ X + loc
}
}
\seealso{
Other distributions: \code{\link{tfd_autoregressive}},
\code{\link{tfd_batch_reshape}},
\code{\link{tfd_bernoulli}}, \code{\link{tfd_beta}},
\code{\link{tfd_binomial}},
\code{\link{tfd_categorical}}, \code{\link{tfd_cauchy}},
\code{\link{tfd_chi2}}, \code{\link{tfd_chi}},
\code{\link{tfd_deterministic}},
\code{\link{tfd_dirichlet_multinomial}},
\code{\link{tfd_dirichlet}}, \code{\link{tfd_empirical}},
\code{\link{tfd_exponential}},
\code{\link{tfd_gamma_gamma}}, \code{\link{tfd_gamma}},
\code{\link{tfd_gaussian_process}},
\code{\link{tfd_geometric}}, \code{\link{tfd_gumbel}},
\code{\link{tfd_half_cauchy}},
\code{\link{tfd_half_normal}},
\code{\link{tfd_hidden_markov_model}},
\code{\link{tfd_horseshoe}},
\code{\link{tfd_independent}},
\code{\link{tfd_inverse_gamma}},
\code{\link{tfd_inverse_gaussian}},
\code{\link{tfd_joint_distribution_named}},
\code{\link{tfd_joint_distribution_sequential}},
\code{\link{tfd_kumaraswamy}}, \code{\link{tfd_laplace}},
\code{\link{tfd_linear_gaussian_state_space_model}},
\code{\link{tfd_lkj}}, \code{\link{tfd_log_normal}},
\code{\link{tfd_logistic}},
\code{\link{tfd_mixture_same_family}},
\code{\link{tfd_mixture}}, \code{\link{tfd_multinomial}},
\code{\link{tfd_multivariate_normal_diag_plus_low_rank}},
\code{\link{tfd_multivariate_normal_diag}},
\code{\link{tfd_multivariate_normal_full_covariance}},
\code{\link{tfd_multivariate_normal_linear_operator}},
\code{\link{tfd_multivariate_normal_tri_l}},
\code{\link{tfd_negative_binomial}},
\code{\link{tfd_normal}},
\code{\link{tfd_one_hot_categorical}},
\code{\link{tfd_pareto}},
\code{\link{tfd_poisson_log_normal_quadrature_compound}},
\code{\link{tfd_poisson}}, \code{\link{tfd_quantized}},
\code{\link{tfd_relaxed_bernoulli}},
\code{\link{tfd_relaxed_one_hot_categorical}},
\code{\link{tfd_sample_distribution}},
\code{\link{tfd_sinh_arcsinh}},
\code{\link{tfd_student_t_process}},
\code{\link{tfd_student_t}},
\code{\link{tfd_transformed_distribution}},
\code{\link{tfd_triangular}},
\code{\link{tfd_truncated_normal}},
\code{\link{tfd_uniform}},
\code{\link{tfd_variational_gaussian_process}},
\code{\link{tfd_vector_diffeomixture}},
\code{\link{tfd_vector_exponential_diag}},
\code{\link{tfd_vector_exponential_linear_operator}},
\code{\link{tfd_vector_laplace_diag}},
\code{\link{tfd_vector_laplace_linear_operator}},
\code{\link{tfd_vector_sinh_arcsinh_diag}},
\code{\link{tfd_von_mises_fisher}},
\code{\link{tfd_von_mises}}, \code{\link{tfd_wishart}},
\code{\link{tfd_zipf}}
}
\concept{distributions}
|
/man/tfd_multivariate_student_t_linear_operator.Rd
|
permissive
|
dfalbel/tfprobability
|
R
| false
| true
| 4,886
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{tfd_multivariate_student_t_linear_operator}
\alias{tfd_multivariate_student_t_linear_operator}
\title{Multivariate Student's t-distribution on \code{R^k}}
\usage{
tfd_multivariate_student_t_linear_operator(df, loc, scale,
validate_args = FALSE, allow_nan_stats = TRUE,
name = "MultivariateStudentTLinearOperator")
}
\arguments{
\item{df}{A positive floating-point \code{Tensor}. Has shape \code{[B1, ..., Bb]} where \code{b >= 0}.}
\item{loc}{Floating-point \code{Tensor}. Has shape \code{[B1, ..., Bb, k]} where \code{k} is
the event size.}
\item{scale}{Instance of \code{LinearOperator} with a floating \code{dtype} and shape
\code{[B1, ..., Bb, k, k]}.}
\item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
silently render incorrect outputs. Default value: FALSE.}
\item{allow_nan_stats}{Logical, default TRUE. When TRUE, statistics (e.g., mean, mode, variance)
use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
one or more of the statistic's batch members are undefined.}
\item{name}{name prefixed to Ops created by this class.}
}
\description{
Mathematical Details
}
\details{
The probability density function (pdf) is,\preformatted{pdf(x; df, loc, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + k)) / Z
where,
y = inv(Sigma) (x - loc)
Z = abs(det(Sigma)) sqrt(df pi)**k Gamma(0.5 df) / Gamma(0.5 (df + k))
}
where:
\itemize{
\item \code{df} is a positive scalar.
\item \code{loc} is a vector in \code{R^k},
\item \code{Sigma} is a positive definite \code{shape} matrix in \code{R^{k x k}}, parameterized
as \code{scale @ scale.T} in this class,
\item \code{Z} denotes the normalization constant, and,
\item \code{||y||**2} denotes the squared Euclidean norm of \code{y}.
}
The Multivariate Student's t-distribution is a member of the
\href{https://en.wikipedia.org/wiki/Location-scale_family}{location-scale family}, i.e., it can be
constructed as,\preformatted{X ~ MultivariateT(loc=0, scale=1) # Identity scale, zero shift.
Y = scale @ X + loc
}
}
\seealso{
Other distributions: \code{\link{tfd_autoregressive}},
\code{\link{tfd_batch_reshape}},
\code{\link{tfd_bernoulli}}, \code{\link{tfd_beta}},
\code{\link{tfd_binomial}},
\code{\link{tfd_categorical}}, \code{\link{tfd_cauchy}},
\code{\link{tfd_chi2}}, \code{\link{tfd_chi}},
\code{\link{tfd_deterministic}},
\code{\link{tfd_dirichlet_multinomial}},
\code{\link{tfd_dirichlet}}, \code{\link{tfd_empirical}},
\code{\link{tfd_exponential}},
\code{\link{tfd_gamma_gamma}}, \code{\link{tfd_gamma}},
\code{\link{tfd_gaussian_process}},
\code{\link{tfd_geometric}}, \code{\link{tfd_gumbel}},
\code{\link{tfd_half_cauchy}},
\code{\link{tfd_half_normal}},
\code{\link{tfd_hidden_markov_model}},
\code{\link{tfd_horseshoe}},
\code{\link{tfd_independent}},
\code{\link{tfd_inverse_gamma}},
\code{\link{tfd_inverse_gaussian}},
\code{\link{tfd_joint_distribution_named}},
\code{\link{tfd_joint_distribution_sequential}},
\code{\link{tfd_kumaraswamy}}, \code{\link{tfd_laplace}},
\code{\link{tfd_linear_gaussian_state_space_model}},
\code{\link{tfd_lkj}}, \code{\link{tfd_log_normal}},
\code{\link{tfd_logistic}},
\code{\link{tfd_mixture_same_family}},
\code{\link{tfd_mixture}}, \code{\link{tfd_multinomial}},
\code{\link{tfd_multivariate_normal_diag_plus_low_rank}},
\code{\link{tfd_multivariate_normal_diag}},
\code{\link{tfd_multivariate_normal_full_covariance}},
\code{\link{tfd_multivariate_normal_linear_operator}},
\code{\link{tfd_multivariate_normal_tri_l}},
\code{\link{tfd_negative_binomial}},
\code{\link{tfd_normal}},
\code{\link{tfd_one_hot_categorical}},
\code{\link{tfd_pareto}},
\code{\link{tfd_poisson_log_normal_quadrature_compound}},
\code{\link{tfd_poisson}}, \code{\link{tfd_quantized}},
\code{\link{tfd_relaxed_bernoulli}},
\code{\link{tfd_relaxed_one_hot_categorical}},
\code{\link{tfd_sample_distribution}},
\code{\link{tfd_sinh_arcsinh}},
\code{\link{tfd_student_t_process}},
\code{\link{tfd_student_t}},
\code{\link{tfd_transformed_distribution}},
\code{\link{tfd_triangular}},
\code{\link{tfd_truncated_normal}},
\code{\link{tfd_uniform}},
\code{\link{tfd_variational_gaussian_process}},
\code{\link{tfd_vector_diffeomixture}},
\code{\link{tfd_vector_exponential_diag}},
\code{\link{tfd_vector_exponential_linear_operator}},
\code{\link{tfd_vector_laplace_diag}},
\code{\link{tfd_vector_laplace_linear_operator}},
\code{\link{tfd_vector_sinh_arcsinh_diag}},
\code{\link{tfd_von_mises_fisher}},
\code{\link{tfd_von_mises}}, \code{\link{tfd_wishart}},
\code{\link{tfd_zipf}}
}
\concept{distributions}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/act_fp_2020.R
\docType{data}
\name{act_fp_2020}
\alias{act_fp_2020}
\title{First preference results for the 2020 ACT election.}
\format{
A data frame with 13289 rows and 3 variables:
\describe{
\item{candidate}{Candidate's name}
\item{votes}{Number of formal votes}
\item{polling_place}{Name of the polling place}
}
}
\source{
\url{https://www.electionresults.act.gov.au/Results/PollingPlace}
}
\usage{
act_fp_2020
}
\description{
Data is copyright to the ACT Electoral Commission
}
\keyword{datasets}
|
/man/act_fp_2020.Rd
|
permissive
|
bryceroney/actvotes
|
R
| false
| true
| 586
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/act_fp_2020.R
\docType{data}
\name{act_fp_2020}
\alias{act_fp_2020}
\title{First preference results for the 2020 ACT election.}
\format{
A data frame with 13289 rows and 3 variables:
\describe{
\item{candidate}{Candidate's name}
\item{votes}{Number of formal votes}
\item{polling_place}{Name of the polling place}
}
}
\source{
\url{https://www.electionresults.act.gov.au/Results/PollingPlace}
}
\usage{
act_fp_2020
}
\description{
Data is copyright to the ACT Electoral Commission
}
\keyword{datasets}
|
#' Unload a package
#'
#' This function attempts to cleanly unload a package, including unloading
#' its namespace, deleting S4 class definitions and unloading any loaded
#' DLLs. Unfortunately S4 classes are not really designed to be cleanly
#' unloaded, and so we have to manually modify the class dependency graph in
#' order for it to work - this works on the cases for which we have tested
#' but there may be others. Similarly, automated DLL unloading is best tested
#' for simple scenarios (particularly with `useDynLib(pkgname)`) and may
#' fail in other cases. If you do encounter a failure, please file a bug report
#' at \url{https://github.com/r-lib/pkgload/issues}.
#'
#' @inheritParams ns_env
#' @param quiet if `TRUE` suppresses output from this function.
#'
#' @examples
#' \dontrun{
#' # Unload package that is in current directory
#' unload()
#'
#' # Unload package that is in ./ggplot2/
#' unload(pkg_name("ggplot2/"))
#'
#' library(ggplot2)
#' # unload the ggplot2 package directly by name
#' unload("ggplot2")
#' }
#' @export
unload <- function(package = pkg_name(), quiet = FALSE) {
# NOTE(review): `quiet` is accepted but never used in this function body.
if (package == "compiler") {
# Disable JIT compilation as it could interfere with the compiler
# unloading. Also, if the JIT was kept enabled, it would cause the
# compiler package to be loaded again soon, anyway. Note if we
# restored the JIT level after the unloading, the call to
# enableJIT itself would load the compiler again.
oldEnable <- compiler::enableJIT(0)
if (oldEnable != 0) {
warning("JIT automatically disabled when unloading the compiler.")
}
}
# Fail fast when there is nothing to unload.
if (!package %in% loadedNamespaces()) {
stop("Package ", package, " not found in loaded packages or namespaces")
}
# Drop the package's method registrations before tearing down the namespace.
unregister_methods(package)
# unloadNamespace calls onUnload hook and .onUnload, and detaches the
# package if it's attached. It will fail if a loaded package needs it.
unloaded <- tryCatch({
unloadNamespace(package)
TRUE
}, error = function(e) FALSE)
if (!unloaded) {
# unloadNamespace() failed before we get to the detach, so need to
# manually detach
unload_pkg_env(package)
# Can't use loadedNamespaces() and unloadNamespace() here because
# things can be in a weird state.
unregister_namespace(package)
}
# Clear so that loading the package again will re-read all files
clear_cache()
# Do this after detach, so that packages that have an .onUnload function
# which unloads DLLs (like MASS) won't try to unload the DLL twice.
unload_dll(package)
}
unload_pkg_env <- function(package) {
  # Detach the package environment from the search path, if it is attached.
  if (!is_attached(package)) {
    return(invisible())
  }
  pos <- which(search() == pkg_env_name(package))
  suppressWarnings(detach(pos = pos, force = TRUE))
}
# This unloads dlls loaded by either library() or load_all()
unload_dll <- function(package) {
  # Run the garbage collector first so finalisers on any deleted external
  # pointers fire before their DLL goes away.
  gc()

  # Special case: pkgload's own DLL must stay loaded so nsreg() remains
  # available for makeNamespace. Compiled-code changes to pkgload therefore
  # need a reinstall rather than a load_all().
  if (package == "pkgload") {
    return(invisible())
  }

  dlls <- loaded_dlls(package)
  lapply(dlls, function(dll) dyn.unload(dll[["path"]]))

  # Drop the unloaded DLLs from R's registry of loaded dynamic libraries.
  registered <- .dynLibs()
  .dynLibs(registered[!(registered %in% dlls)])

  invisible()
}
# Remove the S3 methods that `package` registered in the method tables of the
# namespaces that own the corresponding generics.
s3_unregister <- function(package) {
ns <- ns_env(package)
# If the package is loaded, but not installed this will fail, so we bail out in that case.
ns_defs <- suppressWarnings(try(parse_ns_file(system.file(package = package)), silent = TRUE))
if (inherits(ns_defs, "try-error")) {
return()
}
# The first two columns of S3methods are (generic, class).
methods <- ns_defs$S3methods[, 1:2, drop = FALSE]
for (i in seq_len(nrow(methods))) {
method <- methods[i, , drop = FALSE]
# Look up the generic the method was registered for; skip if it is gone.
generic <- env_get(ns, method[[1]], inherit = TRUE, default = NULL)
if (is_null(generic)) {
next
}
# The S3 method table lives in the namespace that defines the generic.
generic_ns <- topenv(fn_env(generic))
if (!is_namespace(generic_ns)) {
next
}
table <- generic_ns$.__S3MethodsTable__.
if (!is_environment(table)) {
next
}
# Registered methods are stored under "generic.class"; unbind ours.
nm <- paste0(method, collapse = ".")
env_unbind(table, nm)
}
}
|
/R/unload.r
|
no_license
|
gaborcsardi/pkgload
|
R
| false
| false
| 4,291
|
r
|
#' Unload a package
#'
#' This function attempts to cleanly unload a package, including unloading
#' its namespace, deleting S4 class definitions and unloading any loaded
#' DLLs. Unfortunately S4 classes are not really designed to be cleanly
#' unloaded, and so we have to manually modify the class dependency graph in
#' order for it to work - this works on the cases for which we have tested
#' but there may be others. Similarly, automated DLL unloading is best tested
#' for simple scenarios (particularly with `useDynLib(pkgname)`) and may
#' fail in other cases. If you do encounter a failure, please file a bug report
#' at \url{https://github.com/r-lib/pkgload/issues}.
#'
#' @inheritParams ns_env
#' @param quiet if `TRUE` suppresses output from this function.
#'
#' @examples
#' \dontrun{
#' # Unload package that is in current directory
#' unload()
#'
#' # Unload package that is in ./ggplot2/
#' unload(pkg_name("ggplot2/"))
#'
#' library(ggplot2)
#' # unload the ggplot2 package directly by name
#' unload("ggplot2")
#' }
#' @export
unload <- function(package = pkg_name(), quiet = FALSE) {
# NOTE(review): `quiet` is accepted but never used in this function body.
if (package == "compiler") {
# Disable JIT compilation as it could interfere with the compiler
# unloading. Also, if the JIT was kept enabled, it would cause the
# compiler package to be loaded again soon, anyway. Note if we
# restored the JIT level after the unloading, the call to
# enableJIT itself would load the compiler again.
oldEnable <- compiler::enableJIT(0)
if (oldEnable != 0) {
warning("JIT automatically disabled when unloading the compiler.")
}
}
# Fail fast when there is nothing to unload.
if (!package %in% loadedNamespaces()) {
stop("Package ", package, " not found in loaded packages or namespaces")
}
# Drop the package's method registrations before tearing down the namespace.
unregister_methods(package)
# unloadNamespace calls onUnload hook and .onUnload, and detaches the
# package if it's attached. It will fail if a loaded package needs it.
unloaded <- tryCatch({
unloadNamespace(package)
TRUE
}, error = function(e) FALSE)
if (!unloaded) {
# unloadNamespace() failed before we get to the detach, so need to
# manually detach
unload_pkg_env(package)
# Can't use loadedNamespaces() and unloadNamespace() here because
# things can be in a weird state.
unregister_namespace(package)
}
# Clear so that loading the package again will re-read all files
clear_cache()
# Do this after detach, so that packages that have an .onUnload function
# which unloads DLLs (like MASS) won't try to unload the DLL twice.
unload_dll(package)
}
unload_pkg_env <- function(package) {
  # Detach the package environment from the search path, if it is attached.
  if (!is_attached(package)) {
    return(invisible())
  }
  pos <- which(search() == pkg_env_name(package))
  suppressWarnings(detach(pos = pos, force = TRUE))
}
# This unloads dlls loaded by either library() or load_all()
unload_dll <- function(package) {
  # Run the garbage collector first so finalisers on any deleted external
  # pointers fire before their DLL goes away.
  gc()

  # Special case: pkgload's own DLL must stay loaded so nsreg() remains
  # available for makeNamespace. Compiled-code changes to pkgload therefore
  # need a reinstall rather than a load_all().
  if (package == "pkgload") {
    return(invisible())
  }

  dlls <- loaded_dlls(package)
  lapply(dlls, function(dll) dyn.unload(dll[["path"]]))

  # Drop the unloaded DLLs from R's registry of loaded dynamic libraries.
  registered <- .dynLibs()
  .dynLibs(registered[!(registered %in% dlls)])

  invisible()
}
# Remove the S3 methods that `package` registered in the method tables of the
# namespaces that own the corresponding generics.
s3_unregister <- function(package) {
ns <- ns_env(package)
# If the package is loaded, but not installed this will fail, so we bail out in that case.
ns_defs <- suppressWarnings(try(parse_ns_file(system.file(package = package)), silent = TRUE))
if (inherits(ns_defs, "try-error")) {
return()
}
# The first two columns of S3methods are (generic, class).
methods <- ns_defs$S3methods[, 1:2, drop = FALSE]
for (i in seq_len(nrow(methods))) {
method <- methods[i, , drop = FALSE]
# Look up the generic the method was registered for; skip if it is gone.
generic <- env_get(ns, method[[1]], inherit = TRUE, default = NULL)
if (is_null(generic)) {
next
}
# The S3 method table lives in the namespace that defines the generic.
generic_ns <- topenv(fn_env(generic))
if (!is_namespace(generic_ns)) {
next
}
table <- generic_ns$.__S3MethodsTable__.
if (!is_environment(table)) {
next
}
# Registered methods are stored under "generic.class"; unbind ours.
nm <- paste0(method, collapse = ".")
env_unbind(table, nm)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_summarize_years}
\alias{fars_summarize_years}
\title{Read in multiple files by year}
\usage{
fars_summarize_years(years)
}
\arguments{
\item{years}{A vector of years to read in}
}
\value{
returns a list of dataframes with columns MONTH and year, NULL when an error is encountered
}
\description{
\code{fars_read_years} takes a vector of years it iterates over them to
create filenames to read in and return with only the MONTH and year columns
selected. The function will create a filename depending on the year input, if the
file does not exist an error will be thrown. If it does exist, it will attempt to
read them in, mutate a new column with the year and then select the columns MONTH
and year.
}
\note{
this function depends on dplyr mutate and select functions
}
\examples{
\dontrun{
fars_read_years(c(2013, 2014))
}
}
|
/man/fars_summarize_years.Rd
|
no_license
|
itfeature/Week4Rpackage
|
R
| false
| true
| 936
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_summarize_years}
\alias{fars_summarize_years}
\title{Read in multiple files by year}
\usage{
fars_summarize_years(years)
}
\arguments{
\item{years}{A vector of years to read in}
}
\value{
returns a list of dataframes with columns MONTH and year, NULL when an error is encountered
}
\description{
\code{fars_read_years} takes a vector of years it iterates over them to
create filenames to read in and return with only the MONTH and year columns
selected. The function will create a filename depending on the year input, if the
file does not exist an error will be thrown. If it does exist, it will attempt to
read them in, mutate a new column with the year and then select the columns MONTH
and year.
}
\note{
this function depends on dplyr mutate and select functions
}
\examples{
\dontrun{
fars_read_years(c(2013, 2014))
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_sidebar.R
\name{mod_sidebar_ui}
\alias{mod_sidebar_ui}
\alias{mod_sidebar_server}
\title{mod_sidebar_ui and mod_sidebar_server}
\usage{
mod_sidebar_ui(id)
mod_sidebar_server(input, output, session)
}
\arguments{
\item{id}{shiny id}
\item{input}{internal}
\item{output}{internal}
\item{session}{internal}
}
\description{
A shiny Module.
}
\keyword{internal}
|
/man/mod_sidebar.Rd
|
permissive
|
MehdiChelh/SIA
|
R
| false
| true
| 443
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_sidebar.R
\name{mod_sidebar_ui}
\alias{mod_sidebar_ui}
\alias{mod_sidebar_server}
\title{mod_sidebar_ui and mod_sidebar_server}
\usage{
mod_sidebar_ui(id)
mod_sidebar_server(input, output, session)
}
\arguments{
\item{id}{shiny id}
\item{input}{internal}
\item{output}{internal}
\item{session}{internal}
}
\description{
A shiny Module.
}
\keyword{internal}
|
\name{sir_additive}
\alias{sir_additive}
\title{SIR model with equal births and deaths and constant additive noise}
\description{
A basic SIR model with equal births and deaths; 3 compartments: susceptible, infected and recovered, with additive noise added to the transmission rate. The process is solved using \code{\link{ode}}.
}
\usage{
sir_additive(pars = NULL, init = NULL, time = NULL)
}
\arguments{\item{pars}{\code{\link{vector}} of 5 parameters \code{beta}, \code{gamma}, \code{mu}, \code{psi} and \code{N}. \code{beta} is the transmission rate, \code{gamma} is the recovery rate, \code{mu} is the per capita death rate, and the population level birth rate, \code{psi} is the noise term and \code{N} the entire population}
\item{init}{\code{\link{vector}} of 3 parameters \code{S} (Number of Susceptibles), \code{I} (Number of Infections) and \code{R} (Number of Recovereds). These values are the starting values}
\item{time}{time sequence}
}
\value{\code{\link{list}}{The list contains 4 elements.First, second and third are the arguments of the function \code{$pars},\code{$init}, \code{$time}. The fourth argument \code{results}, is a \code{\link{data.frame}} and S3 element (class type 'sir'). The results for each stage across a specific time-frame are displayed in this dataframe}
}
\details{
\code{S},\code{I},\code{R} must be positive and S + I + R <= N.
}
\examples{
\code{# Input parameters and initial stages.
parameters <- c(mu = 0.003,
beta = 1, gamma = 0.1, psi = 0.9, N = 40000)
initials <- c(S = 20000, I = 500, R =0)
times <- 0:30
# Solve
sir.additive <- sir_additive(pars = parameters, init = initials, time = times)
#Plot
plot(sir.additive$results)
}
}
|
/man/sir_additive.Rd
|
no_license
|
NicolasPhysentzides/popdyn
|
R
| false
| false
| 1,735
|
rd
|
\name{sir_additive}
\alias{sir_additive}
\title{SIR model with equal births and deaths and constant additive noise}
\description{
A basic SIR model with equal births and deaths; 3 compartments: susceptible, infected and recovered, with additive noise added to the transmission rate. The process is solved using \code{\link{ode}}.
}
\usage{
sir_additive(pars = NULL, init = NULL, time = NULL)
}
\arguments{\item{pars}{\code{\link{vector}} of 5 parameters \code{beta}, \code{gamma}, \code{mu}, \code{psi} and \code{N}. \code{beta} is the transmission rate, \code{gamma} is the recovery rate, \code{mu} is the per capita death rate, and the population level birth rate, \code{psi} is the noise term and \code{N} the entire population}
\item{init}{\code{\link{vector}} of 3 parameters \code{S} (Number of Susceptibles), \code{I} (Number of Infections) and \code{R} (Number of Recovereds). These values are the starting values}
\item{time}{time sequence}
}
\value{\code{\link{list}}{The list contains 4 elements.First, second and third are the arguments of the function \code{$pars},\code{$init}, \code{$time}. The fourth argument \code{results}, is a \code{\link{data.frame}} and S3 element (class type 'sir'). The results for each stage across a specific time-frame are displayed in this dataframe}
}
\details{
\code{S},\code{I},\code{R} must be positive and S + I + R <= N.
}
\examples{
\code{# Input parameters and initial stages.
parameters <- c(mu = 0.003,
beta = 1, gamma = 0.1, psi = 0.9, N = 40000)
initials <- c(S = 20000, I = 500, R =0)
times <- 0:30
# Solve
sir.additive <- sir_additive(pars = parameters, init = initials, time = times)
#Plot
plot(sir.additive$results)
}
}
|
rm(list=ls())
library(cttools)
library(fslr)
options(matlab.path='/Applications/MATLAB_R2016a.app/bin')
setup <- function(id, study = "Registration"){
# Define global path variables (via <<-) for a given subject `id` and study:
# rootdir, homedir, and basedir. The root directory switches on the current
# username (authors' local machines vs. the cluster file system).
username <- Sys.info()["user"][[1]]
cluster=FALSE
if (username %in% c("muschellij2", "johnmuschelli")){
# rootdir <- "/Volumes/DATA/New_Age_Test"
rootdir <- "~/CT_Registration"
} else {
rootdir <- "/dexter/disk2/smart/stroke_ct/ident"
cluster =TRUE;
}
rootdir <- path.expand(rootdir)
# ss <- as.numeric(strsplit(id, "-")[[1]][2])
# if (ss > 4000){
# study <- "CLEAR_III"
# dpath <- file.path("CLEAR", "CLEAR III")
# } else if (ss > 300 & ss < 500){
# dpath <- study <- "MISTIE"
# } else if (ss > 500 & ss < 4000) {
# dpath <- study <- "ICES"
# }
# NOTE(review): `cluster` and `progdir` are computed but never used or
# exported, and rootdir is path.expand()-ed twice (locally above, globally here).
rootdir <<- path.expand(rootdir)
homedir <<- file.path(rootdir, study)
homedir <<- path.expand(homedir)
#progdir <- file.path(dirname(basedir), "programs")
progdir <- file.path(rootdir, "programs")
# source(file.path(progdir, "convert_DICOM.R"))
# source(file.path(progdir, "fslhd.R"))
basedir <<- file.path(homedir, id)
}
#### setting up if things are on the cluster or not
study = "Registration"
setup(study, study=study)
ids = list.dirs(homedir, recursive=FALSE, full.names=FALSE)
ids = basename(ids)
ids = grep("\\d\\d\\d-(\\d|)\\d\\d\\d", ids, value=TRUE)
length(ids)
### initial setup
iid <- as.numeric(Sys.getenv("SGE_TASK_ID"))
if (is.na(iid)) iid <- 1
id <- ids[iid]
setup(id, study = study)
setwd(basedir)
# cutoffs <- 300
iname <- 1
fnames <- list.files(path=basedir, pattern=".nii.gz",
recursive=FALSE, full.names = TRUE)
fnames <- fnames[grepl("_CT_", fnames)]
fnames <- fnames[!grepl("rigid", fnames)]
intensity = .01
stub = paste0(nii.stub(basename(fnames)),
sprintf("_SS_Mask_%0.2g", intensity), ".nii.gz")
ss = file.path(dirname(fnames), "Skull_Stripped", stub)
df = data.frame(img = fnames,
ss = ss,
stringsAsFactors=FALSE)
exist = apply(df[, c("img", "ss")], 1, file.exists)
exist = apply(exist, 2, all)
df = df[exist, ]
ifname <- df$img[1]
masks <- c(FALSE, TRUE)
scenarios <- expand.grid(mask=masks)
iscen <- 2
### This is to threshold the values again
rethresh <- FALSE
# for (mask in masks){
mask = scenarios$mask[iscen]
# iscen = as.integer(Sys.getenv("SGE_TASK_ID"))
# if (is.na(iscen)) stop("no scenario denoted")
outdir <- file.path(basedir, "Coregistered")
if (!file.exists(outdir)){
dir.create(outdir, showWarnings=FALSE)
}
addstub = ""
if (mask) {
addstub <- paste0(addstub, "_skullmask")
}
fnames = df$img
if (mask){
fnames = df$ss
}
ifname <- fnames[1]
ref.img <- fnames[1]
for (iname in 2:length(fnames)){
ifname <- fnames[iname]
stub <- paste0("rigid_", basename(ifname))
## strip off .nii.gz or .nii
outfile <- file.path(outdir, paste0(nii.stub(stub), addstub))
outmat = paste0(nii.stub(outfile), ".mat")
opts = "-v"
if (mask){
opts = paste(opts, "-cost leastsq")
}
ret = flirt(infile=ifname, reffile=ref.img, outfile=outfile,
omat = outmat,
dof = 6, intern=FALSE, retimg = FALSE,
opts = "-cost leastsq -v" )
## used mutualinfo before
# flirt.wrap(image=ifname, rigid=TRUE, run=TRUE, ref=ref.img,
# mask=mask)
cat("\n\n")
# bn <- basename(ifname)
# fslthresh(image=ifname, autoname=TRUE, outdir=outdir, lower=icut)
}
print(icut)
# }
|
/Bet_Skull_Registration.R
|
no_license
|
muschellij2/CT_Pipeline
|
R
| false
| false
| 3,418
|
r
|
# Rigid-body (6 dof) co-registration of serial CT scans to a subject's first
# scan, using FSL flirt via the fslr package. Designed to run as an SGE array
# job (one task per subject).
rm(list = ls())  # NOTE(review): clearing the workspace in a script is discouraged
library(cttools)
library(fslr)
options(matlab.path = '/Applications/MATLAB_R2016a.app/bin')

# Set global path variables for one subject.
# Side effects (deliberate): assigns `rootdir`, `homedir`, `basedir` with <<-
# so the rest of the script can see them.
#   id    - subject directory name under homedir
#   study - study subdirectory under the root data directory
setup <- function(id, study = "Registration"){
  username <- Sys.info()["user"][[1]]
  cluster <- FALSE
  if (username %in% c("muschellij2", "johnmuschelli")){
    # rootdir <- "/Volumes/DATA/New_Age_Test"
    rootdir <- "~/CT_Registration"
  } else {
    rootdir <- "/dexter/disk2/smart/stroke_ct/ident"
    cluster <- TRUE
  }
  rootdir <<- path.expand(rootdir)
  homedir <<- file.path(rootdir, study)
  homedir <<- path.expand(homedir)
  progdir <- file.path(rootdir, "programs")
  basedir <<- file.path(homedir, id)
}

#### setting up if things are on the cluster or not
study <- "Registration"
# First call only establishes rootdir/homedir; basedir is reset per subject below.
setup(study, study = study)
ids <- list.dirs(homedir, recursive = FALSE, full.names = FALSE)
ids <- basename(ids)
# Keep only directories that look like subject IDs, e.g. 123-456 or 123-4567.
ids <- grep("\\d\\d\\d-(\\d|)\\d\\d\\d", ids, value = TRUE)
length(ids)

### initial setup: pick the subject from the SGE array task ID (default 1)
iid <- as.numeric(Sys.getenv("SGE_TASK_ID"))
if (is.na(iid)) iid <- 1
id <- ids[iid]
setup(id, study = study)
setwd(basedir)

iname <- 1
fnames <- list.files(path = basedir, pattern = ".nii.gz",
                     recursive = FALSE, full.names = TRUE)
fnames <- fnames[grepl("_CT_", fnames)]     # CT scans only
fnames <- fnames[!grepl("rigid", fnames)]   # skip already-registered output
intensity <- .01
stub <- paste0(nii.stub(basename(fnames)),
               sprintf("_SS_Mask_%0.2g", intensity), ".nii.gz")
ss <- file.path(dirname(fnames), "Skull_Stripped", stub)
df <- data.frame(img = fnames,
                 ss = ss,
                 stringsAsFactors = FALSE)
# Keep only rows where both the raw image and its skull-stripped mask exist.
exist <- apply(df[, c("img", "ss")], 1, file.exists)
exist <- apply(exist, 2, all)
df <- df[exist, ]
ifname <- df$img[1]

masks <- c(FALSE, TRUE)
scenarios <- expand.grid(mask = masks)
iscen <- 2  # scenario 2 = register the skull-stripped images
### This is to threshold the values again
rethresh <- FALSE
mask <- scenarios$mask[iscen]

outdir <- file.path(basedir, "Coregistered")
if (!file.exists(outdir)){
  dir.create(outdir, showWarnings = FALSE)
}
addstub <- ""
if (mask) {
  addstub <- paste0(addstub, "_skullmask")
}
fnames <- df$img
if (mask){
  fnames <- df$ss
}
ifname <- fnames[1]
ref.img <- fnames[1]  # register everything to the first scan

# seq_along(fnames)[-1] is empty when fewer than 2 files survive filtering,
# whereas 2:length(fnames) would wrongly iterate c(2, 1) or c(2, 0).
for (iname in seq_along(fnames)[-1]){
  ifname <- fnames[iname]
  stub <- paste0("rigid_", basename(ifname))
  ## strip off .nii.gz or .nii
  outfile <- file.path(outdir, paste0(nii.stub(stub), addstub))
  outmat <- paste0(nii.stub(outfile), ".mat")
  # Least-squares cost only when registering skull-stripped images;
  # otherwise flirt's default (correlation ratio) cost is used.
  opts <- "-v"
  if (mask){
    opts <- paste(opts, "-cost leastsq")
  }
  # BUG FIX: the constructed `opts` was previously ignored in favor of a
  # hard-coded "-cost leastsq -v"; pass the mask-dependent options instead.
  ret <- flirt(infile = ifname, reffile = ref.img, outfile = outfile,
               omat = outmat,
               dof = 6, intern = FALSE, retimg = FALSE,
               opts = opts)
  cat("\n\n")
}
# Removed `print(icut)`: `icut` was never defined (leftover from
# commented-out thresholding code) and errored at runtime.
|
# Assignment: polynomial regression (Moisture ~ Protein), stepwise variable
# selection, ridge/LASSO regression, and LASSO cross-validation on the
# tecator dataset.
library(readxl)        # read spreadsheet-based files
library(ggplot2)       # visualisation
options(scipen = 999)  # avoid scientific notation
set.seed(12345)

# Loading data
tecatorData <- read_excel("tecator.xlsx", sheet = "data")

# 1. Plotting Moisture vs. Protein
plot(tecatorData$Protein, tecatorData$Moisture, main = "Moisture vs. Protein",
     xlab = "Protein", ylab = "Moisture", col = "blue")
abline(lm(formula = Moisture ~ Protein, data = tecatorData))

# Subsetting data to Protein/Moisture (columns 103:104); 50/50 train/test split
requiredData <- tecatorData[, 103:104]
id <- sample(seq_len(nrow(requiredData)), floor(nrow(requiredData) * 0.5))
train <- requiredData[id, ]
test <- requiredData[-id, ]

# 2. Polynomial regression models M1..M6 of increasing degree
tecatorModel_1 <- lm(Moisture ~ Protein, data = train)
summary(tecatorModel_1)
tecatorModel_2 <- lm(Moisture ~ Protein + I(Protein^2), data = train)
summary(tecatorModel_2)
tecatorModel_3 <- lm(Moisture ~ Protein + I(Protein^2) + I(Protein^3),
                     data = train)
summary(tecatorModel_3)
tecatorModel_4 <- lm(Moisture ~ Protein + I(Protein^2) + I(Protein^3) +
                       I(Protein^4), data = train)
summary(tecatorModel_4)
tecatorModel_5 <- lm(Moisture ~ Protein + I(Protein^2) + I(Protein^3) +
                       I(Protein^4) + I(Protein^5), data = train)
summary(tecatorModel_5)
tecatorModel_6 <- lm(Moisture ~ Protein + I(Protein^2) + I(Protein^3) +
                       I(Protein^4) + I(Protein^5) + I(Protein^6), data = train)
summary(tecatorModel_6)

# 3. Train/test MSE for each model. vapply over a model list replaces the
# twelve copy-pasted MSE lines; predictions are unchanged.
models <- list(tecatorModel_1, tecatorModel_2, tecatorModel_3,
               tecatorModel_4, tecatorModel_5, tecatorModel_6)
mse <- function(model, data) {
  mean((data$Moisture - predict(model, data[, 1]))^2)
}
MSE_train <- vapply(models, mse, numeric(1), data = train)
MSE_test <- vapply(models, mse, numeric(1), data = test)

plot(1:6, MSE_train, type = "l", ylim = c(20, 45), col = "red", lwd = 2.5,
     xlab = "Index", ylab = "MSE")
lines(1:6, MSE_test, col = "blue", lwd = 2.5)
legend(x = "center", legend = c("MSE_Test", "MSE_Train"), lwd = 1,
       col = c("Blue", "Red"), lty = 1)
print(anova(tecatorModel_1, tecatorModel_2, tecatorModel_3,
            tecatorModel_4, tecatorModel_5, tecatorModel_6))

# 4. Variable selection via stepwise AIC, using the 100 absorbance channels
#    to predict Fat (drop Protein/Moisture and the sample-ID column).
library(MASS)
aicData <- tecatorData
aicData <- aicData[, -c(103:104)]
aicData <- aicData[, -c(1)]
stepaic <- stepAIC(lm(Fat ~ ., data = aicData))
stepaic$anova

# 5. Ridge regression (alpha = 0) on scaled predictors/response
library(glmnet)
covariates <- scale(tecatorData[, 2:101])
response <- scale(tecatorData$Fat)
ridge_model <- glmnet(as.matrix(covariates),
                      response, alpha = 0, family = "gaussian")
plot(ridge_model, xvar = "lambda", label = TRUE, main = "Ridge regression \n\n")

# 6. LASSO regression (alpha = 1)
lasso_model <- glmnet(as.matrix(covariates),
                      response, alpha = 1, family = "gaussian")
plot(lasso_model, xvar = "lambda", label = TRUE, main = "LASSO regression\n\n")

# 7. Cross-validation for LASSO over a lambda grid.
# BUG FIX: the original passed xvar="lamdba" (typo) and label=TRUE, neither of
# which plot.cv.glmnet accepts; they are dropped here.
lasso_model_cv <- cv.glmnet(as.matrix(covariates), response, alpha = 1,
                            family = "gaussian", lambda = seq(0, 1, 0.001))
plot(lasso_model_cv, main = "LASSO Cross-validation\n\n")
coef(lasso_model_cv, s = "lambda.min")
print(lasso_model_cv$lambda.min)
|
/Lab-1/Assignment_4.R
|
no_license
|
lawrence2269/TDDE01
|
R
| false
| false
| 3,700
|
r
|
# Assignment: polynomial regression (Moisture ~ Protein), stepwise variable
# selection, ridge/LASSO regression, and LASSO cross-validation on the
# tecator dataset.
library(readxl)        # read spreadsheet-based files
library(ggplot2)       # visualisation
options(scipen = 999)  # avoid scientific notation
set.seed(12345)

# Loading data
tecatorData <- read_excel("tecator.xlsx", sheet = "data")

# 1. Plotting Moisture vs. Protein
plot(tecatorData$Protein, tecatorData$Moisture, main = "Moisture vs. Protein",
     xlab = "Protein", ylab = "Moisture", col = "blue")
abline(lm(formula = Moisture ~ Protein, data = tecatorData))

# Subsetting data to Protein/Moisture (columns 103:104); 50/50 train/test split
requiredData <- tecatorData[, 103:104]
id <- sample(seq_len(nrow(requiredData)), floor(nrow(requiredData) * 0.5))
train <- requiredData[id, ]
test <- requiredData[-id, ]

# 2. Polynomial regression models M1..M6 of increasing degree
tecatorModel_1 <- lm(Moisture ~ Protein, data = train)
summary(tecatorModel_1)
tecatorModel_2 <- lm(Moisture ~ Protein + I(Protein^2), data = train)
summary(tecatorModel_2)
tecatorModel_3 <- lm(Moisture ~ Protein + I(Protein^2) + I(Protein^3),
                     data = train)
summary(tecatorModel_3)
tecatorModel_4 <- lm(Moisture ~ Protein + I(Protein^2) + I(Protein^3) +
                       I(Protein^4), data = train)
summary(tecatorModel_4)
tecatorModel_5 <- lm(Moisture ~ Protein + I(Protein^2) + I(Protein^3) +
                       I(Protein^4) + I(Protein^5), data = train)
summary(tecatorModel_5)
tecatorModel_6 <- lm(Moisture ~ Protein + I(Protein^2) + I(Protein^3) +
                       I(Protein^4) + I(Protein^5) + I(Protein^6), data = train)
summary(tecatorModel_6)

# 3. Train/test MSE for each model. vapply over a model list replaces the
# twelve copy-pasted MSE lines; predictions are unchanged.
models <- list(tecatorModel_1, tecatorModel_2, tecatorModel_3,
               tecatorModel_4, tecatorModel_5, tecatorModel_6)
mse <- function(model, data) {
  mean((data$Moisture - predict(model, data[, 1]))^2)
}
MSE_train <- vapply(models, mse, numeric(1), data = train)
MSE_test <- vapply(models, mse, numeric(1), data = test)

plot(1:6, MSE_train, type = "l", ylim = c(20, 45), col = "red", lwd = 2.5,
     xlab = "Index", ylab = "MSE")
lines(1:6, MSE_test, col = "blue", lwd = 2.5)
legend(x = "center", legend = c("MSE_Test", "MSE_Train"), lwd = 1,
       col = c("Blue", "Red"), lty = 1)
print(anova(tecatorModel_1, tecatorModel_2, tecatorModel_3,
            tecatorModel_4, tecatorModel_5, tecatorModel_6))

# 4. Variable selection via stepwise AIC, using the 100 absorbance channels
#    to predict Fat (drop Protein/Moisture and the sample-ID column).
library(MASS)
aicData <- tecatorData
aicData <- aicData[, -c(103:104)]
aicData <- aicData[, -c(1)]
stepaic <- stepAIC(lm(Fat ~ ., data = aicData))
stepaic$anova

# 5. Ridge regression (alpha = 0) on scaled predictors/response
library(glmnet)
covariates <- scale(tecatorData[, 2:101])
response <- scale(tecatorData$Fat)
ridge_model <- glmnet(as.matrix(covariates),
                      response, alpha = 0, family = "gaussian")
plot(ridge_model, xvar = "lambda", label = TRUE, main = "Ridge regression \n\n")

# 6. LASSO regression (alpha = 1)
lasso_model <- glmnet(as.matrix(covariates),
                      response, alpha = 1, family = "gaussian")
plot(lasso_model, xvar = "lambda", label = TRUE, main = "LASSO regression\n\n")

# 7. Cross-validation for LASSO over a lambda grid.
# BUG FIX: the original passed xvar="lamdba" (typo) and label=TRUE, neither of
# which plot.cv.glmnet accepts; they are dropped here.
lasso_model_cv <- cv.glmnet(as.matrix(covariates), response, alpha = 1,
                            family = "gaussian", lambda = seq(0, 1, 0.001))
plot(lasso_model_cv, main = "LASSO Cross-validation\n\n")
coef(lasso_model_cv, s = "lambda.min")
print(lasso_model_cv$lambda.min)
|
#Data files needed for analysis - all available in the Habitat-Loss-Simulations repository
#GridPathForAnalysis.csv
#BirdsGridsJoin.csv - each bird record also has the Grid ID of where it is located for all 7 grid sizes; zipped file because original was too large
#PlantsGridsJoin.csv - same for plants
#BirdNames.csv
#PlantNames.csv
#Read in data files
FN <- read.csv("FullNetworkpresabs.csv")
BirdsEverything <- read.csv("BirdsGridsJoin.csv")
PlantsEverything <- read.csv("PlantsGridsJoin.csv")
BirdNames <- read.csv("BirdNames.csv")
PlantNames <- read.csv("PlantNames.csv")
Birds <- merge(BirdsEverything, BirdNames)
Plants <- merge(PlantsEverything, PlantNames)
#Limit bird observations to April-September
Birds <- subset(Birds, Birds$MONTH!=10)
Birds <- subset(Birds, Birds$MONTH!=11)
Birds <- subset(Birds, Birds$MONTH!=12)
Birds <- subset(Birds, Birds$MONTH!=1)
Birds <- subset(Birds, Birds$MONTH!=2)
Birds <- subset(Birds, Birds$MONTH!=3)
#there are 5 bird species in the eBird files that aren't found in the full network - ACWO, BBWO, CAGU, HOOR, SCOR
Birds <- subset(Birds, Birds$NameCode!="ACWO")
Birds <- subset(Birds, Birds$NameCode!="BBWO")
Birds <- subset(Birds, Birds$NameCode!="CAGU")
Birds <- subset(Birds, Birds$NameCode!="HOOR")
Birds <- subset(Birds, Birds$NameCode!="SCOR")
#Load in the paths data file for the area sizes
Paths <- read.csv("GridPathForAnalysis.csv")
Paths <- Paths[,c(5,6,11,12,17,18,23,24,29,30,35,36,41,42)]
Areas6 <- cbind(Paths$ID_6, Paths$Area_6)
colnames(Areas6) <- c("ID_6", "Area6")
Areas5 <- cbind(Paths$ID_5, Paths$Area_5)
colnames(Areas5) <- c("ID_5", "Area5")
Areas4 <- cbind(Paths$ID_4, Paths$Area_4)
colnames(Areas4) <- c("ID_4", "Area4")
Areas3 <- cbind(Paths$ID_3, Paths$Area_3)
colnames(Areas3) <- c("ID_3", "Area3")
Areas2 <- cbind(Paths$ID_2, Paths$Area_2)
colnames(Areas2) <- c("ID_2", "Area2")
Areas1 <- cbind(Paths$ID_1, Paths$Area_1)
colnames(Areas1) <- c("ID_1", "Area1")
Areas0 <- cbind(Paths$ID, Paths$Area)
colnames(Areas0) <- c("ID", "Area")
#Use the below to modify the plant and bird files
#remove all ID_6 that are smaller than the next grid size down (Area of ID_6 >= 1600000000)
AreasRemove6 <- subset(Areas6, Areas6[,2] >= 1600000000)
length(AreasRemove6[,1])
Areas6 <- AreasRemove6
#remove all of ID_5 that are smaller than the next grid size down (Area of ID_5 >= 400000000)
AreasRemove5 <- subset(Areas5, Areas5[,2] >= 400000000)
length(AreasRemove5[,1])
Areas5 <- AreasRemove5
#remove all of ID_4 that are smaller than the next grid size down (Area of ID_4 >= 100000000)
AreasRemove4 <- subset(Areas4, Areas4[,2] >= 100000000)
length(AreasRemove4[,1])
Areas4 <- AreasRemove4
#remove all of ID_3 that are smaller than the next grid size down (Area of ID_3 >= 25000000)
AreasRemove3 <- subset(Areas3, Areas3[,2] >= 25000000)
length(AreasRemove3[,1])
Areas3 <- AreasRemove3
#remove all of ID_2 that are smaller than the next grid size down (Area of ID_2 >= 6250000)
AreasRemove2 <- subset(Areas2, Areas2[,2] >= 6250000)
length(AreasRemove2[,1])
Areas2 <- AreasRemove2
#remove all of ID_1 that are smaller than the next grid size down (Area of ID_1 >= 1562500)
AreasRemove1 <- subset(Areas1, Areas1[,2] >= 1562500)
length(AreasRemove1[,1])
Areas1 <- AreasRemove1
#remove all of ID that are smaller than 1562500 (Area of ID_ >= 1562500)
AreasRemove0 <- subset(Areas0, Areas0[,2] >= 1562500)
length(AreasRemove0[,1])
Areas0 <- AreasRemove0
#separate into a file for each grid size - 6400 (Grid6), 1600 (Grid5), 400 (Grid4), 100 (Grid3), 25 (Grid2), 6.25 (Grid1), 1.5625 (Grid0) - and table of GridID (columns) by Spp (rows)
PlantsGrid6 <- table(Plants$NameCode, Plants$ID_6)
PlantsGrid5 <- table(Plants$NameCode, Plants$ID_5)
PlantsGrid4 <- table(Plants$NameCode, Plants$ID_4)
PlantsGrid3 <- table(Plants$NameCode, Plants$ID_3)
PlantsGrid2 <- table(Plants$NameCode, Plants$ID_2)
PlantsGrid1 <- table(Plants$NameCode, Plants$ID_1)
PlantsGrid0 <- table(Plants$NameCode, Plants$ID)
#check density of plant sampling in each grid cell
x <- apply(PlantsGrid6, 2, sum)
PlantsArea6 <- mat.or.vec(nr=length(x), nc=2)
PlantsArea6[,1] <- as.numeric(colnames(PlantsGrid6))
PlantsArea6[,2] <- x
colnames(PlantsArea6) <- c("ID_6", "SppNum")
Temp <- merge(PlantsArea6, Areas6)
Plants6 <- unique(Temp)
x <- apply(PlantsGrid5, 2, sum)
PlantsArea5 <- mat.or.vec(nr=length(x), nc=2)
PlantsArea5[,1] <- as.numeric(colnames(PlantsGrid5))
PlantsArea5[,2] <- x
colnames(PlantsArea5) <- c("ID_5", "SppNum")
Temp <- merge(PlantsArea5, Areas5)
Plants5 <- unique(Temp)
x <- apply(PlantsGrid4, 2, sum)
PlantsArea4 <- mat.or.vec(nr=length(x), nc=2)
PlantsArea4[,1] <- as.numeric(colnames(PlantsGrid4))
PlantsArea4[,2] <- x
colnames(PlantsArea4) <- c("ID_4", "SppNum")
Temp <- merge(PlantsArea4, Areas4)
Plants4 <- unique(Temp)
x <- apply(PlantsGrid3, 2, sum)
PlantsArea3 <- mat.or.vec(nr=length(x), nc=2)
PlantsArea3[,1] <- as.numeric(colnames(PlantsGrid3))
PlantsArea3[,2] <- x
colnames(PlantsArea3) <- c("ID_3", "SppNum")
Temp <- merge(PlantsArea3, Areas3)
Plants3 <- unique(Temp)
x <- apply(PlantsGrid2, 2, sum)
PlantsArea2 <- mat.or.vec(nr=length(x), nc=2)
PlantsArea2[,1] <- as.numeric(colnames(PlantsGrid2))
PlantsArea2[,2] <- x
colnames(PlantsArea2) <- c("ID_2", "SppNum")
Temp <- merge(PlantsArea2, Areas2)
Plants2 <- unique(Temp)
x <- apply(PlantsGrid1, 2, sum)
PlantsArea1 <- mat.or.vec(nr=length(x), nc=2)
PlantsArea1[,1] <- as.numeric(colnames(PlantsGrid1))
PlantsArea1[,2] <- x
colnames(PlantsArea1) <- c("ID_1", "SppNum")
Temp <- merge(PlantsArea1, Areas1)
Plants1 <- unique(Temp)
x <- apply(PlantsGrid0, 2, sum)
PlantsArea0 <- mat.or.vec(nr=length(x), nc=2)
PlantsArea0[,1] <- as.numeric(colnames(PlantsGrid0))
PlantsArea0[,2] <- x
colnames(PlantsArea0) <- c("ID", "SppNum")
Temp <- merge(PlantsArea0, Areas0)
Plants0 <- unique(Temp)
#remove the upper and lower (2.5%) quantiles of areas sampled
Density <- Plants6[,2]/Plants6[,3]
Plants6 <- cbind(Plants6, Density)
UQ <- quantile(Plants6[,2]/Plants6[,3], probs=0.975)
LQ <- quantile(Plants6[,2]/Plants6[,3], probs=0.025)
Plants6 <- Plants6[which(Plants6$Density>LQ),]
Plants6 <- Plants6[which(Plants6$Density<UQ),]
Plants <- Plants[which(Plants$ID_6 %in% Plants6$ID_6),]
Density <- Plants5[,2]/Plants5[,3]
Plants5 <- cbind(Plants5, Density)
UQ <- quantile(Plants5[,2]/Plants5[,3], probs=0.975)
LQ <- quantile(Plants5[,2]/Plants5[,3], probs=0.025)
Plants5 <- Plants5[which(Plants5$Density>LQ),]
Plants5 <- Plants5[which(Plants5$Density<UQ),]
Plants <- Plants[which(Plants$ID_5 %in% Plants5$ID_5),]
Density <- Plants4[,2]/Plants4[,3]
Plants4 <- cbind(Plants4, Density)
UQ <- quantile(Plants4[,2]/Plants4[,3], probs=0.975)
LQ <- quantile(Plants4[,2]/Plants4[,3], probs=0.025)
Plants4 <- Plants4[which(Plants4$Density>LQ),]
Plants4 <- Plants4[which(Plants4$Density<UQ),]
Plants <- Plants[which(Plants$ID_4 %in% Plants4$ID_4),]
Density <- Plants3[,2]/Plants3[,3]
Plants3 <- cbind(Plants3, Density)
UQ <- quantile(Plants3[,2]/Plants3[,3], probs=0.975)
LQ <- quantile(Plants3[,2]/Plants3[,3], probs=0.025)
Plants3 <- Plants3[which(Plants3$Density>LQ),]
Plants3 <- Plants3[which(Plants3$Density<UQ),]
Plants <- Plants[which(Plants$ID_3 %in% Plants3$ID_3),]
Density <- Plants2[,2]/Plants2[,3]
Plants2 <- cbind(Plants2, Density)
UQ <- quantile(Plants2[,2]/Plants2[,3], probs=0.975)
LQ <- quantile(Plants2[,2]/Plants2[,3], probs=0.025)
Plants2 <- Plants2[which(Plants2$Density>LQ),]
Plants2 <- Plants2[which(Plants2$Density<UQ),]
Plants <- Plants[which(Plants$ID_2 %in% Plants2$ID_2),]
Density <- Plants1[,2]/Plants1[,3]
Plants1 <- cbind(Plants1, Density)
UQ <- quantile(Plants1[,2]/Plants1[,3], probs=0.975)
LQ <- quantile(Plants1[,2]/Plants1[,3], probs=0.025)
Plants1 <- Plants1[which(Plants1$Density>LQ),]
Plants1 <- Plants1[which(Plants1$Density<UQ),]
Plants <- Plants[which(Plants$ID_1 %in% Plants1$ID_1),]
Density <- Plants0[,2]/Plants0[,3]
Plants0 <- cbind(Plants0, Density)
UQ <- quantile(Plants0[,2]/Plants0[,3], probs=0.975)
LQ <- quantile(Plants0[,2]/Plants0[,3], probs=0.025)
Plants0 <- Plants0[which(Plants0$Density>LQ),]
Plants0 <- Plants0[which(Plants0$Density<UQ),]
Plants <- Plants[which(Plants$ID %in% Plants0$ID),]
#Redo the plant tables to incorporate changes
PlantsGrid6 <- table(Plants$NameCode, Plants$ID_6)
PlantsGrid5 <- table(Plants$NameCode, Plants$ID_5)
PlantsGrid4 <- table(Plants$NameCode, Plants$ID_4)
PlantsGrid3 <- table(Plants$NameCode, Plants$ID_3)
PlantsGrid2 <- table(Plants$NameCode, Plants$ID_2)
PlantsGrid1 <- table(Plants$NameCode, Plants$ID_1)
PlantsGrid0 <- table(Plants$NameCode, Plants$ID)
#the above gives number of INDIVIDUALS of each spp - need to translate it to 0's and 1's so that just counting each spp ONCE (pres/abs)
PlantsGrid6 <- ifelse(PlantsGrid6>=1, 1, 0)
PlantsGrid5 <- ifelse(PlantsGrid5>=1, 1, 0)
PlantsGrid4 <- ifelse(PlantsGrid4>=1, 1, 0)
PlantsGrid3 <- ifelse(PlantsGrid3>=1, 1, 0)
PlantsGrid2 <- ifelse(PlantsGrid2>=1, 1, 0)
PlantsGrid1 <- ifelse(PlantsGrid1>=1, 1, 0)
PlantsGrid0 <- ifelse(PlantsGrid0>=1, 1, 0)
BirdsGrid6 <- table(Birds$NameCode, Birds$ID_6)
BirdsGrid5 <- table(Birds$NameCode, Birds$ID_5)
BirdsGrid4 <- table(Birds$NameCode, Birds$ID_4)
BirdsGrid3 <- table(Birds$NameCode, Birds$ID_3)
BirdsGrid2 <- table(Birds$NameCode, Birds$ID_2)
BirdsGrid1 <- table(Birds$NameCode, Birds$ID_1)
BirdsGrid0 <- table(Birds$NameCode, Birds$ID)
#check density of bird sampling in each grid cell
x <- apply(BirdsGrid6, 2, sum)
BirdsArea6 <- mat.or.vec(nr=length(x), nc=2)
BirdsArea6[,1] <- as.numeric(colnames(BirdsGrid6))
BirdsArea6[,2] <- x
colnames(BirdsArea6) <- c("ID_6", "SppNum")
Temp <- merge(BirdsArea6, Areas6)
Birds6 <- unique(Temp)
x <- apply(BirdsGrid5, 2, sum)
BirdsArea5 <- mat.or.vec(nr=length(x), nc=2)
BirdsArea5[,1] <- as.numeric(colnames(BirdsGrid5))
BirdsArea5[,2] <- x
colnames(BirdsArea5) <- c("ID_5", "SppNum")
Temp <- merge(BirdsArea5, Areas5)
Birds5 <- unique(Temp)
x <- apply(BirdsGrid4, 2, sum)
BirdsArea4 <- mat.or.vec(nr=length(x), nc=2)
BirdsArea4[,1] <- as.numeric(colnames(BirdsGrid4))
BirdsArea4[,2] <- x
colnames(BirdsArea4) <- c("ID_4", "SppNum")
Temp <- merge(BirdsArea4, Areas4)
Birds4 <- unique(Temp)
x <- apply(BirdsGrid3, 2, sum)
BirdsArea3 <- mat.or.vec(nr=length(x), nc=2)
BirdsArea3[,1] <- as.numeric(colnames(BirdsGrid3))
BirdsArea3[,2] <- x
colnames(BirdsArea3) <- c("ID_3", "SppNum")
Temp <- merge(BirdsArea3, Areas3)
Birds3 <- unique(Temp)
x <- apply(BirdsGrid2, 2, sum)
BirdsArea2 <- mat.or.vec(nr=length(x), nc=2)
BirdsArea2[,1] <- as.numeric(colnames(BirdsGrid2))
BirdsArea2[,2] <- x
colnames(BirdsArea2) <- c("ID_2", "SppNum")
Temp <- merge(BirdsArea2, Areas2)
Birds2 <- unique(Temp)
x <- apply(BirdsGrid1, 2, sum)
BirdsArea1 <- mat.or.vec(nr=length(x), nc=2)
BirdsArea1[,1] <- as.numeric(colnames(BirdsGrid1))
BirdsArea1[,2] <- x
colnames(BirdsArea1) <- c("ID_1", "SppNum")
Temp <- merge(BirdsArea1, Areas1)
Birds1 <- unique(Temp)
x <- apply(BirdsGrid0, 2, sum)
BirdsArea0 <- mat.or.vec(nr=length(x), nc=2)
BirdsArea0[,1] <- as.numeric(colnames(BirdsGrid0))
BirdsArea0[,2] <- x
colnames(BirdsArea0) <- c("ID", "SppNum")
Temp <- merge(BirdsArea0, Areas0)
Birds0 <- unique(Temp)
#remove the upper and lower (2.5%) quantiles of areas sampled
Density <- Birds6[,2]/Birds6[,3]
Birds6 <- cbind(Birds6, Density)
UQ <- quantile(Birds6[,2]/Birds6[,3], probs=0.975)
LQ <- quantile(Birds6[,2]/Birds6[,3], probs=0.025)
Birds6 <- Birds6[which(Birds6$Density>LQ),]
Birds6 <- Birds6[which(Birds6$Density<UQ),]
Birds <- Birds[which(Birds$ID_6 %in% Birds6$ID_6),]
Density <- Birds5[,2]/Birds5[,3]
Birds5 <- cbind(Birds5, Density)
UQ <- quantile(Birds5[,2]/Birds5[,3], probs=0.975)
LQ <- quantile(Birds5[,2]/Birds5[,3], probs=0.025)
Birds5 <- Birds5[which(Birds5$Density>LQ),]
Birds5 <- Birds5[which(Birds5$Density<UQ),]
Birds <- Birds[which(Birds$ID_5 %in% Birds5$ID_5),]
Density <- Birds4[,2]/Birds4[,3]
Birds4 <- cbind(Birds4, Density)
UQ <- quantile(Birds4[,2]/Birds4[,3], probs=0.975)
LQ <- quantile(Birds4[,2]/Birds4[,3], probs=0.025)
Birds4 <- Birds4[which(Birds4$Density>LQ),]
Birds4 <- Birds4[which(Birds4$Density<UQ),]
Birds <- Birds[which(Birds$ID_4 %in% Birds4$ID_4),]
Density <- Birds3[,2]/Birds3[,3]
Birds3 <- cbind(Birds3, Density)
UQ <- quantile(Birds3[,2]/Birds3[,3], probs=0.975)
LQ <- quantile(Birds3[,2]/Birds3[,3], probs=0.025)
Birds3 <- Birds3[which(Birds3$Density>LQ),]
Birds3 <- Birds3[which(Birds3$Density<UQ),]
Birds <- Birds[which(Birds$ID_3 %in% Birds3$ID_3),]
Density <- Birds2[,2]/Birds2[,3]
Birds2 <- cbind(Birds2, Density)
UQ <- quantile(Birds2[,2]/Birds2[,3], probs=0.975)
LQ <- quantile(Birds2[,2]/Birds2[,3], probs=0.025)
Birds2 <- Birds2[which(Birds2$Density>LQ),]
Birds2 <- Birds2[which(Birds2$Density<UQ),]
Birds <- Birds[which(Birds$ID_2 %in% Birds2$ID_2),]
Density <- Birds1[,2]/Birds1[,3]
Birds1 <- cbind(Birds1, Density)
UQ <- quantile(Birds1[,2]/Birds1[,3], probs=0.975)
LQ <- quantile(Birds1[,2]/Birds1[,3], probs=0.025)
Birds1 <- Birds1[which(Birds1$Density>LQ),]
Birds1 <- Birds1[which(Birds1$Density<UQ),]
Birds <- Birds[which(Birds$ID_1 %in% Birds1$ID_1),]
Density <- Birds0[,2]/Birds0[,3]
Birds0 <- cbind(Birds0, Density)
UQ <- quantile(Birds0[,2]/Birds0[,3], probs=0.975)
LQ <- quantile(Birds0[,2]/Birds0[,3], probs=0.025)
Birds0 <- Birds0[which(Birds0$Density>LQ),]
Birds0 <- Birds0[which(Birds0$Density<UQ),]
Birds <- Birds[which(Birds$ID %in% Birds0$ID),]
#Have to redo the tables to incorporate changes
BirdsGrid6 <- table(Birds$NameCode, Birds$ID_6)
BirdsGrid5 <- table(Birds$NameCode, Birds$ID_5)
BirdsGrid4 <- table(Birds$NameCode, Birds$ID_4)
BirdsGrid3 <- table(Birds$NameCode, Birds$ID_3)
BirdsGrid2 <- table(Birds$NameCode, Birds$ID_2)
BirdsGrid1 <- table(Birds$NameCode, Birds$ID_1)
BirdsGrid0 <- table(Birds$NameCode, Birds$ID)
BirdsGrid6 <- ifelse(BirdsGrid6>=1, 1, 0)
BirdsGrid5 <- ifelse(BirdsGrid5>=1, 1, 0)
BirdsGrid4 <- ifelse(BirdsGrid4>=1, 1, 0)
BirdsGrid3 <- ifelse(BirdsGrid3>=1, 1, 0)
BirdsGrid2 <- ifelse(BirdsGrid2>=1, 1, 0)
BirdsGrid1 <- ifelse(BirdsGrid1>=1, 1, 0)
BirdsGrid0 <- ifelse(BirdsGrid0>=1, 1, 0)
#Run the path analysis
Paths <- read.csv("GridPathForAnalysis.csv")
PathsRed6 <- Paths[which(Paths$ID_6 %in% colnames(BirdsGrid6) & Paths$ID_6 %in% colnames(PlantsGrid6)), ]
PathsRed5 <- PathsRed6[which(PathsRed6$ID_5 %in% colnames(BirdsGrid5) & PathsRed6$ID_5 %in% colnames(PlantsGrid5)), ]
PathsRed4 <- PathsRed5[which(PathsRed5$ID_4 %in% colnames(BirdsGrid4) & PathsRed5$ID_4 %in% colnames(PlantsGrid4)), ]
PathsRed3 <- PathsRed4[which(PathsRed4$ID_3 %in% colnames(BirdsGrid3) & PathsRed4$ID_3 %in% colnames(PlantsGrid3)), ]
PathsRed2 <- PathsRed3[which(PathsRed3$ID_2 %in% colnames(BirdsGrid2) & PathsRed3$ID_2 %in% colnames(PlantsGrid2)), ]
PathsRed1 <- PathsRed2[which(PathsRed2$ID_1 %in% colnames(BirdsGrid1) & PathsRed2$ID_1 %in% colnames(PlantsGrid1)), ]
PathsRed0 <- PathsRed1[which(PathsRed1$ID %in% colnames(BirdsGrid0) & PathsRed1$ID %in% colnames(PlantsGrid0)), ] #number of rows here is used below - (129)
#save for area
PathsRed0AREA <- PathsRed0
PathsRed0 <- PathsRed0[,c(5, 11, 17, 23, 29, 35, 41)]
Orphaned <- list()
OrphanedBIRD <- list()
Lost <- list()
LostBIRD <- list()
Original <- list()
Remain <- list()
#use the 1st (or whatever) row of paths to look up the correct (matching ID) col in the proper Veg file and the correct (matching ID) col in the proper Bird file
Veg1 <- list()
Veg2 <- list()
Veg3 <- list()
Veg4 <- list()
Veg5 <- list()
Veg6 <- list()
Veg7 <- list()
Bird1 <- list()
Bird2 <- list()
Bird3 <- list()
Bird4 <- list()
Bird5 <- list()
Bird6 <- list()
Bird7 <- list()
Network1 <- list()
Network2 <- list()
Network3 <- list()
Network4 <- list()
Network5 <- list()
Network6 <- list()
Network7 <- list()
Network1R <- list()
Network2R <- list()
Network3R <- list()
Network4R <- list()
Network5R <- list()
Network6R <- list()
Network7R <- list()
x1 <- list()
x2 <- list()
x3 <- list()
x4 <- list()
x5 <- list()
x6 <- list()
y1 <- list()
y2 <- list()
y3 <- list()
y4 <- list()
y5 <- list()
y6 <- list()
y7 <- list()
y8 <- list()
y9 <- list()
y10 <- list()
y11 <- list()
y12 <- list()
y13 <- list()
y14 <- list()
y15 <- list()
y16 <- list()
y17 <- list()
y18 <- list()
y19 <- list()
y20 <- list()
y21 <- list()
temp1 <- list()
temp2 <- list()
temp3 <- list()
temp4 <- list()
temp5 <- list()
temp6 <- list()
temp7 <- list()
temp8 <- list()
temp9 <- list()
temp10 <- list()
temp11 <- list()
temp12 <- list()
temp13 <- list()
temp14 <- list()
temp15 <- list()
temp16 <- list()
temp17 <- list()
temp18 <- list()
temp19 <- list()
temp20 <- list()
temp21 <- list()
for(j in 1:129){
Veg1[[j]] <- as.data.frame(PlantsGrid6[,c(1,which(colnames(PlantsGrid6)==PathsRed0[j,7]))]) #col 7 is ID_6, which is Grid Size 6400
Veg1[[j]] <- Veg1[[j]][which(Veg1[[j]][,2]==1),] #uses only the spp that are present
Bird1[[j]] <- as.data.frame(BirdsGrid6[,c(1,which(colnames(BirdsGrid6)==PathsRed0[j,7]))]) #col 7 is ID_6, which is Grid Size 6400
Bird1[[j]] <- Bird1[[j]][which(Bird1[[j]][,2]==1),]
#construct initial network (Grid Size 6400)
#merge Veg with the Full Network to reduce to just Veg spp present
Network1[[j]] <- FN[which(FN$Scientific.Name %in% rownames(Veg1[[j]])),]
#Reduce the network to just bird species that are present
Network1R[[j]] <- Network1[[j]][,c(1,which(colnames(Network1[[j]])[-c(1:2)] %in% rownames(Bird1[[j]]))+2)]
#construct next network (Grid Size 1600)
Veg2[[j]] <- as.data.frame(PlantsGrid5[,c(1,which(colnames(PlantsGrid5)==PathsRed0[j,6]))]) #col 6 is ID_5, which is Grid Size 1600
Veg2[[j]] <- Veg2[[j]][which(Veg2[[j]][,2]==1),]
Bird2[[j]] <- as.data.frame(BirdsGrid5[,c(1,which(colnames(BirdsGrid5)==PathsRed0[j,6]))]) #col 6 is ID_5, which is Grid Size 1600
Bird2[[j]] <- Bird2[[j]][which(Bird2[[j]][,2]==1),]
#merge Veg with the Full Network to reduce to just Veg spp present
Network2[[j]] <- FN[which(FN$Scientific.Name %in% rownames(Veg2[[j]])),]
#Reduce the network to just bird species that are present
Network2R[[j]] <- Network2[[j]][,c(1,which(colnames(Network2[[j]])[-c(1:2)] %in% rownames(Bird2[[j]]))+2)]
#construct next network (Grid Size 400)
Veg3[[j]] <- as.data.frame(PlantsGrid4[,c(1,which(colnames(PlantsGrid4)==PathsRed0[j,5]))]) #col 5 is ID_4, which is Grid Size 400
Veg3[[j]] <- Veg3[[j]][which(Veg3[[j]][,2]==1),]
Bird3[[j]] <- as.data.frame(BirdsGrid4[,c(1,which(colnames(BirdsGrid4)==PathsRed0[j,5]))]) #col 5 is ID_4, which is Grid Size 400
Bird3[[j]] <- Bird3[[j]][which(Bird3[[j]][,2]==1),]
#merge Veg with the Full Network to reduce to just Veg spp present
Network3[[j]] <- FN[which(FN$Scientific.Name %in% rownames(Veg3[[j]])),]
#Reduce the network to just bird species that are present
Network3R[[j]] <- Network3[[j]][,c(1,which(colnames(Network3[[j]])[-c(1:2)] %in% rownames(Bird3[[j]]))+2)]
#construct next network (Grid Size 100)
Veg4[[j]] <- as.data.frame(PlantsGrid3[,c(1,which(colnames(PlantsGrid3)==PathsRed0[j,4]))]) #col 4 is ID_3, Grid Size 100
Veg4[[j]] <- Veg4[[j]][which(Veg4[[j]][,2]==1),]
Bird4[[j]] <- as.data.frame(BirdsGrid3[,c(1,which(colnames(BirdsGrid3)==PathsRed0[j,4]))]) #col 4 is ID_3, Grid Size 100
Bird4[[j]] <- Bird4[[j]][which(Bird4[[j]][,2]==1),]
#merge Veg with the Full Network to reduce to just Veg spp present
Network4[[j]] <- FN[which(FN$Scientific.Name %in% rownames(Veg4[[j]])),]
#Reduce the network to just bird species that are present
Network4R[[j]] <- Network4[[j]][,c(1,which(colnames(Network4[[j]])[-c(1:2)] %in% rownames(Bird4[[j]]))+2)]
#construct next network (Grid Size 25)
Veg5[[j]] <- as.data.frame(PlantsGrid2[,c(1,which(colnames(PlantsGrid2)==PathsRed0[j,3]))]) #col 3 is ID_2, which is Grid Size 25
Veg5[[j]] <- Veg5[[j]][which(Veg5[[j]][,2]==1),]
Bird5[[j]] <- as.data.frame(BirdsGrid2[,c(1,which(colnames(BirdsGrid2)==PathsRed0[j,3]))]) #col 3 is ID_2, which is Grid Size 25
Bird5[[j]] <- Bird5[[j]][which(Bird5[[j]][,2]==1),]
#merge Veg with the Full Network to reduce to just Veg spp present
Network5[[j]] <- FN[which(FN$Scientific.Name %in% rownames(Veg5[[j]])),]
#Reduce the network to just bird species that are present
Network5R[[j]] <- Network5[[j]][,c(1,which(colnames(Network5[[j]])[-c(1:2)] %in% rownames(Bird5[[j]]))+2)]
#construct next network (Grid Size 6.25)
Veg6[[j]] <- as.data.frame(PlantsGrid1[,c(1,which(colnames(PlantsGrid1)==PathsRed0[j,2]))]) #col 2 is ID_1, which is Grid Size 6.25
Veg6[[j]] <- Veg6[[j]][which(Veg6[[j]][,2]==1),]
Bird6[[j]] <- as.data.frame(BirdsGrid1[,c(1,which(colnames(BirdsGrid1)==PathsRed0[j,2]))]) #col 2 is ID_1, which is Grid Size 6.25
Bird6[[j]] <- Bird6[[j]][which(Bird6[[j]][,2]==1),]
#merge Veg with the Full Network to reduce to just Veg spp present
Network6[[j]] <- FN[which(FN$Scientific.Name %in% rownames(Veg6[[j]])),]
#Reduce the network to just bird species that are present
Network6R[[j]] <- Network6[[j]][,c(1,which(colnames(Network6[[j]])[-c(1:2)] %in% rownames(Bird6[[j]]))+2)]
Network6R[[j]] <- as.data.frame(Network6R[[j]])
#construct next Network (1.5625)
Veg7[[j]] <- as.data.frame(PlantsGrid0[,c(1,which(colnames(PlantsGrid0)==PathsRed0[j,1]))]) #col 1 is ID, which is Grid Size 1.5625
Veg7[[j]] <- Veg7[[j]][which(Veg7[[j]][,2]==1),]
Bird7[[j]] <- as.data.frame(BirdsGrid0[,c(1,which(colnames(BirdsGrid0)==PathsRed0[j,1]))]) #col 1 is ID, which is Grid Size 1.5625
Bird7[[j]] <- Bird7[[j]][which(Bird7[[j]][,2]==1),]
#merge Veg with the Full Network to reduce to just Veg spp present
Network7[[j]] <- as.data.frame(FN[which(FN$Scientific.Name %in% rownames(Veg7[[j]])),])
#Reduce the network to just bird species that are present
Network7R[[j]] <- Network7[[j]][,c(1,which(colnames(Network7[[j]])[-c(1:2)] %in% rownames(Bird7[[j]]))+2)]
Network7R[[j]] <- as.data.frame(Network7R[[j]])
}
# Propagate plant extinctions from scale 1 (largest grid) down the habitat-loss
# sequence: a plant row whose interaction values (all columns except the
# species-name column 1) sum to 0 in Network1R has no remaining frugivore
# partners ("orphaned") and is removed from every smaller-scale network.
for (j in 1:129) {
  # per-simulation orphan counter, one column per spatial scale
  Orphaned[[j]] <- mat.or.vec(nr = 1, nc = 7)
  # names (column 1) of plants whose scale-1 interaction rows are all zero
  orphan_ids <- Network1R[[j]][which(apply(data.matrix(Network1R[[j]][, -1]), 1, sum) == 0), 1]
  if (length(orphan_ids) > 0) {
    # BUG FIX: the original used `subset(X, X[,1] != orphan_ids)`, which
    # recycles the comparison when more than one species is orphaned and so
    # keeps/drops the wrong rows; `%in%` handles any number of orphans.
    Network2R[[j]] <- Network2R[[j]][!(Network2R[[j]][, 1] %in% orphan_ids), ]
    Network3R[[j]] <- Network3R[[j]][!(Network3R[[j]][, 1] %in% orphan_ids), ]
    Network4R[[j]] <- Network4R[[j]][!(Network4R[[j]][, 1] %in% orphan_ids), ]
    Network5R[[j]] <- Network5R[[j]][!(Network5R[[j]][, 1] %in% orphan_ids), ]
    Network6R[[j]] <- Network6R[[j]][!(Network6R[[j]][, 1] %in% orphan_ids), ]
    Network7R[[j]] <- Network7R[[j]][!(Network7R[[j]][, 1] %in% orphan_ids), ]
  }
}
# Propagate scale-2 plant orphans (all-zero interaction rows in Network2R)
# into every smaller-scale network.
for (j in 1:129) {
  orphan_ids <- Network2R[[j]][which(apply(data.matrix(Network2R[[j]][, -1]), 1, sum) == 0), 1]
  if (length(orphan_ids) > 0) {
    # BUG FIX: `!=` against a vector of orphan names recycles when more than
    # one species is orphaned; use %in% for correct set exclusion.
    Network3R[[j]] <- Network3R[[j]][!(Network3R[[j]][, 1] %in% orphan_ids), ]
    Network4R[[j]] <- Network4R[[j]][!(Network4R[[j]][, 1] %in% orphan_ids), ]
    Network5R[[j]] <- Network5R[[j]][!(Network5R[[j]][, 1] %in% orphan_ids), ]
    Network6R[[j]] <- Network6R[[j]][!(Network6R[[j]][, 1] %in% orphan_ids), ]
    Network7R[[j]] <- Network7R[[j]][!(Network7R[[j]][, 1] %in% orphan_ids), ]
  }
}
# Propagate scale-3 plant orphans (all-zero interaction rows in Network3R)
# into every smaller-scale network.
for (j in 1:129) {
  orphan_ids <- Network3R[[j]][which(apply(data.matrix(Network3R[[j]][, -1]), 1, sum) == 0), 1]
  if (length(orphan_ids) > 0) {
    # BUG FIX: `!=` against a vector of orphan names recycles when more than
    # one species is orphaned; use %in% for correct set exclusion.
    Network4R[[j]] <- Network4R[[j]][!(Network4R[[j]][, 1] %in% orphan_ids), ]
    Network5R[[j]] <- Network5R[[j]][!(Network5R[[j]][, 1] %in% orphan_ids), ]
    Network6R[[j]] <- Network6R[[j]][!(Network6R[[j]][, 1] %in% orphan_ids), ]
    Network7R[[j]] <- Network7R[[j]][!(Network7R[[j]][, 1] %in% orphan_ids), ]
  }
}
# Propagate scale-4 plant orphans (all-zero interaction rows in Network4R)
# into every smaller-scale network.
for (j in 1:129) {
  orphan_ids <- Network4R[[j]][which(apply(data.matrix(Network4R[[j]][, -1]), 1, sum) == 0), 1]
  if (length(orphan_ids) > 0) {
    # BUG FIX: `!=` against a vector of orphan names recycles when more than
    # one species is orphaned; use %in% for correct set exclusion.
    Network5R[[j]] <- Network5R[[j]][!(Network5R[[j]][, 1] %in% orphan_ids), ]
    Network6R[[j]] <- Network6R[[j]][!(Network6R[[j]][, 1] %in% orphan_ids), ]
    Network7R[[j]] <- Network7R[[j]][!(Network7R[[j]][, 1] %in% orphan_ids), ]
  }
}
# Propagate scale-5 plant orphans (all-zero interaction rows in Network5R)
# into the two smallest-scale networks.
for (j in 1:129) {
  orphan_ids <- Network5R[[j]][which(apply(data.matrix(Network5R[[j]][, -1]), 1, sum) == 0), 1]
  if (length(orphan_ids) > 0) {
    # BUG FIX: `!=` against a vector of orphan names recycles when more than
    # one species is orphaned; use %in% for correct set exclusion.
    Network6R[[j]] <- Network6R[[j]][!(Network6R[[j]][, 1] %in% orphan_ids), ]
    Network7R[[j]] <- Network7R[[j]][!(Network7R[[j]][, 1] %in% orphan_ids), ]
  }
}
#22 is empty for Network6R, #36 throws a warning message (species only present in one taxonomic group)
N <- c(1:129)
N <- N[-c(22,36)]
# Propagate scale-6 plant orphans (all-zero interaction rows in Network6R)
# into Network7R, skipping the two problem simulations excluded above.
for (j in seq_along(N)) {
  orphan_ids <- Network6R[[N[j]]][which(apply(data.matrix(Network6R[[N[j]]][, -1]), 1, sum) == 0), 1]
  if (length(orphan_ids) > 0) {
    # BUG FIX: `!=` against a vector of orphan names recycles when more than
    # one species is orphaned; use %in% for correct set exclusion.
    Network7R[[N[j]]] <- Network7R[[N[j]]][!(Network7R[[N[j]]][, 1] %in% orphan_ids), ]
  }
}
#because #36 throws a warning message
Network7R[[36]] <- Network6R[[36]][2,]
#take out the COLUMNS that were 0 (Orphaned spp) in the previous networks
# Bird columns whose interaction sums are zero in Network1R are orphaned
# birds; remove those columns from every smaller-scale network. The
# x*/y*/temp* lists are deliberately kept as globals because later sections
# of the script reference some of them (e.g. x4, x5).
for (j in 1:129) {
  if (length(which(apply(data.matrix(Network1R[[j]][,-1]), 2, sum)==0))>0){
    # names of the orphaned bird columns at scale 1
    x1[[j]] <- colnames(Network1R[[j]][,-1])
    x1[[j]] <- x1[[j]][which(apply(data.matrix(Network1R[[j]][,-1]), 2, sum)==0)]
    y1[[j]] <- colnames(Network2R[[j]])
    temp1[[j]] <- which(y1[[j]] %in% x1[[j]])
    # FIX: original wrote length(tempN[[j]]>0) — the length of a logical
    # vector; same truth value, but test the length explicitly for clarity.
    if (length(temp1[[j]]) > 0) {
      Network2R[[j]] <- Network2R[[j]][,-temp1[[j]]]
    }
    y2[[j]] <- colnames(Network3R[[j]])
    temp2[[j]] <- which(y2[[j]] %in% x1[[j]])
    if (length(temp2[[j]]) > 0) {
      Network3R[[j]] <- Network3R[[j]][,-temp2[[j]]]
    }
    y3[[j]] <- colnames(Network4R[[j]])
    temp3[[j]] <- which(y3[[j]] %in% x1[[j]])
    if (length(temp3[[j]]) > 0) {
      Network4R[[j]] <- Network4R[[j]][,-temp3[[j]]]
    }
    y4[[j]] <- colnames(Network5R[[j]])
    temp4[[j]] <- which(y4[[j]] %in% x1[[j]])
    if (length(temp4[[j]]) > 0) {
      Network5R[[j]] <- Network5R[[j]][,-temp4[[j]]]
    }
    y5[[j]] <- colnames(Network6R[[j]])
    temp5[[j]] <- which(y5[[j]] %in% x1[[j]])
    if (length(temp5[[j]]) > 0) {
      Network6R[[j]] <- Network6R[[j]][,-temp5[[j]]]
    }
    y6[[j]] <- colnames(Network7R[[j]])
    temp6[[j]] <- which(y6[[j]] %in% x1[[j]])
    if (length(temp6[[j]]) > 0) {
      Network7R[[j]] <- Network7R[[j]][,-temp6[[j]]]
    }
  }
}
# Remove bird columns orphaned at scale 2 (all-zero column sums in
# Network2R) from every smaller-scale network.
for (j in 1:129) {
  if (length(which(apply(data.matrix(Network2R[[j]][,-1]), 2, sum)==0))>0){
    # names of the orphaned bird columns at scale 2
    x2[[j]] <- colnames(Network2R[[j]][,-1])
    x2[[j]] <- x2[[j]][which(apply(data.matrix(Network2R[[j]][,-1]), 2, sum)==0)]
    y7[[j]] <- colnames(Network3R[[j]])
    temp7[[j]] <- which(y7[[j]] %in% x2[[j]])
    # FIX: original wrote length(tempN[[j]]>0); same truth value, clearer form
    if (length(temp7[[j]]) > 0) {
      Network3R[[j]] <- Network3R[[j]][,-temp7[[j]]]
    }
    y8[[j]] <- colnames(Network4R[[j]])
    temp8[[j]] <- which(y8[[j]] %in% x2[[j]])
    if (length(temp8[[j]]) > 0) {
      Network4R[[j]] <- Network4R[[j]][,-temp8[[j]]]
    }
    y9[[j]] <- colnames(Network5R[[j]])
    temp9[[j]] <- which(y9[[j]] %in% x2[[j]])
    if (length(temp9[[j]]) > 0) {
      Network5R[[j]] <- Network5R[[j]][,-temp9[[j]]]
    }
    y10[[j]] <- colnames(Network6R[[j]])
    temp10[[j]] <- which(y10[[j]] %in% x2[[j]])
    if (length(temp10[[j]]) > 0) {
      Network6R[[j]] <- Network6R[[j]][,-temp10[[j]]]
    }
    y11[[j]] <- colnames(Network7R[[j]])
    temp11[[j]] <- which(y11[[j]] %in% x2[[j]])
    if (length(temp11[[j]]) > 0) {
      Network7R[[j]] <- Network7R[[j]][,-temp11[[j]]]
    }
  }
}
# Remove bird columns orphaned at scale 3 (all-zero column sums in
# Network3R) from every smaller-scale network.
for (j in 1:129) {
  if (length(which(apply(data.matrix(Network3R[[j]][,-1]), 2, sum)==0))>0){
    # names of the orphaned bird columns at scale 3
    x3[[j]] <- colnames(Network3R[[j]][,-1])
    x3[[j]] <- x3[[j]][which(apply(data.matrix(Network3R[[j]][,-1]), 2, sum)==0)]
    y12[[j]] <- colnames(Network4R[[j]])
    temp12[[j]] <- which(y12[[j]] %in% x3[[j]])
    # FIX: original wrote length(tempN[[j]]>0); same truth value, clearer form
    if (length(temp12[[j]]) > 0) {
      Network4R[[j]] <- Network4R[[j]][,-temp12[[j]]]
    }
    y13[[j]] <- colnames(Network5R[[j]])
    temp13[[j]] <- which(y13[[j]] %in% x3[[j]])
    if (length(temp13[[j]]) > 0) {
      Network5R[[j]] <- Network5R[[j]][,-temp13[[j]]]
    }
    y14[[j]] <- colnames(Network6R[[j]])
    temp14[[j]] <- which(y14[[j]] %in% x3[[j]])
    if (length(temp14[[j]]) > 0) {
      Network6R[[j]] <- Network6R[[j]][,-temp14[[j]]]
    }
    y15[[j]] <- colnames(Network7R[[j]])
    temp15[[j]] <- which(y15[[j]] %in% x3[[j]])
    if (length(temp15[[j]]) > 0) {
      Network7R[[j]] <- Network7R[[j]][,-temp15[[j]]]
    }
  }
}
# Remove bird columns orphaned at scale 4 (all-zero column sums in
# Network4R) from Network5R. x4 is reused by a later loop for Network6R/7R,
# after two empty networks are patched.
for (j in 1:129) {
  if (length(which(apply(data.matrix(Network4R[[j]][,-1]), 2, sum)==0))>0){
    x4[[j]] <- colnames(Network4R[[j]][,-1])
    x4[[j]] <- x4[[j]][which(apply(data.matrix(Network4R[[j]][,-1]), 2, sum)==0)]
    y16[[j]] <- colnames(Network5R[[j]])
    temp16[[j]] <- which(y16[[j]] %in% x4[[j]])
    # FIX: original wrote length(tempN[[j]]>0); same truth value, clearer form
    if (length(temp16[[j]]) > 0) {
      Network5R[[j]] <- Network5R[[j]][,-temp16[[j]]]
    }
  }
}
#Network6R[[68]] is empty. But Network6R[[22]] is also empty and does not give warning messages.
Network6R[[68]] <- Network6R[[22]]
Network7R[[68]] <- Network7R[[22]]
# Continue removing the scale-4 orphaned bird columns (x4, computed in the
# previous loop under the same guard condition) from Network6R and Network7R.
for (j in 1:129) {
  if (length(which(apply(data.matrix(Network4R[[j]][,-1]), 2, sum)==0))>0){
    y17[[j]] <- colnames(Network6R[[j]])
    temp17[[j]] <- which(y17[[j]] %in% x4[[j]])
    # FIX: original wrote length(tempN[[j]]>0); same truth value, clearer form
    if (length(temp17[[j]]) > 0) {
      Network6R[[j]] <- Network6R[[j]][,-temp17[[j]]]
    }
    y18[[j]] <- colnames(Network7R[[j]])
    temp18[[j]] <- which(y18[[j]] %in% x4[[j]])
    if (length(temp18[[j]]) > 0) {
      Network7R[[j]] <- Network7R[[j]][,-temp18[[j]]]
    }
  }
}
# Remove bird columns orphaned at scale 5 (all-zero column sums in
# Network5R) from Network6R. x5 is reused by a later loop for Network7R.
for (j in 1:129) {
  if (length(which(apply(data.matrix(Network5R[[j]][,-1]), 2, sum)==0))>0){
    x5[[j]] <- colnames(Network5R[[j]])[-1]
    x5[[j]] <- x5[[j]][which(apply(data.matrix(Network5R[[j]][,-1]), 2, sum)==0)]
    y19[[j]] <- colnames(Network6R[[j]])
    temp19[[j]] <- which(y19[[j]] %in% x5[[j]])
    # FIX: original wrote length(tempN[[j]]>0); same truth value, clearer form
    if (length(temp19[[j]]) > 0) {
      Network6R[[j]] <- Network6R[[j]][,-temp19[[j]]]
    }
  }
}
#Network7R[[19]] and Network7R[[18]] are empty. But Network7R[[22]] is also empty and does not give warning messages.
Network7R[[19]] <- Network7R[[22]]
Network7R[[18]] <- Network7R[[22]]
# Continue removing the scale-5 orphaned bird columns (x5, computed in the
# previous loop under the same guard condition) from Network7R.
for (j in 1:129) {
  if (length(which(apply(data.matrix(Network5R[[j]][,-1]), 2, sum)==0))>0){
    y20[[j]] <- colnames(Network7R[[j]])
    temp20[[j]] <- which(y20[[j]] %in% x5[[j]])
    # FIX: original wrote length(tempN[[j]]>0); same truth value, clearer form
    if (length(temp20[[j]]) > 0) {
      Network7R[[j]] <- Network7R[[j]][,-temp20[[j]]]
    }
  }
}
#Network6R[[3]] is empty. But Network7R[[68]] is also empty and does not give warning messages.
# Replace networks that collapsed to empty data frames with a known-good
# empty network ([[68]], itself patched from [[22]] earlier) so the counting
# loops below run without errors; the true lost/orphaned counts for these
# simulations are entered manually further down in the script.
Network6R[[3]] <- Network6R[[68]]
Network7R[[3]] <- Network7R[[68]]
Network6R[[18]] <- Network6R[[68]]
Network7R[[18]] <- Network7R[[68]]
Network6R[[19]] <- Network6R[[68]]
Network7R[[19]] <- Network7R[[68]]
Network6R[[22]] <- Network6R[[68]]
Network7R[[22]] <- Network7R[[68]]
# Remove bird columns orphaned at scale 6 (all-zero column sums in
# Network6R) from Network7R.
for (j in 1:129) {
  # BUG FIX: the guard originally used margin 1 (plant rows) while x6 below
  # is computed over margin 2 (bird columns); every analogous section above
  # guards on margin 2, so this looks like a typo — guard on margin 2 here.
  if (length(which(apply(data.matrix(Network6R[[j]][,-1]), 2, sum)==0))>0){
    x6[[j]] <- colnames(Network6R[[j]])[-1]
    x6[[j]] <- x6[[j]][which(apply(data.matrix(Network6R[[j]][,-1]), 2, sum)==0)]
    y21[[j]] <- colnames(Network7R[[j]])
    temp21[[j]] <- which(y21[[j]] %in% x6[[j]])
    # FIX: original wrote length(tempN[[j]]>0); same truth value, clearer form
    if (length(temp21[[j]]) > 0) {
      Network7R[[j]] <- Network7R[[j]][,-temp21[[j]]]
    }
  }
}
#Fix any of the other empty 7R networks by replacing with an empty network that does not give warning messages.
# As above, [[22]] is a known-good empty network; the true counts for these
# simulations are entered manually in the correction section below.
Network7R[[13]] <- Network7R[[22]]
Network7R[[97]] <- Network7R[[22]]
Network7R[[102]] <- Network7R[[22]]
Network7R[[110]] <- Network7R[[22]]
Network7R[[112]] <- Network7R[[22]]
# For each of the 129 simulated paths, count the plant species orphaned at
# each of the seven grid sizes: a plant is orphaned when its row of
# interaction values (every column except the species-name column) sums to 0.
for (j in 1:129) {
  scale_nets <- list(Network1R[[j]], Network2R[[j]], Network3R[[j]],
                     Network4R[[j]], Network5R[[j]], Network6R[[j]],
                     Network7R[[j]])
  Orphaned[[j]] <- mat.or.vec(nr = 1, nc = 7)
  for (s in seq_along(scale_nets)) {
    row_sums <- apply(data.matrix(scale_nets[[s]][, -1]), 1, sum)
    Orphaned[[j]][s] <- sum(row_sums == 0)
  }
}
# For each simulated path, tally per habitat-loss step: orphaned birds
# (all-zero interaction columns), plant/bird species lost between
# consecutive scales, and the species counts before/after each step.
# length(net[,1]) is the row count (plant species); length(net[1,]) is the
# column count (bird species plus the species-name column) — the name
# column cancels out in the between-scale differences.
for(j in 1:129){
#count bird Orphaned species
OrphanedBIRD[[j]] <- mat.or.vec(nr=1, nc=7)
OrphanedBIRD[[j]][1] <- length(which(apply(data.matrix(Network1R[[j]][,-1]), 2, sum)==0))
OrphanedBIRD[[j]][2] <- length(which(apply(data.matrix(Network2R[[j]][,-1]), 2, sum)==0))
OrphanedBIRD[[j]][3] <- length(which(apply(data.matrix(Network3R[[j]][,-1]), 2, sum)==0))
OrphanedBIRD[[j]][4] <- length(which(apply(data.matrix(Network4R[[j]][,-1]), 2, sum)==0))
OrphanedBIRD[[j]][5] <- length(which(apply(data.matrix(Network5R[[j]][,-1]), 2, sum)==0))
OrphanedBIRD[[j]][6] <- length(which(apply(data.matrix(Network6R[[j]][,-1]), 2, sum)==0))
OrphanedBIRD[[j]][7] <- length(which(apply(data.matrix(Network7R[[j]][,-1]), 2, sum)==0))
#count lost (extinct) plant species
Lost[[j]] <- mat.or.vec(nr=1, nc=6)
#count lost (extinct) PLANT species: row-count difference between scales
Lost[[j]][1] <- length(Network1R[[j]][,1])-length(Network2R[[j]][,1])
Lost[[j]][2] <- length(Network2R[[j]][,1])-length(Network3R[[j]][,1])
Lost[[j]][3] <- length(Network3R[[j]][,1])-length(Network4R[[j]][,1])
Lost[[j]][4] <- length(Network4R[[j]][,1])-length(Network5R[[j]][,1])
Lost[[j]][5] <- length(Network5R[[j]][,1])-length(Network6R[[j]][,1])
Lost[[j]][6] <- length(Network6R[[j]][,1])-length(Network7R[[j]][,1])
#count lost (extinct) bird species
LostBIRD[[j]] <- mat.or.vec(nr=1, nc=6)
#count lost (extinct) BIRD species: column-count difference between scales
LostBIRD[[j]][1] <- length(Network1R[[j]][1,])-length(Network2R[[j]][1,])
LostBIRD[[j]][2] <- length(Network2R[[j]][1,])-length(Network3R[[j]][1,])
LostBIRD[[j]][3] <- length(Network3R[[j]][1,])-length(Network4R[[j]][1,])
LostBIRD[[j]][4] <- length(Network4R[[j]][1,])-length(Network5R[[j]][1,])
LostBIRD[[j]][5] <- length(Network5R[[j]][1,])-length(Network6R[[j]][1,])
LostBIRD[[j]][6] <- length(Network6R[[j]][1,])-length(Network7R[[j]][1,])
# plant species present BEFORE each area-loss step
Original[[j]] <- mat.or.vec(nr=1, nc=6)
Original[[j]][1] <- length(Network1R[[j]][,1])
Original[[j]][2] <- length(Network2R[[j]][,1])
Original[[j]][3] <- length(Network3R[[j]][,1])
Original[[j]][4] <- length(Network4R[[j]][,1])
Original[[j]][5] <- length(Network5R[[j]][,1])
Original[[j]][6] <- length(Network6R[[j]][,1])
#count initial "remaining" plant species
Remain[[j]] <- mat.or.vec(nr=1, nc=6)
#count remaining species AFTER each area-loss step
Remain[[j]][1] <- length(Network2R[[j]][,1])
Remain[[j]][2] <- length(Network3R[[j]][,1])
Remain[[j]][3] <- length(Network4R[[j]][,1])
Remain[[j]][4] <- length(Network5R[[j]][,1])
Remain[[j]][5] <- length(Network6R[[j]][,1])
Remain[[j]][6] <- length(Network7R[[j]][,1])
}
#this is the end of the 129 simulations
#when networks collapsed to no species within the simulations, we had to manually input the number of extinct and orphaned species
#these steps are necessary because numbers of species lost and orphaned for these networks is incorrect otherwise
# Each Lost*/Orphaned* element is a 1-row matrix, so [,k] addresses the
# count for habitat-loss step k. The affected simulations are those whose
# collapsed networks were replaced with placeholder empty networks above.
Lost[[68]][,5] <- 1 #at 6, 1 species is lost, 2 are orphaned
Lost[[68]][,6] <- 2 #at 7, 2 species are lost, none are orphaned
LostBIRD[[68]][,5] <- 1 #at 6, 1 species is lost, none are orphaned
LostBIRD[[68]][,6] <- 0 #at 7, 0 species is lost, none are orphaned
Lost[[18]][,5] <- 2 #at 6, 2 species lost, 2 species orphaned
LostBIRD[[18]][,5] <- 17 #at 6, 17 species lost, no species orphaned
Lost[[18]][,6] <- 2 #at 7, 2 species lost, no species orphaned
LostBIRD[[18]][,6] <- 0 #at 7, no species lost, no species orphaned
Lost[[19]][,5] <- 2 #at 6, 2 species lost, 2 species orphaned
LostBIRD[[19]][,5] <- 17 #at 6, 17 species lost, no species orphaned
Lost[[19]][,6] <- 2 #at 7, 2 species lost, no species orphaned
LostBIRD[[19]][,6] <- 0 #at 7, no species lost, no species orphaned
Lost[[3]][,5] <- 0 #at 6, no species lost, no species orphaned
Lost[[3]][,6] <- 1 #at 7, 1 species lost, no species orphaned
LostBIRD[[3]][,5] <- 14 #at 6, 14 species lost, no species orphaned
LostBIRD[[3]][,6] <- 0 #at 7, 0 species lost, no species orphaned
Lost[[13]][,6] <- 0 #no species lost, 1 species orphaned
LostBIRD[[13]][,6] <- 1 #1 species lost, no species orphaned
Lost[[97]][,6] <- 0 #no species lost, 1 species orphaned
LostBIRD[[97]][,6] <- 3 #3 species lost, no species orphaned
Lost[[102]][,6] <- 0 #no species lost, 1 species orphaned
LostBIRD[[102]][,6] <- 5 #5 species lost, no species orphaned
Lost[[110]][,6] <- 0 #no species lost, 2 species orphaned
LostBIRD[[110]][,6] <- 3 #3 species lost, no species orphaned
Lost[[112]][,6] <- 0 #no species lost, 4 species orphaned
LostBIRD[[112]][,6] <- 10 #10 species lost, no species orphaned
OrphanedBIRD[[3]][,6] <- 0 #this actually collapses at 6
OrphanedBIRD[[3]][,7] <- 0 #this actually collapses at 6
OrphanedBIRD[[13]][,7] <- 0
OrphanedBIRD[[18]][,6] <- 0 #this actually collapses at 6
OrphanedBIRD[[19]][,6] <- 0 #this actually collapses at 6
OrphanedBIRD[[18]][,7] <- 0 #this actually collapses at 6
OrphanedBIRD[[19]][,7] <- 0 #this actually collapses at 6
OrphanedBIRD[[22]][,6] <- 0 #this actually collapses at 6 - all of these species are orphaned at 5
OrphanedBIRD[[22]][,7] <- 0 #this actually collapses at 6
OrphanedBIRD[[68]][,6] <- 2 #this actually collapses at 6
OrphanedBIRD[[68]][,7] <- 0 #this actually collapses at 6
OrphanedBIRD[[97]][,7] <- 0
OrphanedBIRD[[102]][,6] <- 1
OrphanedBIRD[[102]][,7] <- 0
OrphanedBIRD[[110]][,7] <- 0
OrphanedBIRD[[112]][,7] <- 0
Orphaned[[3]][,6] <- 0 #this actually collapses at 6
Orphaned[[3]][,7] <- 0 #this actually collapses at 6
Orphaned[[13]][,7] <- 1
Orphaned[[18]][,6] <- 2 #this actually collapses at 6
Orphaned[[19]][,6] <- 2 #this actually collapses at 6
Orphaned[[18]][,7] <- 0 #this actually collapses at 6
Orphaned[[19]][,7] <- 0 #this actually collapses at 6
Orphaned[[22]][,6] <- 0 #this actually collapses at 6 - the one species is orphaned at 5
Orphaned[[22]][,7] <- 0 #this actually collapses at 6
Orphaned[[68]][,6] <- 2 #this actually collapses at 6
Orphaned[[68]][,7] <- 0 #this actually collapses at 6
Orphaned[[97]][,7] <- 1
Orphaned[[102]][,7] <- 1
Orphaned[[110]][,7] <- 2
Orphaned[[112]][,7] <- 4
# Stack the per-simulation 1-row matrices into 129-row summary matrices:
# one row per simulated path, one column per habitat-loss step.
#Number of species in network before area loss at each step
OriginalMat <- do.call(rbind, Original)
#Number of species remaining in network after area loss at each step
RemainMat <- do.call(rbind, Remain)
#Number of plant species extirpated by area loss at each step
LostMat <- do.call(rbind, Lost)
#Number of bird species extirpated by area loss at each step
LostBIRDMat <- do.call(rbind, LostBIRD)
#Number of plant species orphaned by area loss at each step
OrphanedMat <- do.call(rbind, Orphaned)
#All orphaned in the first column are Umbcal (Umbellularia californica), which is not connected to any frugivores in the network. The first column is the initial network, before the first area loss step. The first column was not used in analyses.
#Number of bird species orphaned by area loss at each step
OrphanedBIRDMat <- do.call(rbind, OrphanedBIRD)
|
/Scenario 1.R
|
no_license
|
mesandor/Habitat-Loss-Simulations
|
R
| false
| false
| 40,817
|
r
|
#Data files needed for analysis - all available in the Habitat-Loss-Simulations repository
#GridPathForAnalysis.csv
#BirdsGridsJoin.csv - each bird record also has the Grid ID of where it is located for all 7 grid sizes; zipped file because original was too large
#PlantsGridsJoin.csv - same for plants
#BirdNames.csv
#PlantNames.csv
#Read in data files
# NOTE(review): FullNetworkpresabs.csv is read below but is not in the file
# list above — confirm it also ships with the repository.
FN <- read.csv("FullNetworkpresabs.csv")
BirdsEverything <- read.csv("BirdsGridsJoin.csv")
PlantsEverything <- read.csv("PlantsGridsJoin.csv")
BirdNames <- read.csv("BirdNames.csv")
PlantNames <- read.csv("PlantNames.csv")
# attach species name codes to the occurrence records (merge on shared columns)
Birds <- merge(BirdsEverything, BirdNames)
Plants <- merge(PlantsEverything, PlantNames)
# Limit bird observations to April-September. Rows with a missing MONTH or
# NameCode are dropped as well, matching subset()'s NA-row handling in the
# original chained filters.
winter_months <- c(10, 11, 12, 1, 2, 3)
Birds <- Birds[!is.na(Birds$MONTH) & !(Birds$MONTH %in% winter_months), ]
# there are 5 bird species in the eBird files that aren't found in the full
# network - ACWO, BBWO, CAGU, HOOR, SCOR - drop them too
absent_spp <- c("ACWO", "BBWO", "CAGU", "HOOR", "SCOR")
Birds <- Birds[!is.na(Birds$NameCode) & !(Birds$NameCode %in% absent_spp), ]
#Load in the paths data file for the area sizes
Paths <- read.csv("GridPathForAnalysis.csv")
# keep only the (ID, Area) column pairs for the seven grid sizes
Paths <- Paths[,c(5,6,11,12,17,18,23,24,29,30,35,36,41,42)]
# two-column (ID, area) lookup matrices, one per grid size; naming the
# cbind() arguments sets the column names directly
Areas6 <- cbind(ID_6 = Paths$ID_6, Area6 = Paths$Area_6)
Areas5 <- cbind(ID_5 = Paths$ID_5, Area5 = Paths$Area_5)
Areas4 <- cbind(ID_4 = Paths$ID_4, Area4 = Paths$Area_4)
Areas3 <- cbind(ID_3 = Paths$ID_3, Area3 = Paths$Area_3)
Areas2 <- cbind(ID_2 = Paths$ID_2, Area2 = Paths$Area_2)
Areas1 <- cbind(ID_1 = Paths$ID_1, Area1 = Paths$Area_1)
Areas0 <- cbind(ID = Paths$ID, Area = Paths$Area)
#Use the below to modify the plant and bird files
# Keep only grid cells whose area is at least one full cell of the next
# smaller grid size (areas are in square meters). The bare length() calls
# report how many cells survive each cutoff when the lines are run
# interactively.
#remove all ID_6 that are smaller than the next grid size down (Area of ID_6 >= 1600000000)
AreasRemove6 <- subset(Areas6, Areas6[,2] >= 1600000000)
length(AreasRemove6[,1])
Areas6 <- AreasRemove6
#remove all of ID_5 that are smaller than the next grid size down (Area of ID_5 >= 400000000)
AreasRemove5 <- subset(Areas5, Areas5[,2] >= 400000000)
length(AreasRemove5[,1])
Areas5 <- AreasRemove5
#remove all of ID_4 that are smaller than the next grid size down (Area of ID_4 >= 100000000)
AreasRemove4 <- subset(Areas4, Areas4[,2] >= 100000000)
length(AreasRemove4[,1])
Areas4 <- AreasRemove4
#remove all of ID_3 that are smaller than the next grid size down (Area of ID_3 >= 25000000)
AreasRemove3 <- subset(Areas3, Areas3[,2] >= 25000000)
length(AreasRemove3[,1])
Areas3 <- AreasRemove3
#remove all of ID_2 that are smaller than the next grid size down (Area of ID_2 >= 6250000)
AreasRemove2 <- subset(Areas2, Areas2[,2] >= 6250000)
length(AreasRemove2[,1])
Areas2 <- AreasRemove2
#remove all of ID_1 that are smaller than the next grid size down (Area of ID_1 >= 1562500)
AreasRemove1 <- subset(Areas1, Areas1[,2] >= 1562500)
length(AreasRemove1[,1])
Areas1 <- AreasRemove1
#remove all of ID that are smaller than 1562500 (Area of ID_ >= 1562500)
AreasRemove0 <- subset(Areas0, Areas0[,2] >= 1562500)
length(AreasRemove0[,1])
Areas0 <- AreasRemove0
#separate into a file for each grid size - 6400 (Grid6), 1600 (Grid5), 400 (Grid4), 100 (Grid3), 25 (Grid2), 6.25 (Grid1), 1.5625 (Grid0) - and table of GridID (columns) by Spp (rows)
# Contingency tables: rows = plant species codes, columns = grid-cell IDs,
# entries = number of records of that species in that cell.
PlantsGrid6 <- table(Plants$NameCode, Plants$ID_6)
PlantsGrid5 <- table(Plants$NameCode, Plants$ID_5)
PlantsGrid4 <- table(Plants$NameCode, Plants$ID_4)
PlantsGrid3 <- table(Plants$NameCode, Plants$ID_3)
PlantsGrid2 <- table(Plants$NameCode, Plants$ID_2)
PlantsGrid1 <- table(Plants$NameCode, Plants$ID_1)
PlantsGrid0 <- table(Plants$NameCode, Plants$ID)
#check density of plant sampling in each grid cell
# For each grid size: total records per cell (column sums), joined by cell
# ID to the area lookup so sampling density (records / area) can be checked.
x <- apply(PlantsGrid6, 2, sum)
PlantsArea6 <- mat.or.vec(nr=length(x), nc=2)
PlantsArea6[,1] <- as.numeric(colnames(PlantsGrid6))
PlantsArea6[,2] <- x
colnames(PlantsArea6) <- c("ID_6", "SppNum")
Temp <- merge(PlantsArea6, Areas6)
Plants6 <- unique(Temp)
x <- apply(PlantsGrid5, 2, sum)
PlantsArea5 <- mat.or.vec(nr=length(x), nc=2)
PlantsArea5[,1] <- as.numeric(colnames(PlantsGrid5))
PlantsArea5[,2] <- x
colnames(PlantsArea5) <- c("ID_5", "SppNum")
Temp <- merge(PlantsArea5, Areas5)
Plants5 <- unique(Temp)
x <- apply(PlantsGrid4, 2, sum)
PlantsArea4 <- mat.or.vec(nr=length(x), nc=2)
PlantsArea4[,1] <- as.numeric(colnames(PlantsGrid4))
PlantsArea4[,2] <- x
colnames(PlantsArea4) <- c("ID_4", "SppNum")
Temp <- merge(PlantsArea4, Areas4)
Plants4 <- unique(Temp)
x <- apply(PlantsGrid3, 2, sum)
PlantsArea3 <- mat.or.vec(nr=length(x), nc=2)
PlantsArea3[,1] <- as.numeric(colnames(PlantsGrid3))
PlantsArea3[,2] <- x
colnames(PlantsArea3) <- c("ID_3", "SppNum")
Temp <- merge(PlantsArea3, Areas3)
Plants3 <- unique(Temp)
x <- apply(PlantsGrid2, 2, sum)
PlantsArea2 <- mat.or.vec(nr=length(x), nc=2)
PlantsArea2[,1] <- as.numeric(colnames(PlantsGrid2))
PlantsArea2[,2] <- x
colnames(PlantsArea2) <- c("ID_2", "SppNum")
Temp <- merge(PlantsArea2, Areas2)
Plants2 <- unique(Temp)
x <- apply(PlantsGrid1, 2, sum)
PlantsArea1 <- mat.or.vec(nr=length(x), nc=2)
PlantsArea1[,1] <- as.numeric(colnames(PlantsGrid1))
PlantsArea1[,2] <- x
colnames(PlantsArea1) <- c("ID_1", "SppNum")
Temp <- merge(PlantsArea1, Areas1)
Plants1 <- unique(Temp)
x <- apply(PlantsGrid0, 2, sum)
PlantsArea0 <- mat.or.vec(nr=length(x), nc=2)
PlantsArea0[,1] <- as.numeric(colnames(PlantsGrid0))
PlantsArea0[,2] <- x
colnames(PlantsArea0) <- c("ID", "SppNum")
Temp <- merge(PlantsArea0, Areas0)
Plants0 <- unique(Temp)
#remove the upper and lower (2.5%) quantiles of areas sampled
# For each grid size: compute records-per-area density, drop cells in the
# lowest and highest 2.5% of density, then restrict the raw Plants records
# to the surviving cells. Note the filters run sequentially, so each grid
# size is trimmed on the records remaining after the previous one.
Density <- Plants6[,2]/Plants6[,3]
Plants6 <- cbind(Plants6, Density)
UQ <- quantile(Plants6[,2]/Plants6[,3], probs=0.975)
LQ <- quantile(Plants6[,2]/Plants6[,3], probs=0.025)
Plants6 <- Plants6[which(Plants6$Density>LQ),]
Plants6 <- Plants6[which(Plants6$Density<UQ),]
Plants <- Plants[which(Plants$ID_6 %in% Plants6$ID_6),]
Density <- Plants5[,2]/Plants5[,3]
Plants5 <- cbind(Plants5, Density)
UQ <- quantile(Plants5[,2]/Plants5[,3], probs=0.975)
LQ <- quantile(Plants5[,2]/Plants5[,3], probs=0.025)
Plants5 <- Plants5[which(Plants5$Density>LQ),]
Plants5 <- Plants5[which(Plants5$Density<UQ),]
Plants <- Plants[which(Plants$ID_5 %in% Plants5$ID_5),]
Density <- Plants4[,2]/Plants4[,3]
Plants4 <- cbind(Plants4, Density)
UQ <- quantile(Plants4[,2]/Plants4[,3], probs=0.975)
LQ <- quantile(Plants4[,2]/Plants4[,3], probs=0.025)
Plants4 <- Plants4[which(Plants4$Density>LQ),]
Plants4 <- Plants4[which(Plants4$Density<UQ),]
Plants <- Plants[which(Plants$ID_4 %in% Plants4$ID_4),]
Density <- Plants3[,2]/Plants3[,3]
Plants3 <- cbind(Plants3, Density)
UQ <- quantile(Plants3[,2]/Plants3[,3], probs=0.975)
LQ <- quantile(Plants3[,2]/Plants3[,3], probs=0.025)
Plants3 <- Plants3[which(Plants3$Density>LQ),]
Plants3 <- Plants3[which(Plants3$Density<UQ),]
Plants <- Plants[which(Plants$ID_3 %in% Plants3$ID_3),]
Density <- Plants2[,2]/Plants2[,3]
Plants2 <- cbind(Plants2, Density)
UQ <- quantile(Plants2[,2]/Plants2[,3], probs=0.975)
LQ <- quantile(Plants2[,2]/Plants2[,3], probs=0.025)
Plants2 <- Plants2[which(Plants2$Density>LQ),]
Plants2 <- Plants2[which(Plants2$Density<UQ),]
Plants <- Plants[which(Plants$ID_2 %in% Plants2$ID_2),]
Density <- Plants1[,2]/Plants1[,3]
Plants1 <- cbind(Plants1, Density)
UQ <- quantile(Plants1[,2]/Plants1[,3], probs=0.975)
LQ <- quantile(Plants1[,2]/Plants1[,3], probs=0.025)
Plants1 <- Plants1[which(Plants1$Density>LQ),]
Plants1 <- Plants1[which(Plants1$Density<UQ),]
Plants <- Plants[which(Plants$ID_1 %in% Plants1$ID_1),]
Density <- Plants0[,2]/Plants0[,3]
Plants0 <- cbind(Plants0, Density)
UQ <- quantile(Plants0[,2]/Plants0[,3], probs=0.975)
LQ <- quantile(Plants0[,2]/Plants0[,3], probs=0.025)
Plants0 <- Plants0[which(Plants0$Density>LQ),]
Plants0 <- Plants0[which(Plants0$Density<UQ),]
Plants <- Plants[which(Plants$ID %in% Plants0$ID),]
#Redo the plant tables to incorporate changes
# Rebuild the species-by-cell tables from the density-trimmed Plants records.
PlantsGrid6 <- table(Plants$NameCode, Plants$ID_6)
PlantsGrid5 <- table(Plants$NameCode, Plants$ID_5)
PlantsGrid4 <- table(Plants$NameCode, Plants$ID_4)
PlantsGrid3 <- table(Plants$NameCode, Plants$ID_3)
PlantsGrid2 <- table(Plants$NameCode, Plants$ID_2)
PlantsGrid1 <- table(Plants$NameCode, Plants$ID_1)
PlantsGrid0 <- table(Plants$NameCode, Plants$ID)
#the above gives number of INDIVIDUALS of each spp - need to translate it to 0's and 1's so that just counting each spp ONCE (pres/abs)
PlantsGrid6 <- ifelse(PlantsGrid6>=1, 1, 0)
PlantsGrid5 <- ifelse(PlantsGrid5>=1, 1, 0)
PlantsGrid4 <- ifelse(PlantsGrid4>=1, 1, 0)
PlantsGrid3 <- ifelse(PlantsGrid3>=1, 1, 0)
PlantsGrid2 <- ifelse(PlantsGrid2>=1, 1, 0)
PlantsGrid1 <- ifelse(PlantsGrid1>=1, 1, 0)
PlantsGrid0 <- ifelse(PlantsGrid0>=1, 1, 0)
# Bird record-count tables (rows = species codes, columns = grid-cell IDs);
# these stay as counts here and are used for the density check below.
BirdsGrid6 <- table(Birds$NameCode, Birds$ID_6)
BirdsGrid5 <- table(Birds$NameCode, Birds$ID_5)
BirdsGrid4 <- table(Birds$NameCode, Birds$ID_4)
BirdsGrid3 <- table(Birds$NameCode, Birds$ID_3)
BirdsGrid2 <- table(Birds$NameCode, Birds$ID_2)
BirdsGrid1 <- table(Birds$NameCode, Birds$ID_1)
BirdsGrid0 <- table(Birds$NameCode, Birds$ID)
#check density of bird sampling in each grid cell
# Same procedure as for plants: total bird records per cell (column sums),
# joined by cell ID to the area lookup for each grid size.
x <- apply(BirdsGrid6, 2, sum)
BirdsArea6 <- mat.or.vec(nr=length(x), nc=2)
BirdsArea6[,1] <- as.numeric(colnames(BirdsGrid6))
BirdsArea6[,2] <- x
colnames(BirdsArea6) <- c("ID_6", "SppNum")
Temp <- merge(BirdsArea6, Areas6)
Birds6 <- unique(Temp)
x <- apply(BirdsGrid5, 2, sum)
BirdsArea5 <- mat.or.vec(nr=length(x), nc=2)
BirdsArea5[,1] <- as.numeric(colnames(BirdsGrid5))
BirdsArea5[,2] <- x
colnames(BirdsArea5) <- c("ID_5", "SppNum")
Temp <- merge(BirdsArea5, Areas5)
Birds5 <- unique(Temp)
x <- apply(BirdsGrid4, 2, sum)
BirdsArea4 <- mat.or.vec(nr=length(x), nc=2)
BirdsArea4[,1] <- as.numeric(colnames(BirdsGrid4))
BirdsArea4[,2] <- x
colnames(BirdsArea4) <- c("ID_4", "SppNum")
Temp <- merge(BirdsArea4, Areas4)
Birds4 <- unique(Temp)
x <- apply(BirdsGrid3, 2, sum)
BirdsArea3 <- mat.or.vec(nr=length(x), nc=2)
BirdsArea3[,1] <- as.numeric(colnames(BirdsGrid3))
BirdsArea3[,2] <- x
colnames(BirdsArea3) <- c("ID_3", "SppNum")
Temp <- merge(BirdsArea3, Areas3)
Birds3 <- unique(Temp)
x <- apply(BirdsGrid2, 2, sum)
BirdsArea2 <- mat.or.vec(nr=length(x), nc=2)
BirdsArea2[,1] <- as.numeric(colnames(BirdsGrid2))
BirdsArea2[,2] <- x
colnames(BirdsArea2) <- c("ID_2", "SppNum")
Temp <- merge(BirdsArea2, Areas2)
Birds2 <- unique(Temp)
x <- apply(BirdsGrid1, 2, sum)
BirdsArea1 <- mat.or.vec(nr=length(x), nc=2)
BirdsArea1[,1] <- as.numeric(colnames(BirdsGrid1))
BirdsArea1[,2] <- x
colnames(BirdsArea1) <- c("ID_1", "SppNum")
Temp <- merge(BirdsArea1, Areas1)
Birds1 <- unique(Temp)
x <- apply(BirdsGrid0, 2, sum)
BirdsArea0 <- mat.or.vec(nr=length(x), nc=2)
BirdsArea0[,1] <- as.numeric(colnames(BirdsGrid0))
BirdsArea0[,2] <- x
colnames(BirdsArea0) <- c("ID", "SppNum")
Temp <- merge(BirdsArea0, Areas0)
Birds0 <- unique(Temp)
#remove the upper and lower (2.5%) quantiles of areas sampled
#At each resolution: Density = record count / cell area; drop cells outside the
#2.5%-97.5% density quantiles, then restrict the raw Birds records to the cells
#that survive (via %in%). The filters cascade: each coarser level's pruning of
#Birds affects which records remain when the next level is filtered.
Density <- Birds6[,2]/Birds6[,3]
Birds6 <- cbind(Birds6, Density)
UQ <- quantile(Birds6[,2]/Birds6[,3], probs=0.975)
LQ <- quantile(Birds6[,2]/Birds6[,3], probs=0.025)
Birds6 <- Birds6[which(Birds6$Density>LQ),]
Birds6 <- Birds6[which(Birds6$Density<UQ),]
Birds <- Birds[which(Birds$ID_6 %in% Birds6$ID_6),]
Density <- Birds5[,2]/Birds5[,3]
Birds5 <- cbind(Birds5, Density)
UQ <- quantile(Birds5[,2]/Birds5[,3], probs=0.975)
LQ <- quantile(Birds5[,2]/Birds5[,3], probs=0.025)
Birds5 <- Birds5[which(Birds5$Density>LQ),]
Birds5 <- Birds5[which(Birds5$Density<UQ),]
Birds <- Birds[which(Birds$ID_5 %in% Birds5$ID_5),]
Density <- Birds4[,2]/Birds4[,3]
Birds4 <- cbind(Birds4, Density)
UQ <- quantile(Birds4[,2]/Birds4[,3], probs=0.975)
LQ <- quantile(Birds4[,2]/Birds4[,3], probs=0.025)
Birds4 <- Birds4[which(Birds4$Density>LQ),]
Birds4 <- Birds4[which(Birds4$Density<UQ),]
Birds <- Birds[which(Birds$ID_4 %in% Birds4$ID_4),]
Density <- Birds3[,2]/Birds3[,3]
Birds3 <- cbind(Birds3, Density)
UQ <- quantile(Birds3[,2]/Birds3[,3], probs=0.975)
LQ <- quantile(Birds3[,2]/Birds3[,3], probs=0.025)
Birds3 <- Birds3[which(Birds3$Density>LQ),]
Birds3 <- Birds3[which(Birds3$Density<UQ),]
Birds <- Birds[which(Birds$ID_3 %in% Birds3$ID_3),]
Density <- Birds2[,2]/Birds2[,3]
Birds2 <- cbind(Birds2, Density)
UQ <- quantile(Birds2[,2]/Birds2[,3], probs=0.975)
LQ <- quantile(Birds2[,2]/Birds2[,3], probs=0.025)
Birds2 <- Birds2[which(Birds2$Density>LQ),]
Birds2 <- Birds2[which(Birds2$Density<UQ),]
Birds <- Birds[which(Birds$ID_2 %in% Birds2$ID_2),]
Density <- Birds1[,2]/Birds1[,3]
Birds1 <- cbind(Birds1, Density)
UQ <- quantile(Birds1[,2]/Birds1[,3], probs=0.975)
LQ <- quantile(Birds1[,2]/Birds1[,3], probs=0.025)
Birds1 <- Birds1[which(Birds1$Density>LQ),]
Birds1 <- Birds1[which(Birds1$Density<UQ),]
Birds <- Birds[which(Birds$ID_1 %in% Birds1$ID_1),]
Density <- Birds0[,2]/Birds0[,3]
Birds0 <- cbind(Birds0, Density)
UQ <- quantile(Birds0[,2]/Birds0[,3], probs=0.975)
LQ <- quantile(Birds0[,2]/Birds0[,3], probs=0.025)
Birds0 <- Birds0[which(Birds0$Density>LQ),]
Birds0 <- Birds0[which(Birds0$Density<UQ),]
Birds <- Birds[which(Birds$ID %in% Birds0$ID),]
#Have to redo the tables to incorporate changes
#Rebuild one species-by-cell table per grid level from the now-filtered Birds
#records, then binarize counts to presence/absence (1/0). ifelse() keeps the
#table's dim and dimnames while collapsing the counts.
grid_id_cols <- c("ID_6", "ID_5", "ID_4", "ID_3", "ID_2", "ID_1", "ID")
grid_tab_names <- c(paste0("BirdsGrid", 6:1), "BirdsGrid0")
for (k in seq_along(grid_id_cols)) {
  cell_counts <- table(Birds$NameCode, Birds[[grid_id_cols[k]]])
  assign(grid_tab_names[k], ifelse(cell_counts >= 1, 1, 0))
}
#Run the path analysis
#Keep only nested grid paths for which BOTH birds and plants were sampled at
#every resolution, filtering from coarsest (ID_6) to finest (ID).
Paths <- read.csv("GridPathForAnalysis.csv")
PathsRed6 <- Paths[which(Paths$ID_6 %in% colnames(BirdsGrid6) & Paths$ID_6 %in% colnames(PlantsGrid6)), ]
PathsRed5 <- PathsRed6[which(PathsRed6$ID_5 %in% colnames(BirdsGrid5) & PathsRed6$ID_5 %in% colnames(PlantsGrid5)), ]
PathsRed4 <- PathsRed5[which(PathsRed5$ID_4 %in% colnames(BirdsGrid4) & PathsRed5$ID_4 %in% colnames(PlantsGrid4)), ]
PathsRed3 <- PathsRed4[which(PathsRed4$ID_3 %in% colnames(BirdsGrid3) & PathsRed4$ID_3 %in% colnames(PlantsGrid3)), ]
PathsRed2 <- PathsRed3[which(PathsRed3$ID_2 %in% colnames(BirdsGrid2) & PathsRed3$ID_2 %in% colnames(PlantsGrid2)), ]
PathsRed1 <- PathsRed2[which(PathsRed2$ID_1 %in% colnames(BirdsGrid1) & PathsRed2$ID_1 %in% colnames(PlantsGrid1)), ]
PathsRed0 <- PathsRed1[which(PathsRed1$ID %in% colnames(BirdsGrid0) & PathsRed1$ID %in% colnames(PlantsGrid0)), ] #number of rows here is used below - (129)
#save for area
PathsRed0AREA <- PathsRed0
#NOTE(review): every loop below hard-codes 1:129; it silently breaks if this
#reduction yields a different row count - confirm nrow(PathsRed0) == 129.
PathsRed0 <- PathsRed0[,c(5, 11, 17, 23, 29, 35, 41)]
#Result accumulators, one slot per grid path (filled in the 1:129 loops below).
for (nm in c("Orphaned", "OrphanedBIRD", "Lost", "LostBIRD", "Original", "Remain")) {
  assign(nm, list())
}
#use the 1st (or whatever) row of paths to look up the correct (matching ID) col in the proper Veg file and the correct (matching ID) col in the proper Bird file
#Per-level working lists: Veg*/Bird* hold the species present at each of the 7
#resolutions, Network*/Network*R the corresponding interaction networks, and
#x*/y*/temp* scratch vectors used while cascading orphan removals.
for (nm in c(paste0("Veg", 1:7), paste0("Bird", 1:7),
             paste0("Network", 1:7), paste0("Network", 1:7, "R"),
             paste0("x", 1:6), paste0("y", 1:21), paste0("temp", 1:21))) {
  assign(nm, list())
}
#For each of the 129 grid paths (j), build the plant x bird interaction network
#at all seven nesting levels: level 1 = coarsest cell (6400) down to level 7
#(1.5625). FN (defined elsewhere) is the full interaction matrix: column 1 holds
#plant names (Scientific.Name), columns 3 onward are bird species. PathsRed0
#columns run finest-to-coarsest, so column 7 is ID_6 and column 1 is ID.
for(j in 1:129){
Veg1[[j]] <- as.data.frame(PlantsGrid6[,c(1,which(colnames(PlantsGrid6)==PathsRed0[j,7]))]) #col 7 is ID_6, which is Grid Size 6400
Veg1[[j]] <- Veg1[[j]][which(Veg1[[j]][,2]==1),] #uses only the spp that are present
Bird1[[j]] <- as.data.frame(BirdsGrid6[,c(1,which(colnames(BirdsGrid6)==PathsRed0[j,7]))]) #col 7 is ID_6, which is Grid Size 6400
Bird1[[j]] <- Bird1[[j]][which(Bird1[[j]][,2]==1),]
#construct initial network (Grid Size 6400)
#merge Veg with the Full Network to reduce to just Veg spp present
Network1[[j]] <- FN[which(FN$Scientific.Name %in% rownames(Veg1[[j]])),]
#Reduce the network to just bird species that are present
Network1R[[j]] <- Network1[[j]][,c(1,which(colnames(Network1[[j]])[-c(1:2)] %in% rownames(Bird1[[j]]))+2)]
#construct next network (Grid Size 1600)
Veg2[[j]] <- as.data.frame(PlantsGrid5[,c(1,which(colnames(PlantsGrid5)==PathsRed0[j,6]))]) #col 6 is ID_5, which is Grid Size 1600
Veg2[[j]] <- Veg2[[j]][which(Veg2[[j]][,2]==1),]
Bird2[[j]] <- as.data.frame(BirdsGrid5[,c(1,which(colnames(BirdsGrid5)==PathsRed0[j,6]))]) #col 6 is ID_5, which is Grid Size 1600
Bird2[[j]] <- Bird2[[j]][which(Bird2[[j]][,2]==1),]
#merge Veg with the Full Network to reduce to just Veg spp present
Network2[[j]] <- FN[which(FN$Scientific.Name %in% rownames(Veg2[[j]])),]
#Reduce the network to just bird species that are present
Network2R[[j]] <- Network2[[j]][,c(1,which(colnames(Network2[[j]])[-c(1:2)] %in% rownames(Bird2[[j]]))+2)]
#construct next network (Grid Size 400)
Veg3[[j]] <- as.data.frame(PlantsGrid4[,c(1,which(colnames(PlantsGrid4)==PathsRed0[j,5]))]) #col 5 is ID_4, which is Grid Size 400
Veg3[[j]] <- Veg3[[j]][which(Veg3[[j]][,2]==1),]
Bird3[[j]] <- as.data.frame(BirdsGrid4[,c(1,which(colnames(BirdsGrid4)==PathsRed0[j,5]))]) #col 5 is ID_4, which is Grid Size 400
Bird3[[j]] <- Bird3[[j]][which(Bird3[[j]][,2]==1),]
#merge Veg with the Full Network to reduce to just Veg spp present
Network3[[j]] <- FN[which(FN$Scientific.Name %in% rownames(Veg3[[j]])),]
#Reduce the network to just bird species that are present
Network3R[[j]] <- Network3[[j]][,c(1,which(colnames(Network3[[j]])[-c(1:2)] %in% rownames(Bird3[[j]]))+2)]
#construct next network (Grid Size 100)
Veg4[[j]] <- as.data.frame(PlantsGrid3[,c(1,which(colnames(PlantsGrid3)==PathsRed0[j,4]))]) #col 4 is ID_3, Grid Size 100
Veg4[[j]] <- Veg4[[j]][which(Veg4[[j]][,2]==1),]
Bird4[[j]] <- as.data.frame(BirdsGrid3[,c(1,which(colnames(BirdsGrid3)==PathsRed0[j,4]))]) #col 4 is ID_3, Grid Size 100
Bird4[[j]] <- Bird4[[j]][which(Bird4[[j]][,2]==1),]
#merge Veg with the Full Network to reduce to just Veg spp present
Network4[[j]] <- FN[which(FN$Scientific.Name %in% rownames(Veg4[[j]])),]
#Reduce the network to just bird species that are present
Network4R[[j]] <- Network4[[j]][,c(1,which(colnames(Network4[[j]])[-c(1:2)] %in% rownames(Bird4[[j]]))+2)]
#construct next network (Grid Size 25)
Veg5[[j]] <- as.data.frame(PlantsGrid2[,c(1,which(colnames(PlantsGrid2)==PathsRed0[j,3]))]) #col 3 is ID_2, which is Grid Size 25
Veg5[[j]] <- Veg5[[j]][which(Veg5[[j]][,2]==1),]
Bird5[[j]] <- as.data.frame(BirdsGrid2[,c(1,which(colnames(BirdsGrid2)==PathsRed0[j,3]))]) #col 3 is ID_2, which is Grid Size 25
Bird5[[j]] <- Bird5[[j]][which(Bird5[[j]][,2]==1),]
#merge Veg with the Full Network to reduce to just Veg spp present
Network5[[j]] <- FN[which(FN$Scientific.Name %in% rownames(Veg5[[j]])),]
#Reduce the network to just bird species that are present
Network5R[[j]] <- Network5[[j]][,c(1,which(colnames(Network5[[j]])[-c(1:2)] %in% rownames(Bird5[[j]]))+2)]
#construct next network (Grid Size 6.25)
Veg6[[j]] <- as.data.frame(PlantsGrid1[,c(1,which(colnames(PlantsGrid1)==PathsRed0[j,2]))]) #col 2 is ID_1, which is Grid Size 6.25
Veg6[[j]] <- Veg6[[j]][which(Veg6[[j]][,2]==1),]
Bird6[[j]] <- as.data.frame(BirdsGrid1[,c(1,which(colnames(BirdsGrid1)==PathsRed0[j,2]))]) #col 2 is ID_1, which is Grid Size 6.25
Bird6[[j]] <- Bird6[[j]][which(Bird6[[j]][,2]==1),]
#merge Veg with the Full Network to reduce to just Veg spp present
Network6[[j]] <- FN[which(FN$Scientific.Name %in% rownames(Veg6[[j]])),]
#Reduce the network to just bird species that are present
Network6R[[j]] <- Network6[[j]][,c(1,which(colnames(Network6[[j]])[-c(1:2)] %in% rownames(Bird6[[j]]))+2)]
Network6R[[j]] <- as.data.frame(Network6R[[j]])
#construct next Network (1.5625)
Veg7[[j]] <- as.data.frame(PlantsGrid0[,c(1,which(colnames(PlantsGrid0)==PathsRed0[j,1]))]) #col 1 is ID, which is Grid Size 1.5625
Veg7[[j]] <- Veg7[[j]][which(Veg7[[j]][,2]==1),]
Bird7[[j]] <- as.data.frame(BirdsGrid0[,c(1,which(colnames(BirdsGrid0)==PathsRed0[j,1]))]) #col 1 is ID, which is Grid Size 1.5625
Bird7[[j]] <- Bird7[[j]][which(Bird7[[j]][,2]==1),]
#merge Veg with the Full Network to reduce to just Veg spp present
Network7[[j]] <- as.data.frame(FN[which(FN$Scientific.Name %in% rownames(Veg7[[j]])),])
#Reduce the network to just bird species that are present
Network7R[[j]] <- Network7[[j]][,c(1,which(colnames(Network7[[j]])[-c(1:2)] %in% rownames(Bird7[[j]]))+2)]
Network7R[[j]] <- as.data.frame(Network7R[[j]])
}
#Cascade plant extinctions from level 1: any plant (row) whose interactions sum
#to 0 at the coarsest network is removed from every finer-level network.
#NOTE(review): the subset() comparison RHS can be length > 1 when several plants
#are orphaned at once; `!=` then recycles element-wise and may fail to drop
#every intended row - confirm at most one orphan occurs per network in the data.
for(j in 1:129){
#count initial "Orphaned" plant species
Orphaned[[j]] <- mat.or.vec(nr=1, nc=7)
#take out the rows that were 0 (Orphaned spp) in the previous networks
if (length(which(apply(data.matrix(Network1R[[j]][,-1]), 1, sum)==0))>0){
Network2R[[j]] <- subset(Network2R[[j]], Network2R[[j]][,1]!=
Network1R[[j]][which(apply(data.matrix(Network1R[[j]][,-1]), 1, sum)==0),1])
Network3R[[j]] <- subset(Network3R[[j]], Network3R[[j]][,1]!=
Network1R[[j]][which(apply(data.matrix(Network1R[[j]][,-1]), 1, sum)==0),1])
Network4R[[j]] <- subset(Network4R[[j]], Network4R[[j]][,1]!=
Network1R[[j]][which(apply(data.matrix(Network1R[[j]][,-1]), 1, sum)==0),1])
Network5R[[j]] <- subset(Network5R[[j]], Network5R[[j]][,1]!=
Network1R[[j]][which(apply(data.matrix(Network1R[[j]][,-1]), 1, sum)==0),1])
Network6R[[j]] <- subset(Network6R[[j]], Network6R[[j]][,1]!=
Network1R[[j]][which(apply(data.matrix(Network1R[[j]][,-1]), 1, sum)==0),1])
Network7R[[j]] <- subset(Network7R[[j]], Network7R[[j]][,1]!=
Network1R[[j]][which(apply(data.matrix(Network1R[[j]][,-1]), 1, sum)==0),1])
}
}
#Repeat the plant-orphan cascade for levels 2 through 5: plants with zero
#interactions at level k are removed from levels k+1 .. 7. Margin 1 of apply()
#sums over rows (plants).
for(j in 1:129){
if (length(which(apply(data.matrix(Network2R[[j]][,-1]), 1, sum)==0))>0){
Network3R[[j]] <- subset(Network3R[[j]], Network3R[[j]][,1]!=
Network2R[[j]][which(apply(data.matrix(Network2R[[j]][,-1]), 1, sum)==0),1])
Network4R[[j]] <- subset(Network4R[[j]], Network4R[[j]][,1]!=
Network2R[[j]][which(apply(data.matrix(Network2R[[j]][,-1]), 1, sum)==0),1])
Network5R[[j]] <- subset(Network5R[[j]], Network5R[[j]][,1]!=
Network2R[[j]][which(apply(data.matrix(Network2R[[j]][,-1]), 1, sum)==0),1])
Network6R[[j]] <- subset(Network6R[[j]], Network6R[[j]][,1]!=
Network2R[[j]][which(apply(data.matrix(Network2R[[j]][,-1]), 1, sum)==0),1])
Network7R[[j]] <- subset(Network7R[[j]], Network7R[[j]][,1]!=
Network2R[[j]][which(apply(data.matrix(Network2R[[j]][,-1]), 1, sum)==0),1])
}
}
for(j in 1:129){
if (length(which(apply(data.matrix(Network3R[[j]][,-1]), 1, sum)==0))>0){
Network4R[[j]] <- subset(Network4R[[j]], Network4R[[j]][,1]!=
Network3R[[j]][which(apply(data.matrix(Network3R[[j]][,-1]), 1, sum)==0),1])
Network5R[[j]] <- subset(Network5R[[j]], Network5R[[j]][,1]!=
Network3R[[j]][which(apply(data.matrix(Network3R[[j]][,-1]), 1, sum)==0),1])
Network6R[[j]] <- subset(Network6R[[j]], Network6R[[j]][,1]!=
Network3R[[j]][which(apply(data.matrix(Network3R[[j]][,-1]), 1, sum)==0),1])
Network7R[[j]] <- subset(Network7R[[j]], Network7R[[j]][,1]!=
Network3R[[j]][which(apply(data.matrix(Network3R[[j]][,-1]), 1, sum)==0),1])
}
}
for(j in 1:129){
if (length(which(apply(data.matrix(Network4R[[j]][,-1]), 1, sum)==0))>0){
Network5R[[j]] <- subset(Network5R[[j]], Network5R[[j]][,1]!=
Network4R[[j]][which(apply(data.matrix(Network4R[[j]][,-1]), 1, sum)==0),1])
Network6R[[j]] <- subset(Network6R[[j]], Network6R[[j]][,1]!=
Network4R[[j]][which(apply(data.matrix(Network4R[[j]][,-1]), 1, sum)==0),1])
Network7R[[j]] <- subset(Network7R[[j]], Network7R[[j]][,1]!=
Network4R[[j]][which(apply(data.matrix(Network4R[[j]][,-1]), 1, sum)==0),1])
}
}
for(j in 1:129){
if (length(which(apply(data.matrix(Network5R[[j]][,-1]), 1, sum)==0))>0){
Network6R[[j]] <- subset(Network6R[[j]], Network6R[[j]][,1]!=
Network5R[[j]][which(apply(data.matrix(Network5R[[j]][,-1]), 1, sum)==0),1])
Network7R[[j]] <- subset(Network7R[[j]], Network7R[[j]][,1]!=
Network5R[[j]][which(apply(data.matrix(Network5R[[j]][,-1]), 1, sum)==0),1])
}
}
#22 is empty for Network6R, #36 throws a warning message (species only present in one taxonomic group)
#Level-6 plant cascade: paths 22 and 36 are excluded (hence 127 iterations) and
#path 36 is patched manually afterwards.
N <- c(1:129)
N <- N[-c(22,36)]
for(j in 1:127){
if (length(which(apply(data.matrix(Network6R[[N[j]]][,-1]), 1, sum)==0))>0){
Network7R[[N[j]]] <- subset(Network7R[[N[j]]], Network7R[[N[j]]][,1]!=
Network6R[[N[j]]][which(apply(data.matrix(Network6R[[N[j]]][,-1]), 1, sum)==0),1])
}
}
#because #36 throws a warning message
Network7R[[36]] <- Network6R[[36]][2,]
#take out the COLUMNS that were 0 (Orphaned spp) in the previous networks
#Bird-side cascade: birds (columns) with zero interactions at level 1 are
#removed from every finer-level network. Margin 2 of apply() sums columns.
#NOTE(review): `if(length(temp1[[j]]>0))` has a misplaced parenthesis - it
#evaluates length() of the comparison vector, which happens to behave the same
#as length(temp1[[j]]) > 0 here, but the intent is the latter.
for(j in 1:129){
if (length(which(apply(data.matrix(Network1R[[j]][,-1]), 2, sum)==0))>0){
x1[[j]] <- colnames(Network1R[[j]][,-1])
x1[[j]] <- x1[[j]][which(apply(data.matrix(Network1R[[j]][,-1]), 2, sum)==0)]
y1[[j]] <- colnames(Network2R[[j]])
temp1[[j]] <- which(y1[[j]] %in% x1[[j]])
if(length(temp1[[j]]>0)){
Network2R[[j]] <- Network2R[[j]][,-temp1[[j]]]
}
y2[[j]] <- colnames(Network3R[[j]])
temp2[[j]] <- which(y2[[j]] %in% x1[[j]])
if(length(temp2[[j]]>0)){
Network3R[[j]] <- Network3R[[j]][,-temp2[[j]]]
}
y3[[j]] <- colnames(Network4R[[j]])
temp3[[j]] <- which(y3[[j]] %in% x1[[j]])
if(length(temp3[[j]]>0)){
Network4R[[j]] <- Network4R[[j]][,-temp3[[j]]]
}
y4[[j]] <- colnames(Network5R[[j]])
temp4[[j]] <- which(y4[[j]] %in% x1[[j]])
if(length(temp4[[j]]>0)){
Network5R[[j]] <- Network5R[[j]][,-temp4[[j]]]
}
y5[[j]] <- colnames(Network6R[[j]])
temp5[[j]] <- which(y5[[j]] %in% x1[[j]])
if(length(temp5[[j]]>0)){
Network6R[[j]] <- Network6R[[j]][,-temp5[[j]]]
}
y6[[j]] <- colnames(Network7R[[j]])
temp6[[j]] <- which(y6[[j]] %in% x1[[j]])
if(length(temp6[[j]]>0)){
Network7R[[j]] <- Network7R[[j]][,-temp6[[j]]]
}
}
}
#Bird-orphan cascade from level 2: birds with no interactions at level 2 are
#dropped from levels 3 through 7.
for(j in 1:129){
if (length(which(apply(data.matrix(Network2R[[j]][,-1]), 2, sum)==0))>0){
x2[[j]] <- colnames(Network2R[[j]][,-1])
x2[[j]] <- x2[[j]][which(apply(data.matrix(Network2R[[j]][,-1]), 2, sum)==0)]
y7[[j]] <- colnames(Network3R[[j]])
temp7[[j]] <- which(y7[[j]] %in% x2[[j]])
if(length(temp7[[j]]>0)){
Network3R[[j]] <- Network3R[[j]][,-temp7[[j]]]
}
y8[[j]] <- colnames(Network4R[[j]])
temp8[[j]] <- which(y8[[j]] %in% x2[[j]])
if(length(temp8[[j]]>0)){
Network4R[[j]] <- Network4R[[j]][,-temp8[[j]]]
}
y9[[j]] <- colnames(Network5R[[j]])
temp9[[j]] <- which(y9[[j]] %in% x2[[j]])
if(length(temp9[[j]]>0)){
Network5R[[j]] <- Network5R[[j]][,-temp9[[j]]]
}
y10[[j]] <- colnames(Network6R[[j]])
temp10[[j]] <- which(y10[[j]] %in% x2[[j]])
if(length(temp10[[j]]>0)){
Network6R[[j]] <- Network6R[[j]][,-temp10[[j]]]
}
y11[[j]] <- colnames(Network7R[[j]])
temp11[[j]] <- which(y11[[j]] %in% x2[[j]])
if(length(temp11[[j]]>0)){
Network7R[[j]] <- Network7R[[j]][,-temp11[[j]]]
}
}
}
#Bird-orphan cascade from level 3: drop level-3 orphans from levels 4 through 7.
for(j in 1:129){
if (length(which(apply(data.matrix(Network3R[[j]][,-1]), 2, sum)==0))>0){
x3[[j]] <- colnames(Network3R[[j]][,-1])
x3[[j]] <- x3[[j]][which(apply(data.matrix(Network3R[[j]][,-1]), 2, sum)==0)]
y12[[j]] <- colnames(Network4R[[j]])
temp12[[j]] <- which(y12[[j]] %in% x3[[j]])
if(length(temp12[[j]]>0)){
Network4R[[j]] <- Network4R[[j]][,-temp12[[j]]]
}
y13[[j]] <- colnames(Network5R[[j]])
temp13[[j]] <- which(y13[[j]] %in% x3[[j]])
if(length(temp13[[j]]>0)){
Network5R[[j]] <- Network5R[[j]][,-temp13[[j]]]
}
y14[[j]] <- colnames(Network6R[[j]])
temp14[[j]] <- which(y14[[j]] %in% x3[[j]])
if(length(temp14[[j]]>0)){
Network6R[[j]] <- Network6R[[j]][,-temp14[[j]]]
}
y15[[j]] <- colnames(Network7R[[j]])
temp15[[j]] <- which(y15[[j]] %in% x3[[j]])
if(length(temp15[[j]]>0)){
Network7R[[j]] <- Network7R[[j]][,-temp15[[j]]]
}
}
}
#Bird-orphan cascade from level 4 (part 1): drop level-4 orphans from level 5
#only; levels 6 and 7 are handled in the next loop, after the empty network at
#path 68 is patched so data.matrix() does not warn.
for(j in 1:129){
if (length(which(apply(data.matrix(Network4R[[j]][,-1]), 2, sum)==0))>0){
x4[[j]] <- colnames(Network4R[[j]][,-1])
x4[[j]] <- x4[[j]][which(apply(data.matrix(Network4R[[j]][,-1]), 2, sum)==0)]
y16[[j]] <- colnames(Network5R[[j]])
temp16[[j]] <- which(y16[[j]] %in% x4[[j]])
if(length(temp16[[j]]>0)){
Network5R[[j]] <- Network5R[[j]][,-temp16[[j]]]
}
}
}
#Network6R[[68]] is empty. But Network6R[[22]] is also empty and does not give warning messages.
Network6R[[68]] <- Network6R[[22]]
Network7R[[68]] <- Network7R[[22]]
#Bird-orphan cascade from level 4 (part 2): drop level-4 orphans (x4, computed
#in the previous loop) from levels 6 and 7.
for(j in 1:129){
if (length(which(apply(data.matrix(Network4R[[j]][,-1]), 2, sum)==0))>0){
y17[[j]] <- colnames(Network6R[[j]])
temp17[[j]] <- which(y17[[j]] %in% x4[[j]])
if(length(temp17[[j]]>0)){
Network6R[[j]] <- Network6R[[j]][,-temp17[[j]]]
}
y18[[j]] <- colnames(Network7R[[j]])
temp18[[j]] <- which(y18[[j]] %in% x4[[j]])
if(length(temp18[[j]]>0)){
Network7R[[j]] <- Network7R[[j]][,-temp18[[j]]]
}
}
}
#Bird-orphan cascade from level 5 (part 1): drop level-5 orphans from level 6;
#level 7 is handled after the empty networks at paths 18 and 19 are patched.
for(j in 1:129){
if (length(which(apply(data.matrix(Network5R[[j]][,-1]), 2, sum)==0))>0){
x5[[j]] <- colnames(Network5R[[j]])[-1]
x5[[j]] <- x5[[j]][which(apply(data.matrix(Network5R[[j]][,-1]), 2, sum)==0)]
y19[[j]] <- colnames(Network6R[[j]])
temp19[[j]] <- which(y19[[j]] %in% x5[[j]])
if(length(temp19[[j]]>0)){
Network6R[[j]] <- Network6R[[j]][,-temp19[[j]]]
}
}
}
#Network7R[[19]] and Network7R[[18]] are empty. But Network7R[[22]] is also empty and does not give warning messages.
Network7R[[19]] <- Network7R[[22]]
Network7R[[18]] <- Network7R[[22]]
#Bird-orphan cascade from level 5 (part 2): drop level-5 orphans (x5) from
#level 7, then patch the remaining empty level-6/7 networks with a clean empty
#network so later data.matrix() calls do not warn.
for(j in 1:129){
if (length(which(apply(data.matrix(Network5R[[j]][,-1]), 2, sum)==0))>0){
y20[[j]] <- colnames(Network7R[[j]])
temp20[[j]] <- which(y20[[j]] %in% x5[[j]])
if(length(temp20[[j]]>0)){
Network7R[[j]] <- Network7R[[j]][,-temp20[[j]]]
}
}
}
#Network6R[[3]] is empty. But Network7R[[68]] is also empty and does not give warning messages.
Network6R[[3]] <- Network6R[[68]]
Network7R[[3]] <- Network7R[[68]]
Network6R[[18]] <- Network6R[[68]]
Network7R[[18]] <- Network7R[[68]]
Network6R[[19]] <- Network6R[[68]]
Network7R[[19]] <- Network7R[[68]]
Network6R[[22]] <- Network6R[[68]]
Network7R[[22]] <- Network7R[[68]]
#Bird-orphan cascade from level 6: birds (columns) with zero interactions at
#level 6 are dropped from level 7.
#Bug fix: the guard previously summed over ROWS (apply margin 1), inconsistent
#with every other column-orphan guard above (all use margin 2). With margin 1
#the loop could skip removing orphaned bird columns whenever no plant row was
#simultaneously orphaned; margin 2 matches the column test used to build x6.
#Also fixes the misplaced parenthesis in the inner length() guard
#(was length(temp21[[j]]>0), which only worked by accident).
for(j in 1:129){
  if (length(which(apply(data.matrix(Network6R[[j]][,-1]), 2, sum)==0))>0){
    x6[[j]] <- colnames(Network6R[[j]])[-1]
    x6[[j]] <- x6[[j]][which(apply(data.matrix(Network6R[[j]][,-1]), 2, sum)==0)]
    y21[[j]] <- colnames(Network7R[[j]])
    temp21[[j]] <- which(y21[[j]] %in% x6[[j]])
    if(length(temp21[[j]]) > 0){
      Network7R[[j]] <- Network7R[[j]][,-temp21[[j]]]
    }
  }
}
#Fix any of the other empty 7R networks by replacing with an empty network that does not give warning messages.
#Network7R[[22]] was itself replaced above with the warning-free empty network.
Network7R[[13]] <- Network7R[[22]]
Network7R[[97]] <- Network7R[[22]]
Network7R[[102]] <- Network7R[[22]]
Network7R[[110]] <- Network7R[[22]]
Network7R[[112]] <- Network7R[[22]]
#Tally orphaned PLANT species per path: for each of the 7 levels, count rows
#(plants) whose interactions sum to zero. Overwrites the Orphaned[[j]] slot
#initialised in the first cascade loop.
for(j in 1:129){
Orphaned[[j]] <- mat.or.vec(nr=1, nc=7)
#count plant Orphaned species
Orphaned[[j]][1] <- length(which(apply(data.matrix(Network1R[[j]][,-1]), 1, sum)==0))
Orphaned[[j]][2] <- length(which(apply(data.matrix(Network2R[[j]][,-1]), 1, sum)==0))
Orphaned[[j]][3] <- length(which(apply(data.matrix(Network3R[[j]][,-1]), 1, sum)==0))
Orphaned[[j]][4] <- length(which(apply(data.matrix(Network4R[[j]][,-1]), 1, sum)==0))
Orphaned[[j]][5] <- length(which(apply(data.matrix(Network5R[[j]][,-1]), 1, sum)==0))
Orphaned[[j]][6] <- length(which(apply(data.matrix(Network6R[[j]][,-1]), 1, sum)==0))
Orphaned[[j]][7] <- length(which(apply(data.matrix(Network7R[[j]][,-1]), 1, sum)==0))
}
#Per path: tally orphaned BIRD species at each of the 7 levels (zero column
#sums), species lost between consecutive levels (row/column count differences),
#and network sizes before (Original) and after (Remain) each area-loss step.
for(j in 1:129){
#count bird Orphaned species
OrphanedBIRD[[j]] <- mat.or.vec(nr=1, nc=7)
OrphanedBIRD[[j]][1] <- length(which(apply(data.matrix(Network1R[[j]][,-1]), 2, sum)==0))
OrphanedBIRD[[j]][2] <- length(which(apply(data.matrix(Network2R[[j]][,-1]), 2, sum)==0))
OrphanedBIRD[[j]][3] <- length(which(apply(data.matrix(Network3R[[j]][,-1]), 2, sum)==0))
OrphanedBIRD[[j]][4] <- length(which(apply(data.matrix(Network4R[[j]][,-1]), 2, sum)==0))
OrphanedBIRD[[j]][5] <- length(which(apply(data.matrix(Network5R[[j]][,-1]), 2, sum)==0))
OrphanedBIRD[[j]][6] <- length(which(apply(data.matrix(Network6R[[j]][,-1]), 2, sum)==0))
OrphanedBIRD[[j]][7] <- length(which(apply(data.matrix(Network7R[[j]][,-1]), 2, sum)==0))
#count lost (extinct) plant species
Lost[[j]] <- mat.or.vec(nr=1, nc=6)
#count lost (extinct) PLANT species: difference in row counts between levels
Lost[[j]][1] <- length(Network1R[[j]][,1])-length(Network2R[[j]][,1])
Lost[[j]][2] <- length(Network2R[[j]][,1])-length(Network3R[[j]][,1])
Lost[[j]][3] <- length(Network3R[[j]][,1])-length(Network4R[[j]][,1])
Lost[[j]][4] <- length(Network4R[[j]][,1])-length(Network5R[[j]][,1])
Lost[[j]][5] <- length(Network5R[[j]][,1])-length(Network6R[[j]][,1])
Lost[[j]][6] <- length(Network6R[[j]][,1])-length(Network7R[[j]][,1])
#count lost (extinct) bird species
LostBIRD[[j]] <- mat.or.vec(nr=1, nc=6)
#count lost (extinct) BIRD species: difference in column counts between levels
LostBIRD[[j]][1] <- length(Network1R[[j]][1,])-length(Network2R[[j]][1,])
LostBIRD[[j]][2] <- length(Network2R[[j]][1,])-length(Network3R[[j]][1,])
LostBIRD[[j]][3] <- length(Network3R[[j]][1,])-length(Network4R[[j]][1,])
LostBIRD[[j]][4] <- length(Network4R[[j]][1,])-length(Network5R[[j]][1,])
LostBIRD[[j]][5] <- length(Network5R[[j]][1,])-length(Network6R[[j]][1,])
LostBIRD[[j]][6] <- length(Network6R[[j]][1,])-length(Network7R[[j]][1,])
Original[[j]] <- mat.or.vec(nr=1, nc=6)
Original[[j]][1] <- length(Network1R[[j]][,1])
Original[[j]][2] <- length(Network2R[[j]][,1])
Original[[j]][3] <- length(Network3R[[j]][,1])
Original[[j]][4] <- length(Network4R[[j]][,1])
Original[[j]][5] <- length(Network5R[[j]][,1])
Original[[j]][6] <- length(Network6R[[j]][,1])
#count initial "remaining" plant species
Remain[[j]] <- mat.or.vec(nr=1, nc=6)
#count remaining species
Remain[[j]][1] <- length(Network2R[[j]][,1])
Remain[[j]][2] <- length(Network3R[[j]][,1])
Remain[[j]][3] <- length(Network4R[[j]][,1])
Remain[[j]][4] <- length(Network5R[[j]][,1])
Remain[[j]][5] <- length(Network6R[[j]][,1])
Remain[[j]][6] <- length(Network7R[[j]][,1])
}
#this is the end of the 129 simulations
#when networks collapsed to no species within the simulations, we had to manually input the number of extinct and orphaned species
#these steps are necessary because numbers of species lost and orphaned for these networks is incorrect otherwise
#The corrected values below were determined by inspecting the collapsed paths
#(indices 3, 13, 18, 19, 22, 68, 97, 102, 110, 112) by hand.
Lost[[68]][,5] <- 1 #at 6, 1 species is lost, 2 are orphaned
Lost[[68]][,6] <- 2 #at 7, 2 species are lost, none are orphaned
LostBIRD[[68]][,5] <- 1 #at 6, 1 species is lost, none are orphaned
LostBIRD[[68]][,6] <- 0 #at 7, 0 species is lost, none are orphaned
Lost[[18]][,5] <- 2 #at 6, 2 species lost, 2 species orphaned
LostBIRD[[18]][,5] <- 17 #at 6, 17 species lost, no species orphaned
Lost[[18]][,6] <- 2 #at 7, 2 species lost, no species orphaned
LostBIRD[[18]][,6] <- 0 #at 7, no species lost, no species orphaned
Lost[[19]][,5] <- 2 #at 6, 2 species lost, 2 species orphaned
LostBIRD[[19]][,5] <- 17 #at 6, 17 species lost, no species orphaned
Lost[[19]][,6] <- 2 #at 7, 2 species lost, no species orphaned
LostBIRD[[19]][,6] <- 0 #at 7, no species lost, no species orphaned
Lost[[3]][,5] <- 0 #at 6, no species lost, no species orphaned
Lost[[3]][,6] <- 1 #at 7, 1 species lost, no species orphaned
LostBIRD[[3]][,5] <- 14 #at 6, 14 species lost, no species orphaned
LostBIRD[[3]][,6] <- 0 #at 7, 0 species lost, no species orphaned
Lost[[13]][,6] <- 0 #no species lost, 1 species orphaned
LostBIRD[[13]][,6] <- 1 #1 species lost, no species orphaned
Lost[[97]][,6] <- 0 #no species lost, 1 species orphaned
LostBIRD[[97]][,6] <- 3 #3 species lost, no species orphaned
Lost[[102]][,6] <- 0 #no species lost, 1 species orphaned
LostBIRD[[102]][,6] <- 5 #5 species lost, no species orphaned
Lost[[110]][,6] <- 0 #no species lost, 2 species orphaned
LostBIRD[[110]][,6] <- 3 #3 species lost, no species orphaned
Lost[[112]][,6] <- 0 #no species lost, 4 species orphaned
LostBIRD[[112]][,6] <- 10 #10 species lost, no species orphaned
OrphanedBIRD[[3]][,6] <- 0 #this actually collapses at 6
OrphanedBIRD[[3]][,7] <- 0 #this actually collapses at 6
OrphanedBIRD[[13]][,7] <- 0
OrphanedBIRD[[18]][,6] <- 0 #this actually collapses at 6
OrphanedBIRD[[19]][,6] <- 0 #this actually collapses at 6
OrphanedBIRD[[18]][,7] <- 0 #this actually collapses at 6
OrphanedBIRD[[19]][,7] <- 0 #this actually collapses at 6
OrphanedBIRD[[22]][,6] <- 0 #this actually collapses at 6 - all of these species are orphaned at 5
OrphanedBIRD[[22]][,7] <- 0 #this actually collapses at 6
OrphanedBIRD[[68]][,6] <- 2 #this actually collapses at 6
OrphanedBIRD[[68]][,7] <- 0 #this actually collapses at 6
OrphanedBIRD[[97]][,7] <- 0
OrphanedBIRD[[102]][,6] <- 1
OrphanedBIRD[[102]][,7] <- 0
OrphanedBIRD[[110]][,7] <- 0
OrphanedBIRD[[112]][,7] <- 0
Orphaned[[3]][,6] <- 0 #this actually collapses at 6
Orphaned[[3]][,7] <- 0 #this actually collapses at 6
Orphaned[[13]][,7] <- 1
Orphaned[[18]][,6] <- 2 #this actually collapses at 6
Orphaned[[19]][,6] <- 2 #this actually collapses at 6
Orphaned[[18]][,7] <- 0 #this actually collapses at 6
Orphaned[[19]][,7] <- 0 #this actually collapses at 6
Orphaned[[22]][,6] <- 0 #this actually collapses at 6 - the one species is orphaned at 5
Orphaned[[22]][,7] <- 0 #this actually collapses at 6
Orphaned[[68]][,6] <- 2 #this actually collapses at 6
Orphaned[[68]][,7] <- 0 #this actually collapses at 6
Orphaned[[97]][,7] <- 1
Orphaned[[102]][,7] <- 1
Orphaned[[110]][,7] <- 2
Orphaned[[112]][,7] <- 4
#Stack the per-simulation 1-row matrices into one matrix with a row per
#simulation (129 rows) and a column per area-loss step.
stack_sim_rows <- function(sim_list, n_steps) {
  out <- mat.or.vec(nr = length(sim_list), nc = n_steps)
  for (idx in seq_along(sim_list)) {
    out[idx, ] <- sim_list[[idx]]
  }
  out
}
#Number of species in network before area loss at each step
OriginalMat <- stack_sim_rows(Original, 6)
#Number of species remaining in network after area loss at each step
RemainMat <- stack_sim_rows(Remain, 6)
#Number of plant species extirpated by area loss at each step
LostMat <- stack_sim_rows(Lost, 6)
#Number of bird species extirpated by area loss at each step
LostBIRDMat <- stack_sim_rows(LostBIRD, 6)
#Number of plant species orphaned by area loss at each step
OrphanedMat <- stack_sim_rows(Orphaned, 7)
#All orphaned in the first column are Umbcal (Umbellularia californica), which is not connected to any frugivores in the network. The first column is the initial network, before the first area loss step. The first column was not used in analyses.
#Number of bird species orphaned by area loss at each step
OrphanedBIRDMat <- stack_sim_rows(OrphanedBIRD, 7)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.r
\name{msgpack_format}
\alias{msgpack_format}
\title{Format data for msgpack}
\usage{
msgpack_format(x)
}
\arguments{
\item{x}{An R object.}
}
\value{
A formatted R object to use as input to msgpack_pack.
}
\description{
A helper function to format R data for input to msgpack
}
|
/man/msgpack_format.Rd
|
no_license
|
skyformat99/msgpack2R
|
R
| false
| true
| 368
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.r
\name{msgpack_format}
\alias{msgpack_format}
\title{Format data for msgpack}
\usage{
msgpack_format(x)
}
\arguments{
\item{x}{An R object.}
}
\value{
A formatted R object to use as input to msgpack_pack.
}
\description{
A helper function to format R data for input to msgpack
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apply_categorical_mappings.R
\name{apply.categorical.mappings}
\alias{apply.categorical.mappings}
\title{Applies categorical mappings}
\usage{
apply.categorical.mappings(data, categorical.mappings, map.mode = "auto",
progress = TRUE)
}
\arguments{
\item{data}{[required | data.frame] Dataset containing features to apply mappings to}
\item{categorical.mappings}{[required | list] Output from function map.categorical.encoding}
\item{map.mode}{[optional | character | default="auto"] Type of mappings to apply. Options are auto, target, proportional, ordinal, onehot, onehot.prop, report, where auto is a combination between onehot and target. Tracking features are created which flags if a feature has a low proportional category in it. Other types of feature engineering includes, weighted mean noise target encoding, proportional encoding, ordinal proportional encoding, one hot encoding and low proportional one hot encoding which flags all low proportional categories as "other". Report cleans up levels so that the data can be represented in reports and charts.}
\item{progress}{[optional | logical | default=TRUE] Display a progress bar}
}
\value{
Data frame with newly added features and original features
}
\description{
Applies feature engineered mapping tables for categorical features. Uses the output from the function map.categorical.encoding to apply these mappings.
}
\examples{
ce <- map.categorical.encoding(data = iris,x = "Species", y = "Petal.Width")
new_iris <- apply.categorical.mappings(data = iris, categorical.mappings = ce)
}
\author{
Xander Horn
}
|
/man/apply.categorical.mappings.Rd
|
permissive
|
XanderHorn/lazy
|
R
| false
| true
| 1,658
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apply_categorical_mappings.R
\name{apply.categorical.mappings}
\alias{apply.categorical.mappings}
\title{Applies categorical mappings}
\usage{
apply.categorical.mappings(data, categorical.mappings, map.mode = "auto",
progress = TRUE)
}
\arguments{
\item{data}{[required | data.frame] Dataset containing features to apply mappings to}
\item{categorical.mappings}{[required | list] Output from function map.categorical.encoding}
\item{map.mode}{[optional | character | default="auto"] Type of mappings to apply. Options are auto, target, proportional, ordinal, onehot, onehot.prop, report, where auto is a combination between onehot and target. Tracking features are created which flags if a feature has a low proportional category in it. Other types of feature engineering includes, weighted mean noise target encoding, proportional encoding, ordinal proportional encoding, one hot encoding and low proportional one hot encoding which flags all low proportional categories as "other". Report cleans up levels so that the data can be represented in reports and charts.}
\item{progress}{[optional | logical | default=TRUE] Display a progress bar}
}
\value{
Data frame with newly added features and original features
}
\description{
Applies feature engineered mapping tables for categorical features. Uses the output from the function map.categorical.encoding to apply these mappings.
}
\examples{
ce <- map.categorical.encoding(data = iris,x = "Species", y = "Petal.Width")
new_iris <- apply.categorical.mappings(data = iris, categorical.mappings = ce)
}
\author{
Xander Horn
}
|
#Install the 'party' package on first use, then attach it.
#requireNamespace() is the documented way to test package availability;
#the R docs explicitly discourage installed.packages() for this (it is slow
#and scans every library path).
if (!requireNamespace("party", quietly = TRUE)) {
  install.packages("party")
}
library(party)
########
#This is an attempt to build together a benchmarking environment
# find_predictions: predict per-option pricing errors for `predict_year`.
#
# For one pricing model (identified by path_info$model_path / file_name /
# price_column) the function:
#   1. loads `parameter_period` years of historical option quotes and the
#      model's prices for the same quotes,
#   2. computes a per-option training error according to `method`/`benchmark`,
#   3. fits party::ctree(Error ~ NetMaturity + Moneyness) on those errors,
#   4. returns the fitted node-average error for every option of predict_year.
#
# path_info        : one-row data.frame with model_path, file_name, price_column
# input_path       : root folder holding the model output sub-folders
# underlying_asset : ticker used in the file names (e.g. "NDX")
# predict_year     : year whose options receive error predictions
# parameter_period : number of look-back years for training (must be >= 1)
# method           : "arpe" or "mse" with benchmark "market"; "pnl" with
#                    benchmark "realizations"
# benchmark        : "market" (error vs. quoted Last price) or "realizations"
#                    (error vs. realized expiration P&L)
# refresh_period   : "yearly" (one tree for the whole year) or "monthly"
#                    (a fresh tree per calendar month on a rolling window)
# output_path      : folder for the yearly-mode error-data dump
# progressOutput   : print progress messages when TRUE
# Returns a numeric vector of predicted errors, one per filtered option row.
find_predictions<-function(path_info,input_path,underlying_asset,predict_year,parameter_period,method,benchmark,refresh_period="monthly",output_path,progressOutput=TRUE){
# Tree control: terminal nodes must hold at least 7 observations.
party_rules<-ctree_control(minbucket=7)
if(progressOutput){
print(paste0("Starting finding error predictions of ",path_info[,"file_name"]))
}
# Historical market quotes for the look-back window (predict_year-1 backwards).
market_data<-read.csv(paste0("~/Dropbox/PhD_Workshop/Input Files/Asset Options/",underlying_asset,"_",predict_year-1,"_options_filtered_A12.csv"),header=TRUE)
if(parameter_period > 1){
for(back_time in 2:parameter_period){
market_data<-rbind(market_data,read.csv(paste0("~/Dropbox/PhD_Workshop/Input Files/Asset Options/",underlying_asset,"_",predict_year-back_time,"_options_filtered_A12.csv"),header=TRUE))
}
}
# Drop rows whose expiration price is unknown (coded as -1).
market_data<-market_data[market_data$ExpirationPrices != -1,]
market_data$DataDate<-as.Date(market_data$DataDate)
market_data$RealExpiration<-as.Date(market_data$RealExpiration)
# NOTE(review): this guard runs after market_data was already loaded using the
# invalid parameter -- consider moving it to the top of the function.
if(parameter_period < 1){
stop("Parameter period should be one or more, preferably less than 5.")
}
# Model prices for the same look-back window, one file per year.
training_data<-read.csv(paste0(input_path,path_info[,"model_path"],"/",underlying_asset,"_",predict_year-1,"_",path_info[,"file_name"],".csv"),header=TRUE)
if(parameter_period > 1){
for(back_time in 2:parameter_period){
training_data<-rbind(training_data,read.csv(paste0(input_path,path_info[,"model_path"],"/",underlying_asset,"_",predict_year-back_time,"_",path_info[,"file_name"],".csv"),header=TRUE))
}
}
training_data<-training_data[training_data$ExpirationPrices != -1,]
training_data$DataDate<- as.Date(training_data$DataDate)
# Market and model files must describe the same option rows, in the same order.
if(nrow(training_data) != nrow(market_data)){
stop("Market data and training data are not compatible!!!!")
}
if(benchmark == "market"){
if(method == "arpe"){
# Absolute relative pricing error against the quoted market price.
est_error<-abs(training_data$Last-training_data[,path_info[,"price_column"]])/training_data$Last
}else if(method == "mse"){
# Squared pricing error against the quoted market price.
est_error<-(training_data$Last-training_data[,path_info[,"price_column"]])^2
}else{
stop("Wrong method chosen. We have only a limited number of options.")
}
}else if(benchmark == "realizations"){
if(method == "pnl"){
# Negated P&L of the position the model implies: a model price above the
# market quote means "buy" (earning +PDAbsolute), so profitable trades get
# a negative error -- smaller error is better throughout.
est_error<- ifelse(training_data[,path_info[,"price_column"]]>market_data$Last,-1,1)*market_data$PDAbsolute
}else{
stop("Wrong method chosen. We have only a limited number of options.")
}
}else{
stop("Choose a proper benchmark! Either market or realizations.")
}
# Options of the prediction year, priced by the same model.
prediction_data<-read.csv(paste0(input_path,path_info[,"model_path"],"/",underlying_asset,"_",predict_year,"_",path_info[,"file_name"],".csv"),header=TRUE)
prediction_data<-prediction_data[prediction_data$ExpirationPrices != -1,]
prediction_data$DataDate<- as.Date(prediction_data$DataDate)
if(refresh_period == "yearly"){
if(benchmark=="realizations"){
# Avoid look-ahead: train only on options already expired before predict_year.
pass_index<-which(market_data$RealExpiration < as.Date(paste0(predict_year,"-01-01")))
est_error<-est_error[pass_index]
training_data<-training_data[pass_index,]
}
cluster_data<-data.frame(ErrorValues=est_error,NetMaturity=training_data$NetMaturity,Moneyness=training_data$Moneyness)
cluster_learn<-ctree(ErrorValues ~ NetMaturity + Moneyness, data=cluster_data, controls=party_rules)
cluster_nodes<-predict(cluster_learn,type="node")
# Persist the training errors together with their tree-node assignments.
write.table(cbind(training_data,cluster_data$ErrorValues,cluster_nodes),paste0(output_path,underlying_asset,"_",predict_year,"_",method,"_",benchmark,"_",path_info[,"file_name"],"_with_lookback_",parameter_period,"y_errordata.csv"),sep=",",row.names=FALSE,append=FALSE)
# Predicted error of an option = mean training error of its terminal node.
node_averages<-aggregate(cluster_data$ErrorValues,by=list(cluster_nodes),"mean")
colnames(node_averages)<-c("node","average")
predict_nodes<-predict(cluster_learn, newdata=data.frame(NetMaturity=prediction_data$NetMaturity, Moneyness=prediction_data$Moneyness),type="node")
predicted_errors<-node_averages[match(predict_nodes,node_averages$node),"average"]
if(progressOutput){
print("Done")
}
return(predicted_errors)
}else if(refresh_period == "monthly"){
# Re-fit the tree every month on a rolling window that ends just before the
# month being predicted; -1 marks rows not yet assigned to any month.
predict_nodes<-rep(-1,nrow(prediction_data))
predicted_errors<-rep(-1,nrow(prediction_data))
for(i in 1:12){
if(benchmark=="realizations"){
prediction_index <- which(as.Date(ifelse(i<12,paste0(predict_year,"-",i+1,"-01"),paste0(predict_year+1,"-01-01"))) > prediction_data$DataDate & prediction_data$DataDate >= as.Date(paste0(predict_year,"-",i,"-01")))
# The training window additionally requires expiry before the predicted month.
training_index <- which(as.Date(paste0(predict_year,"-",i,"-01")) > training_data$DataDate & training_data$DataDate >= as.Date(paste0(predict_year - parameter_period,"-",i,"-01")) & market_data$RealExpiration < as.Date(paste0(predict_year,"-",i,"-01")))
}else{
prediction_index <- which(as.Date(ifelse(i<12,paste0(predict_year,"-",i+1,"-01"),paste0(predict_year+1,"-01-01"))) > prediction_data$DataDate & prediction_data$DataDate >= as.Date(paste0(predict_year,"-",i,"-01")))
training_index <- which(as.Date(paste0(predict_year,"-",i,"-01")) > training_data$DataDate & training_data$DataDate >= as.Date(paste0(predict_year - parameter_period,"-",i,"-01")))
}
cluster_data<-data.frame(ErrorValues=est_error[training_index],NetMaturity=training_data$NetMaturity[training_index],Moneyness=training_data$Moneyness[training_index])
cluster_learn<-ctree(ErrorValues ~ NetMaturity + Moneyness, data=cluster_data, controls=party_rules)
cluster_nodes<-predict(cluster_learn,type="node")
# write.table(cbind(training_data,cluster_data$ErrorValues,cluster_nodes),paste0(output_path,underlying_asset,"_",predict_year,"_",method,"_",benchmark,"_",path_info[,"file_name"],"_with_lookback_",parameter_period,"y_errordata.csv"),sep=",",row.names=FALSE,append=FALSE)
node_averages<-aggregate(cluster_data$ErrorValues,by=list(cluster_nodes),"mean")
colnames(node_averages)<-c("node","average")
predict_nodes[prediction_index]<-predict(cluster_learn, newdata=data.frame(NetMaturity=prediction_data$NetMaturity[prediction_index], Moneyness=prediction_data$Moneyness[prediction_index]),type="node")
predicted_errors[prediction_index]<-node_averages[match(predict_nodes[prediction_index],node_averages$node),"average"]
}
# Every prediction-year row must have been assigned to exactly one month.
if(any(predict_nodes < 0)){
stop("Some months are missing in prediction")
}
if(progressOutput){
print("Done")
}
return(predicted_errors)
}else{
stop("Refresh period is not chosen properly.")
}
}
# table_benchmark_summary: build a summary table of trading outcomes.
#
# pnl_data   : per-option data.frame with columns Type, Last, PDAbsolute, a
#              "selection" P&L column, one P&L column per contender (named by
#              contenders$file_name), and "market_long" (always-long P&L).
# contenders : data.frame describing the competing models.
# Returns a matrix: one row per statistic (trade counts, invested capital,
# profit/loss dollars, consensus/win/loss/doom splits) and one column per
# strategy ("selection", each contender, "market_long").
#
# Convention used throughout: a strategy's P&L equals -PDAbsolute exactly when
# it shorted the option, so (pnl + PDAbsolute) != 0 identifies long trades and
# (pnl + PDAbsolute) == 0 identifies shorts.
table_benchmark_summary<-function(pnl_data,contenders){
# Zero-filled sentinel first row (establishes the column layout); it is
# dropped by the final `[-1,]` before returning.
benchmark_summary<-rep(0,nrow(contenders)+2)
names(benchmark_summary)<-c("selection",contenders[,"file_name"],"market_long")
#Long Calls
# market_long (last column) is long in every contract, hence nrow(pnl_filter).
pnl_filter<-pnl_data[pnl_data$Type == "call",]
benchmark_summary<-rbind(benchmark_summary,"Long Calls" = c(colSums((pnl_filter[,c("selection",contenders[,"file_name"])]+pnl_filter[,"PDAbsolute"]) != 0),nrow(pnl_filter)))
#Short Calls
benchmark_summary<-rbind(benchmark_summary,"Short Calls" = c(colSums((pnl_filter[,c("selection",contenders[,"file_name"])]+pnl_filter[,"PDAbsolute"]) == 0),0))
#Long Puts
pnl_filter<-pnl_data[pnl_data$Type == "put",]
benchmark_summary<-rbind(benchmark_summary,"Long Puts" = c(colSums((pnl_filter[,c("selection",contenders[,"file_name"])]+pnl_filter[,"PDAbsolute"]) != 0),nrow(pnl_filter)))
#Short Puts
benchmark_summary<-rbind(benchmark_summary,"Short Puts" = c(colSums((pnl_filter[,c("selection",contenders[,"file_name"])]+pnl_filter[,"PDAbsolute"]) == 0),0))
#Total Longs
benchmark_summary<-rbind(benchmark_summary,"Total Longs" = colSums(benchmark_summary[c("Long Calls","Long Puts"),]))
#Total Shorts
benchmark_summary<-rbind(benchmark_summary,"Total Shorts" = colSums(benchmark_summary[c("Short Calls","Short Puts"),]))
#Total Long Investment
# Capital spent buying options (sum of Last over long trades).
benchmark_summary<-rbind(benchmark_summary,"Total Long Investment" = c(colSums(pnl_data$Last*((pnl_data[,c("selection",contenders[,"file_name"])]+pnl_data[,"PDAbsolute"]) != 0)),sum(pnl_data$Last)))
#Total Short Capital
# Premium collected from writing options (sum of Last over short trades).
benchmark_summary<-rbind(benchmark_summary,"Total Short Capital" = c(colSums(pnl_data$Last*((pnl_data[,c("selection",contenders[,"file_name"])]+pnl_data[,"PDAbsolute"]) == 0)),0))
#Total Nonnegative Contracts
benchmark_summary<-rbind(benchmark_summary,"Total Nonnegative Contracts" = colSums(pnl_data[,c("selection",contenders[,"file_name"],"market_long")] >= 0))
#Total Profit
benchmark_summary<-rbind(benchmark_summary,"Total Profit" = colSums((pnl_data[,c("selection",contenders[,"file_name"],"market_long")] >= 0)*(pnl_data[,c("selection",contenders[,"file_name"],"market_long")])))
#Total Negative Contracts
benchmark_summary<-rbind(benchmark_summary,"Total Negative Contracts" = colSums(pnl_data[,c("selection",contenders[,"file_name"],"market_long")] < 0))
#Total Loss
benchmark_summary<-rbind(benchmark_summary,"Total Loss" = colSums((pnl_data[,c("selection",contenders[,"file_name"],"market_long")] < 0)*(pnl_data[,c("selection",contenders[,"file_name"],"market_long")])))
#PnL
benchmark_summary<-rbind(benchmark_summary,"PnL" = colSums(benchmark_summary[c("Total Profit","Total Loss"),]))
#Total Consensus
# Rows where every contender posts a nonnegative P&L; the market_long column
# counts only the subset where the always-long position is also nonnegative.
benchmark_summary<-rbind(benchmark_summary,"Total Consensus" = c(rep(sum(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 1),nrow(contenders)+1),sum(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 1 & pnl_data[,"market_long"] >= 0)))
#Total Wins
# Wins: nonnegative P&L on rows where the contenders disagree (mixed signs).
benchmark_summary<-rbind(benchmark_summary,
"Total Wins" = colSums((pnl_data[,c("selection",contenders[,"file_name"],"market_long")] >= 0) & (rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) < 1 & rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) > 0)))
benchmark_summary["Total Wins","market_long"]<-benchmark_summary["Total Wins","market_long"] + sum(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 0 & pnl_data[,"market_long"] >= 0)
#Total Losses
benchmark_summary<-rbind(benchmark_summary,
"Total Losses" = colSums((pnl_data[,c("selection",contenders[,"file_name"],"market_long")] < 0) & (rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) < 1 & rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) > 0)))
benchmark_summary["Total Losses","market_long"] <- benchmark_summary["Total Losses","market_long"] + sum(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 1 & pnl_data[,"market_long"] < 0)
#Total Doom
# Doom: rows where every contender loses money.
benchmark_summary<-rbind(benchmark_summary,"Total Doom" = c(rep(sum(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 0),nrow(contenders)+1),sum(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 0 & pnl_data[,"market_long"] < 0)))
#Total Consensus Dollars
benchmark_summary<-rbind(benchmark_summary,"Total Consensus Dollars" = c(rep(sum((rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 1)*pnl_data[,"selection"]),nrow(contenders)+1),sum(pnl_data[,"market_long"]*(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 1 & pnl_data[,"market_long"] >= 0))))
#Total Wins Dollars
benchmark_summary<-rbind(benchmark_summary,
"Total Wins Dollars" = colSums(pnl_data[,c("selection",contenders[,"file_name"],"market_long")]*((pnl_data[,c("selection",contenders[,"file_name"],"market_long")] >= 0) & (rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) < 1 & rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) > 0))))
benchmark_summary["Total Wins Dollars","market_long"]<-benchmark_summary["Total Wins Dollars","market_long"] + sum(pnl_data[,"market_long"]*(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 0 & pnl_data[,"market_long"] >= 0))
#Total Losses
benchmark_summary<-rbind(benchmark_summary,
"Total Losses Dollars" = colSums(pnl_data[,c("selection",contenders[,"file_name"],"market_long")]*((pnl_data[,c("selection",contenders[,"file_name"],"market_long")] < 0) & (rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) < 1 & rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) > 0))))
benchmark_summary["Total Losses Dollars","market_long"]<-benchmark_summary["Total Losses Dollars","market_long"] + sum(pnl_data[,"market_long"]*(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 1 & pnl_data[,"market_long"] < 0))
#Total Doom Dollars
benchmark_summary<-rbind(benchmark_summary,"Total Doom Dollars" = c(rep(sum((rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 0)*pnl_data[,"selection"]),nrow(contenders)+1),sum(pnl_data[,"market_long"]*(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 0 & pnl_data[,"market_long"] < 0))))
# Drop the zero sentinel row added at the top.
return(benchmark_summary[-1,])
}
# benchmark_mondrian: run the model-selection backtest for one prediction year.
#
# For every contender model, predicts its per-option pricing error with
# find_predictions(), then for each option row picks the contender with the
# smallest predicted error ("selection"). Writes four CSVs (predicted errors,
# prices, per-option P&L, summary table) and returns the per-option P&L
# data.frame.
#
# contenders : data.frame with model_path, file_name, price_column per model
# seeds      : one RNG seed per contender (set before each find_predictions
#              call so the tree fitting is reproducible)
# Remaining arguments are forwarded to find_predictions(); see its header.
benchmark_mondrian<-function(contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="arpe",benchmark="market",underlying_asset,predict_year=2013,parameter_period=2,refresh_period="yearly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE){
if(length(seeds) != nrow(contenders)){
stop("Seeds are not equal to contenders")
}
market_prices<-read.csv(paste0("~/Dropbox/PhD_Workshop/Input Files/Asset Options/",underlying_asset,"_",predict_year,"_options_filtered_A12.csv"),header=TRUE)
if(progressOutput){
print("Predicting Errors")
}
# Build one column of predicted errors and one column of model prices per
# contender (error_sets / price_info grow column-wise).
for(i in 1:nrow(contenders)){
set.seed(seeds[i])
if(i == 1){
error_sets<-find_predictions(path_info=contenders[i,1:3] , input_path=input_path,underlying_asset=underlying_asset,predict_year=predict_year,parameter_period=parameter_period,method=method,benchmark=benchmark,refresh_period=refresh_period,output_path=output_path,progressOutput=progressOutput)
price_file<-read.csv(paste0(input_path,contenders[i,"model_path"],"/",underlying_asset,"_",predict_year,"_",contenders[i,"file_name"],".csv"),header=TRUE)
price_info<-price_file[,contenders[i,"price_column"]]
}
else{
error_sets<-cbind(error_sets,find_predictions(path_info=contenders[i,1:3] , input_path=input_path,underlying_asset=underlying_asset,predict_year=predict_year,parameter_period=parameter_period,method=method,benchmark=benchmark,refresh_period=refresh_period,output_path=output_path,progressOutput=progressOutput))
price_file<-read.csv(paste0(input_path,contenders[i,"model_path"],"/",underlying_asset,"_",predict_year,"_",contenders[i,"file_name"],".csv"),header=TRUE)
price_info<-cbind(price_info,price_file[,contenders[i,"price_column"]])
}
}
colnames(error_sets)<-contenders[,"file_name"]
if(progressOutput){
print("Writing Error Data")
}
write.table(error_sets,paste0(output_path,underlying_asset,"_",predict_year,"_",method,"_with_",benchmark,"_",parameter_period,"y_lookback_errors.csv"),sep=",",row.names=FALSE,append=FALSE)
# Align price_info with error_sets: find_predictions already dropped rows
# with unknown expiration prices, so apply the same filter here (and to
# market_prices a few lines below, after price_info is built).
price_info<-price_info[market_prices$ExpirationPrices != -1,]
# Per row, the contender with the smallest predicted error.
selections<-max.col(-error_sets)
selections_prices<-rep(0,length(selections))
for(i in 1:nrow(contenders)){
selections_prices[selections==i]<-price_info[selections==i,i]
}
market_prices<-market_prices[market_prices$ExpirationPrices != -1,]
price_info<-cbind(selections_prices,price_info,market_prices$Last)
colnames(price_info)<-c("selection",contenders[,"file_name"],"market_prices")
if(progressOutput){
print("Writing Price Data")
}
write.table(cbind(market_prices[,c("UnderlyingPrice","OptionSymbol","Type","DataDate","Expiration","RealExpiration","Moneyness","NetMaturity","Strike","ExpirationPrices","Last","ExpirationPayoff","PDAbsolute")],
price_info),paste0(output_path,underlying_asset,"_",predict_year,"_",method,"_with_",benchmark,"_",parameter_period,"y_lookback_",refresh_period,"_refresh_price_info.csv"),sep=",",row.names=FALSE,append=FALSE)
# return(price_info)
# P&L per strategy: long the option (+PDAbsolute) when the strategy's price
# exceeds the market quote, short (-PDAbsolute) otherwise.
pnl_info<-ifelse(price_info[,"selection"]>market_prices$Last,1,-1)*market_prices$PDAbsolute
for(i in 1:nrow(contenders)){
pnl_info<-cbind(pnl_info,ifelse(price_info[,i+1]>market_prices$Last,1,-1)*market_prices$PDAbsolute)
}
# market_long: always-long reference strategy.
pnl_info<-cbind(pnl_info,market_prices$PDAbsolute)
colnames(pnl_info)<-c("selection",contenders[,"file_name"],"market_long")
if(progressOutput){
print("Writing PnL Data")
}
pnl_data<-cbind(market_prices[,c("UnderlyingPrice","OptionSymbol","Type","DataDate","Expiration","RealExpiration","Moneyness","NetMaturity","Strike","ExpirationPrices","Last","ExpirationPayoff","PDAbsolute")],
pnl_info)
write.table(pnl_data,paste0(output_path,underlying_asset,"_",predict_year,"_",method,"_with_",benchmark,"_",parameter_period,"y_lookback_",refresh_period,"_refresh_pnl.csv"),sep=",",row.names=FALSE,append=FALSE)
if(progressOutput){
print("Writing Benchmark Summary")
}
# NOTE(review): the summary is written under input_path/Benchmarks/Summaries/
# while the other outputs go to output_path -- confirm this is intentional.
write.table(table_benchmark_summary(pnl_data,contenders),paste0(input_path,"Benchmarks/Summaries/",underlying_asset,"_",predict_year,"_",method,"_with_",benchmark,"_",parameter_period,"y_lookback_",refresh_period,"_refresh_benchmark_summary.csv"),sep=",",row.names=TRUE,col.names=NA,append=FALSE)
return(pnl_data)
}
# pnl_tots<-NULL
# Market-benchmark MSE runs for NDX, 2013 down to 2010: lookbacks of 1 and 2
# years under monthly, then yearly, tree refreshes.
for (prediction.date in 2013:2010) {
  for (refresh in c("monthly", "yearly")) {
    for (lookback in 1:2) {
      benchmark_mondrian(contenders, seeds,
        input_path = "~/Dropbox/PhD_Workshop/Output Files/",
        method = "mse", benchmark = "market",
        underlying_asset = "NDX", predict_year = prediction.date,
        parameter_period = lookback, refresh_period = refresh,
        output_path = "~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/")
    }
  }
}
# Market-benchmark ARPE runs for NDX, 2013 down to 2010: lookbacks of 1 and 2
# years under monthly, then yearly, tree refreshes.
for (prediction.date in 2013:2010) {
  for (refresh in c("monthly", "yearly")) {
    for (lookback in 1:2) {
      benchmark_mondrian(contenders, seeds,
        input_path = "~/Dropbox/PhD_Workshop/Output Files/",
        method = "arpe", benchmark = "market",
        underlying_asset = "NDX", predict_year = prediction.date,
        parameter_period = lookback, refresh_period = refresh,
        output_path = "~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/")
    }
  }
}
# Realized-P&L benchmark runs for NDX, 2013 down to 2010: lookbacks of 1 and 2
# years under monthly, then yearly, tree refreshes.
for (prediction.date in 2013:2010) {
  for (refresh in c("monthly", "yearly")) {
    for (lookback in 1:2) {
      benchmark_mondrian(contenders, seeds,
        input_path = "~/Dropbox/PhD_Workshop/Output Files/",
        method = "pnl", benchmark = "realizations",
        underlying_asset = "NDX", predict_year = prediction.date,
        parameter_period = lookback, refresh_period = refresh,
        output_path = "~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/")
    }
  }
}
# print(colSums(pnl_tots[,-1]))
# pnl_summaries: aggregate Mondrian benchmark results over a range of years
# and write one combined summary table.
#
# only_read = TRUE : re-read the per-year summary CSVs already on disk and sum
#                    them element-wise (no re-computation).
# only_read = FALSE: run benchmark_mondrian() for every year in
#                    year_from:year_to, stack the per-option P&L rows, and
#                    summarise the stacked data with table_benchmark_summary().
# All remaining arguments are forwarded verbatim to benchmark_mondrian().
# Returns nothing useful; its purpose is the aggregate CSV it writes.
pnl_summaries<-function(only_read=FALSE,year_from=2013,year_to=2011,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="pnl",benchmark="realizations",underlying_asset="SPX",parameter_period=1,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE){
#Calculate PnLs
pnl_tots<-NULL
for(prediction.date in year_from:year_to){
if(only_read){
# row.names=1 keeps the statistic labels out of the numeric columns so the
# element-wise sum below is well defined (the summary is written with
# row.names=TRUE, col.names=NA).
# NOTE(review): summaries are written under input_path/Benchmarks/Summaries/
# but read back from output_path here -- confirm the two locations match.
pnl_vals<-read.csv(paste0(output_path,underlying_asset,"_",prediction.date,"_",method,"_with_",benchmark,"_",parameter_period,"y_lookback_",refresh_period,"_refresh_benchmark_summary.csv"),header=TRUE,row.names=1)
# BUG FIX: the accumulator starts as NULL and `NULL + data.frame` never
# produced a running total; seed it with the first year's table instead.
if(is.null(pnl_tots)){
pnl_tots<-pnl_vals
}else{
pnl_tots<-pnl_tots + pnl_vals
}
}else{
if(progressOutput){
print(paste0("Starting ",prediction.date," benchmarks."))
}
pnl_vals<-benchmark_mondrian(contenders,seeds,input_path=input_path, method=method,benchmark=benchmark,underlying_asset=underlying_asset,predict_year=prediction.date,parameter_period=parameter_period,refresh_period=refresh_period,output_path=output_path,progressOutput=progressOutput)
pnl_tots<-rbind(pnl_tots,pnl_vals)
}
}
if(progressOutput){
print("Getting Aggregate Summmary")
}
if(only_read){
# pnl_tots is already a summed summary table; passing it through
# table_benchmark_summary() would fail (no per-option columns), so write
# the summed table directly.
write.table(pnl_tots,paste0(input_path,"Benchmarks/Summaries/",underlying_asset,"_from_",year_from,"_to_",year_to,"_",method,"_with_",benchmark,"_",parameter_period,"y_lookback_",refresh_period,"_refresh_benchmark_summary_aggregate.csv"),sep=",",row.names=TRUE,col.names=NA,append=FALSE)
}else{
write.table(table_benchmark_summary(pnl_tots,contenders),paste0(input_path,"Benchmarks/Summaries/",underlying_asset,"_from_",year_from,"_to_",year_to,"_",method,"_with_",benchmark,"_",parameter_period,"y_lookback_",refresh_period,"_refresh_benchmark_summary_aggregate.csv"),sep=",",row.names=TRUE,col.names=NA,append=FALSE)
}
# (Removed: a large commented-out duplicate of table_benchmark_summary() that
# previously lived here; use table_benchmark_summary() instead.)
}
# contenders<-data.frame(model_path=c("HestonNandi_GARCH","HestonNandi_GARCH","HestonNandi_GARCH","Black_Scholes","Black_Scholes","GHYP_Europt","GHYP_Europt","GHYP_Europt","GHYP_Europt","GHYP_Europt","GHYP_Europt","GHYP_Europt","GHYP_Europt"),
# file_name=c("HN_data_withdiv_symm_2y","HN_data_withdiv_symm_5y","HN_data_withdiv_asym_5y","BS_data_withdiv_2y","BS_data_withdiv_5y",
# "Levy_GHYP_data_withdiv_Esscher_50000_iteration_2y_asymmetric","Levy_GHYP_data_withdiv_MCMM_50000_iteration_2y_asymmetric","Levy_GHYP_data_withdiv_Esscher_50000_iteration_2y_symmetric","Levy_GHYP_data_withdiv_MCMM_50000_iteration_2y_symmetric",
# "Levy_GHYP_data_withdiv_Esscher_50000_iteration_5y_asymmetric","Levy_GHYP_data_withdiv_MCMM_50000_iteration_5y_asymmetric","Levy_GHYP_data_withdiv_Esscher_50000_iteration_5y_symmetric","Levy_GHYP_data_withdiv_MCMM_50000_iteration_5y_symmetric"),
# price_column=c("HN_prices","HN_prices","HN_prices","BS.HV2y","BS.HV2y","LE_prices","LM_prices","LE_prices","LM_prices","LE_prices","LM_prices","LE_prices","LM_prices"),
# stringsAsFactors=FALSE)
# contenders<-data.frame(model_path=c("HestonNandi_GARCH","HestonNandi_GARCH","HestonNandi_GARCH","HestonNandi_GARCH","HestonNandi_GARCH","HestonNandi_GARCH"),
# file_name=c("HN_data_withdiv_asym_5y_000000","HN_data_withdiv_asym_5y_002000","HN_data_withdiv_asym_5y_010000","HN_data_withdiv_asym_5y_012000","HN_data_withdiv_asym_5y_020000","HN_data_withdiv_asym_5y_022000"),
# price_column=c("HN_prices","HN_prices","HN_prices","HN_prices","HN_prices","HN_prices"),
# stringsAsFactors=FALSE)
# Model line-up for the benchmark: three Heston-Nandi GARCH variants and two
# Black-Scholes variants, each described by its output folder, file stem and
# the column holding its option prices.
contenders <- data.frame(
  model_path   = c(rep("HestonNandi_GARCH", 3), rep("Black_Scholes", 2)),
  file_name    = c("HN_data_withdiv_symm_2y", "HN_data_withdiv_symm_5y",
                   "HN_data_withdiv_asym_5y", "BS_data_withdiv_2y",
                   "BS_data_withdiv_5y"),
  price_column = c(rep("HN_prices", 3), rep("BS.HV2y", 2)),
  stringsAsFactors = FALSE
)
# One RNG seed per contender (the longer list matches the 13-model line-up):
#seeds<-c(7061414,4111447,515153,3504592,959323,872692,489137,506416,977659,927887,327129,274429,129331)
seeds <- c(7061414, 4111447, 515153, 3504592, 959323)
# Full parameter sweep: every combination of look-back window (1 or 2 years),
# tree refresh frequency, and error metric, for training windows ending in 2010
# and 2011. "pnl" is benchmarked against realized payoffs; "arpe"/"mse" against
# quoted market prices.
for (lookback_years in 1:2) {
  for (refresh in c("monthly", "yearly")) {
    for (metric in c("pnl", "arpe", "mse")) {
      benchmark_ref <- if (metric == "pnl") "realizations" else "market"
      for (last_training_year in c(2010, 2011)) {
        pnl_summaries(only_read = FALSE, year_from = 2013, year_to = last_training_year,
                      contenders, seeds,
                      input_path = "~/Dropbox/PhD_Workshop/Output Files/",
                      method = metric, benchmark = benchmark_ref,
                      underlying_asset = "NDX",
                      parameter_period = lookback_years, refresh_period = refresh,
                      output_path = "~/Dropbox/PhD_Workshop/Output Files/Survey_Results/Benchmarks/Mondrian/",
                      progressOutput = TRUE)
      }
    }
  }
}
# Individual re-runs of selected settings; each combination below is also
# covered by the sweep loop above.
# NOTE(review): `output_path` alternates between ".../Survey_Results/Benchmarks/Mondrian/"
# and ".../Benchmarks/Mondrian/" across these calls — confirm whether the two
# destinations are intentional or a copy-paste slip.
# P&L vs realized payoffs.
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2010,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="pnl",benchmark="realizations",underlying_asset="NDX",parameter_period=1,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Survey_Results/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2011,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="pnl",benchmark="realizations",underlying_asset="NDX",parameter_period=1,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2010,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="pnl",benchmark="realizations",underlying_asset="NDX",parameter_period=2,refresh_period="yearly",output_path="~/Dropbox/PhD_Workshop/Output Files/Survey_Results/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2011,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="pnl",benchmark="realizations",underlying_asset="NDX",parameter_period=2,refresh_period="yearly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
# Absolute relative pricing error vs market quotes.
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2010,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="arpe",benchmark="market",underlying_asset="NDX",parameter_period=1,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2011,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="arpe",benchmark="market",underlying_asset="NDX",parameter_period=1,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2010,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="arpe",benchmark="market",underlying_asset="NDX",parameter_period=2,refresh_period="yearly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2011,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="arpe",benchmark="market",underlying_asset="NDX",parameter_period=2,refresh_period="yearly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
# Mean squared error vs market quotes.
# NOTE(review): the (parameter_period, refresh_period) pairing below —
# (1,monthly), (2,monthly), (1,yearly), (2,yearly) — differs from the arpe
# group's pattern above; verify the intended combinations.
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2010,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="mse",benchmark="market",underlying_asset="NDX",parameter_period=1,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2011,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="mse",benchmark="market",underlying_asset="NDX",parameter_period=2,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2010,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="mse",benchmark="market",underlying_asset="NDX",parameter_period=1,refresh_period="yearly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2011,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="mse",benchmark="market",underlying_asset="NDX",parameter_period=2,refresh_period="yearly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
# pnl_2013<-benchmark_mondrian(contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="pnl",benchmark="realizations",underlying_asset="SPX",predict_year=2013,parameter_period=2,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/")
# pnl_2012<-benchmark_mondrian(contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="mse",benchmark="market",underlying_asset="SPX",predict_year=2012,parameter_period=2,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/")
# pnl_2011<-benchmark_mondrian(contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="mse",benchmark="market",underlying_asset="SPX",predict_year=2011,parameter_period=2,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/")
# colSums(pnl_2013)
# colSums(pnl_2012)
# colSums(pnl_2011)
# colSums(pnl_2013) + colSums(pnl_2012) + colSums(pnl_2011)
########
# #This is the temporary code for classification and benchmarking of models
# data_path<-"~/Dropbox/PhD_Workshop/Output Files/"
# #data_path<-"~/Dropbox/PhD_Workshop/Output Files/"
# #Set the asset name
# underlying_asset<-"SPX"
# #Set the data year to be predicted
# data_year<-2013
# #Set random seed for replication
# set.seed(7061414)
# #Import training data from the previous two years
# hn_training<-read.csv(paste0(data_path,"HestonNandi_GARCH/",underlying_asset,"_",data_year-1,"_HN_data.csv"),header=TRUE)
# hn_training<-rbind(hn_training,read.csv(paste0(data_path,"HestonNandi_GARCH/",underlying_asset,"_",data_year-2,"_HN_data.csv"),header=TRUE))
# #120874 rows including far future options
# #Calculate ARPE from the market prices
# hn_arpe<-abs(hn_training$Last-hn_training$HN_prices)/hn_training$Last
# #Calculate AE from expiration payoff. Formula might be wrong
# #hn_arpe<-abs(hn_training$ExpirationPayoff-hn_training$HN_prices)
# #Set Conditional Inference Tree rules
# party_rules<-ctree_control(minbucket=0.005*nrow(hn_training))
# #The rules below are for demonstration only
# #party_rules<-ctree_control(minbucket=0.20*nrow(hn_training),maxdepth=2)
# #Compose the data frame required for classification
# hn_required<-data.frame(arpe=hn_arpe,NetMaturity=hn_training$NetMaturity,Moneyness=hn_training$Moneyness)
# #Use the conditional inference tree to create nodes
# hn_classification<-ctree(arpe ~ NetMaturity + Moneyness, data=hn_required, controls=party_rules)
# #Add nodes to the data frame
# hn_required<-data.frame(hn_required,nodes=predict(hn_classification,type="node"))
# #Calculate node averages
# hn_node_averages<-aggregate(hn_required$arpe,by=list(hn_required$nodes),"mean")
# colnames(hn_node_averages)<-c("node","average")
# #Get the prediction year's data
# hn_predict<-read.csv(paste0(data_path,"HestonNandi_GARCH/",underlying_asset,"_",data_year,"_HN_data.csv"),header=TRUE)
# #Assign nodes for different maturity-moneyness combinations
# hn_predict_nodes<-predict(hn_classification, newdata=data.frame(NetMaturity=hn_predict$NetMaturity, Moneyness=hn_predict$Moneyness),type="node")
# #Predict error performance of the new nodes with the previous data
# hn_predict_performance<-hn_node_averages[match(hn_predict_nodes,hn_node_averages$node),"average"]
# #Repeat for
# #BS
# set.seed(4141607)
# bs_training<-read.csv(paste0(data_path,"Black_Scholes/",underlying_asset,"_",data_year-1,"_BS_data.csv"),header=TRUE)
# bs_training<-rbind(bs_training,read.csv(paste0(data_path,"Black_Scholes/",underlying_asset,"_",data_year-2,"_BS_data.csv"),header=TRUE))
# #120874 rows including far future options
# party_rules<-ctree_control(minbucket=0.005*nrow(bs_training))
# #party_rules<-ctree_control(minbucket=0.2*nrow(bs_training),maxdepth=2)
# bs_arpe<-abs(bs_training$Last-bs_training$BS.HV2y)/bs_training$Last
# #bs_arpe<-abs(bs_training$ExpirationPayoff-bs_training$BS.HV2y)
# bs_required<-data.frame(arpe=bs_arpe,NetMaturity=bs_training$NetMaturity,Moneyness=bs_training$Moneyness)
# bs_classification<-ctree(arpe ~ NetMaturity + Moneyness, data=bs_required)
# bs_required<-data.frame(bs_required,nodes=predict(bs_classification,type="node"))
# bs_node_averages<-aggregate(bs_required$arpe,by=list(bs_required$nodes),"mean")
# colnames(bs_node_averages)<-c("node","average")
# bs_predict<-read.csv(paste0(data_path,"Black_Scholes/",underlying_asset,"_",data_year,"_BS_data.csv"),header=TRUE)
# bs_predict_nodes<-predict(bs_classification, newdata=data.frame(NetMaturity=bs_predict$NetMaturity, Moneyness=bs_predict$Moneyness),type="node")
# bs_predict_performance<-bs_node_averages[match(bs_predict_nodes,bs_node_averages$node),"average"]
# HN_or_BS<-ifelse(max.col(cbind(-hn_predict_performance,-bs_predict_performance))==1,"HN","BS")
# BetPrices<-rep(0,length(HN_or_BS))
# BetPrices[HN_or_BS=="HN"]<-hn_predict$HN_prices[HN_or_BS=="HN"]
# BetPrices[HN_or_BS=="BS"]<-bs_predict$BS.HV2y[HN_or_BS=="BS"]
# option_data<-read.csv(paste0("~/Dropbox/PhD_Workshop/Input Files/Asset Options/SPX_",data_year,"_options_filtered_A12.csv"),header=TRUE)
# #ifelse(option_data$Last<BetPrices,1,-1)*option_data$PDAbsolute
# final_df<-data.frame(OptionSymbol=option_data$OptionSymbol, Type=option_data$Type, UnderlyingPrice=option_data$UnderlyingPrice, RealExpiration=option_data$RealExpiration, NetMaturity=option_data$NetMaturity,
# Strike=option_data$Strike, Last=option_data$Last, ExpirationPrices=option_data$ExpirationPrices, Moneyness=option_data$Moneyness , ExpirationPayoff=option_data$ExpirationPayoff,
# PDAbsolute=option_data$PDAbsolute, HN_prices=hn_predict$HN_prices, HN_payoff=ifelse(option_data$Last<hn_predict$HN_prices,1,-1)*option_data$PDAbsolute, BS_prices=bs_predict$BS.HV2y,
# BS_payoff=ifelse(option_data$Last<bs_predict$BS.HV2y,1,-1)*option_data$PDAbsolute, HN_or_BS=HN_or_BS, Bet_prices=BetPrices, Bet_payoff=ifelse(option_data$Last<BetPrices,1,-1)*option_data$PDAbsolute
# )
# final_df<-final_df[(final_df$ExpirationPrices != -1),]
# write.table(final_df,paste0(data_path,"Benchmarks/",underlying_asset,"_",data_year,"_HN_vs_BS_benchmark_2710_marketarpe.csv"),row.names=FALSE,sep=",")
# #####
# nrow(final_df)
# sum(final_df$HN_payoff>=0 & final_df$BS_payoff>=0)
# sum(final_df$HN_payoff<0 & final_df$BS_payoff>=0)
# sum(final_df$HN_payoff>=0 & final_df$BS_payoff<0)
# sum(final_df$HN_payoff<0 & final_df$BS_payoff<0)
# sum(final_df$HN_payoff[final_df$HN_payoff>=0 & final_df$BS_payoff>=0])
# sum(final_df$HN_payoff[final_df$HN_payoff<0 & final_df$BS_payoff>=0])
# sum(final_df$HN_payoff[final_df$HN_payoff>=0 & final_df$BS_payoff<0])
# sum(final_df$HN_payoff[final_df$HN_payoff<0 & final_df$BS_payoff<0])
# sum(final_df$BS_payoff[final_df$HN_payoff>=0 & final_df$BS_payoff>=0])
# sum(final_df$BS_payoff[final_df$HN_payoff<0 & final_df$BS_payoff>=0])
# sum(final_df$BS_payoff[final_df$HN_payoff>=0 & final_df$BS_payoff<0])
# sum(final_df$BS_payoff[final_df$HN_payoff<0 & final_df$BS_payoff<0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff >= 0 & final_df$HN_payoff>=0 & final_df$BS_payoff>=0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff >= 0 & final_df$HN_payoff<0 & final_df$BS_payoff>=0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff >= 0 & final_df$HN_payoff>=0 & final_df$BS_payoff<0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff >= 0 & final_df$HN_payoff<0 & final_df$BS_payoff<0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff < 0 & final_df$HN_payoff>=0 & final_df$BS_payoff>=0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff < 0 & final_df$HN_payoff<0 & final_df$BS_payoff>=0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff < 0 & final_df$HN_payoff>=0 & final_df$BS_payoff<0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff < 0 & final_df$HN_payoff<0 & final_df$BS_payoff<0])
# #####
# benchmark_table<-data.frame(Heston_Nandi=numeric(),Black_Scholes=numeric(),Model_Selection=numeric(),All_Long=numeric())
# #Long Calls
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_prices >= final_df$Last & final_df$Type == "call"),
# sum(final_df$BS_prices >= final_df$Last & final_df$Type == "call"),
# sum(final_df$Bet_prices >= final_df$Last & final_df$Type == "call"),
# sum(final_df$Type == "call")
# )
# )
# #Short Calls
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_prices < final_df$Last & final_df$Type == "call"),
# sum(final_df$BS_prices < final_df$Last & final_df$Type == "call"),
# sum(final_df$Bet_prices < final_df$Last & final_df$Type == "call"),
# 0
# )
# )
# #Long Puts
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_prices >= final_df$Last & final_df$Type == "put"),
# sum(final_df$BS_prices >= final_df$Last & final_df$Type == "put"),
# sum(final_df$Bet_prices >= final_df$Last & final_df$Type == "put"),
# sum(final_df$Type == "put")
# )
# )
# #Short Puts
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_prices < final_df$Last & final_df$Type == "put"),
# sum(final_df$BS_prices < final_df$Last & final_df$Type == "put"),
# sum(final_df$Bet_prices < final_df$Last & final_df$Type == "put"),
# 0
# )
# )
# #Total Longs
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_prices >= final_df$Last),
# sum(final_df$BS_prices >= final_df$Last),
# sum(final_df$Bet_prices >= final_df$Last),
# nrow(final_df)
# )
# )
# #Total Long Capital
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$Last[final_df$HN_prices >= final_df$Last]),
# sum(final_df$Last[final_df$BS_prices >= final_df$Last]),
# sum(final_df$Last[final_df$Bet_prices >= final_df$Last]),
# sum(final_df$Last)
# )
# )
# #Total Shorts
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_prices < final_df$Last),
# sum(final_df$BS_prices < final_df$Last),
# sum(final_df$Bet_prices < final_df$Last),
# 0
# )
# )
# #Total Short Capital
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$Last[final_df$HN_prices < final_df$Last]),
# sum(final_df$Last[final_df$BS_prices < final_df$Last]),
# sum(final_df$Last[final_df$Bet_prices < final_df$Last]),
# 0
# )
# )
# #Total positive contracts
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff>=0),
# sum(final_df$BS_payoff>=0),
# sum(final_df$Bet_payoff>=0),
# sum(final_df$PDAbsolute>=0)
# )
# )
# #Total negative contracts
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff<0),
# sum(final_df$BS_payoff<0),
# sum(final_df$Bet_payoff<0),
# sum(final_df$PDAbsolute<0)
# )
# )
# #Total profit
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff[final_df$HN_payoff>=0]),
# sum(final_df$BS_payoff[final_df$BS_payoff>=0]),
# sum(final_df$Bet_payoff[final_df$Bet_payoff>=0]),
# sum(final_df$PDAbsolute[final_df$PDAbsolute>=0])
# )
# )
# #Total loss
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff[final_df$HN_payoff<0]),
# sum(final_df$BS_payoff[final_df$BS_payoff<0]),
# sum(final_df$Bet_payoff[final_df$Bet_payoff<0]),
# sum(final_df$PDAbsolute[final_df$PDAbsolute<0])
# )
# )
# #P&L (Balance)
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff[final_df$HN_payoff>=0]) + sum(final_df$HN_payoff[final_df$HN_payoff<0]),
# sum(final_df$BS_payoff[final_df$BS_payoff>=0]) + sum(final_df$BS_payoff[final_df$BS_payoff<0]),
# sum(final_df$Bet_payoff[final_df$Bet_payoff>=0]) + sum(final_df$Bet_payoff[final_df$Bet_payoff<0]),
# sum(final_df$PDAbsolute[final_df$PDAbsolute>=0]) + sum(final_df$PDAbsolute[final_df$PDAbsolute<0])
# )
# )
# #Wins - Contract
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff>=0 & final_df$BS_payoff<0),
# sum(final_df$HN_payoff<0 & final_df$BS_payoff>=0),
# sum(final_df$Bet_payoff >= 0 & final_df$HN_payoff<0 & final_df$BS_payoff>=0) + sum(final_df$Bet_payoff >= 0 & final_df$HN_payoff>=0 & final_df$BS_payoff<0),
# NA
# )
# )
# #Losses - Contract
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff<0 & final_df$BS_payoff>=0),
# sum(final_df$HN_payoff>=0 & final_df$BS_payoff<0),
# sum(final_df$Bet_payoff < 0 & final_df$HN_payoff<0 & final_df$BS_payoff>=0) + sum(final_df$Bet_payoff < 0 & final_df$HN_payoff>=0 & final_df$BS_payoff<0),
# NA
# )
# )
# #Wins - Dollars
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff[final_df$HN_payoff>=0 & final_df$BS_payoff<0]),
# sum(final_df$BS_payoff[final_df$HN_payoff<0 & final_df$BS_payoff>=0]),
# sum(final_df$Bet_payoff[final_df$Bet_payoff >= 0 & final_df$HN_payoff<0 & final_df$BS_payoff>=0]) + sum(final_df$Bet_payoff[final_df$Bet_payoff >= 0 & final_df$HN_payoff>=0 & final_df$BS_payoff<0]),
# NA
# )
# )
# #Losses - Dollars
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff[final_df$HN_payoff<0 & final_df$BS_payoff>=0]),
# sum(final_df$BS_payoff[final_df$HN_payoff>=0 & final_df$BS_payoff<0]),
# sum(final_df$Bet_payoff[final_df$Bet_payoff < 0 & final_df$HN_payoff<0 & final_df$BS_payoff>=0]) + sum(final_df$Bet_payoff[final_df$Bet_payoff < 0 & final_df$HN_payoff>=0 & final_df$BS_payoff<0]),
# NA
# )
# )
# colnames(benchmark_table)<-c("Heston Nandi", "Black Scholes", "Model Selection", "All Long")
# rownames(benchmark_table)<-c(
# "Long Calls - Contracts",
# "Short Calls - Contracts",
# "Long Puts - Contracts",
# "Short Puts - Contracts",
# "Total Longs - Contracts",
# "Total Longs - Dollars",
# "Total Shorts - Contracts",
# "Total Shorts - Dollars",
# "Total Nonnegative - Contracts",
# "Total Negative - Contracts",
# "Total Profit - Dollars",
# "Total Loss - Dollars",
# "PnL (Balance) - Dollars",
# "Wins - Contracts",
# "Losses - Contracts",
# "Wins - Dollars",
# "Losses - Dollars"
# )
# write.table(benchmark_table,paste0(data_path,"Benchmarks/",underlying_asset,"_",data_year,"_HN_vs_BS_benchmark_2710_marketarpe_summary.csv"),row.names=TRUE,sep=",")
# #####
# ####
# ggplot(bs_required,aes(x=NetMaturity,y=Moneyness,fill=arpe))+geom_tile()
# qplot(x=bs_required$NetMaturity,y=bs_required$Moneyness,geom="tile",fill=bs_required$arpe)
# ##
# hn_dectree<-tree(arpe ~ NetMaturity + Moneyness,data=hn_required)
# plot(hn_dectree)
########
### Plot P&L distribution
########
# Overlay kernel-density estimates of per-contract net profit for a few
# candidate models (plus the always-long market strategy), pooled over the
# 2010-2013 SPX benchmark runs (1y look-back, monthly tree refresh).
if(!("ggplot2" %in% rownames(installed.packages())))
  install.packages("ggplot2")
library(ggplot2)
a2013_results<-read.table("~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/SPX_2013_pnl_with_realizations_1y_lookback_monthly_refresh_pnl.csv",header=TRUE,sep=",")
a2012_results<-read.table("~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/SPX_2012_pnl_with_realizations_1y_lookback_monthly_refresh_pnl.csv",header=TRUE,sep=",")
a2011_results<-read.table("~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/SPX_2011_pnl_with_realizations_1y_lookback_monthly_refresh_pnl.csv",header=TRUE,sep=",")
a2010_results<-read.table("~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/SPX_2010_pnl_with_realizations_1y_lookback_monthly_refresh_pnl.csv",header=TRUE,sep=",")
# Pooled P&L densities per strategy.
select_density_selection<-density(c(a2013_results$selection,a2012_results$selection,a2011_results$selection,a2010_results$selection))
select_density<-select_density_selection  # FIX: was an identical second density() call on the same pooled data
select_density_HN_A_5y<-density(c(a2013_results$HN_data_withdiv_asym_5y,a2012_results$HN_data_withdiv_asym_5y,a2011_results$HN_data_withdiv_asym_5y,a2010_results$HN_data_withdiv_asym_5y))
select_density_BS_5y<-density(c(a2013_results$BS_data_withdiv_5y,a2012_results$BS_data_withdiv_5y,a2011_results$BS_data_withdiv_5y,a2010_results$BS_data_withdiv_5y))
select_density_Levy_S_M_2y<-density(c(a2013_results$Levy_GHYP_data_withdiv_MCMM_50000_iteration_2y_symmetric,a2012_results$Levy_GHYP_data_withdiv_MCMM_50000_iteration_2y_symmetric,a2011_results$Levy_GHYP_data_withdiv_MCMM_50000_iteration_2y_symmetric,a2010_results$Levy_GHYP_data_withdiv_MCMM_50000_iteration_2y_symmetric))
select_density_marketlong<-density(c(a2013_results$market_long,a2012_results$market_long,a2011_results$market_long,a2010_results$market_long))
# Common evaluation grid. A data-driven range over all density supports was
# previously computed and then immediately overwritten (dead code, removed);
# the fixed [-50, 50] window is kept so fat tails do not dominate the plot.
my_x<-seq(-50,50,length.out=1000)
# Re-sample every density onto the common grid via linear interpolation.
selection_y<-approx(select_density_selection$x,select_density_selection$y,my_x)$y
HN_A_5y_y<-approx(select_density_HN_A_5y$x,select_density_HN_A_5y$y,my_x)$y
BS_5y_y<-approx(select_density_BS_5y$x,select_density_BS_5y$y,my_x)$y
Levy_S_M_2y_y<-approx(select_density_Levy_S_M_2y$x,select_density_Levy_S_M_2y$y,my_x)$y
marketlong_y<-approx(select_density_marketlong$x,select_density_marketlong$y,my_x)$y
benchy<-data.frame(pnl=my_x,selection=selection_y,HN_A_5y=HN_A_5y_y,BS_5y=BS_5y_y,Levy_M_S_2y=Levy_S_M_2y_y,marketlong=marketlong_y)
# FIX: map the data.frame columns inside aes() (the old code mapped the global
# vector selection_y instead of benchy$selection).
pl2<-ggplot(data=benchy,aes(x=pnl)) + geom_line(aes(y=selection, color="selection")) + geom_line(aes(y=HN_A_5y, color="HN_A_5y")) + geom_line(aes(y=Levy_M_S_2y, color="Levy_M_S_2y")) + geom_line(aes(y=BS_5y, color="BS_5y")) + geom_line(aes(y=marketlong, color="marketlong")) + labs(color = "Models") + xlab("Net Profit") + ylab("Density") #+ geom_line(aes(y=ghyp.def, color="GHYP.def"))
ggsave("~/Dropbox/PhD_Workshop/Reports/Progress Report 3 - 20150120/images/pnldensities_models_large.png",pl2,width=8,height=5)
plot(select_density)
#select_density<-density(some_results$selection)
# Quick base-graphics look at the market-long density on a wider grid.
my_x<-seq(-100,100,length.out=1000)
# FIX: was approx(marketlong_y$x, marketlong_y$y, ...) — marketlong_y is an
# atomic vector, so `$` on it errors at runtime; use the density object.
my_y<-approx(select_density_marketlong$x,select_density_marketlong$y,my_x)$y
plot(my_x,my_y)
|
/Codebase/benchmarktemp.r
|
no_license
|
cobain1992/msf
|
R
| false
| false
| 53,302
|
r
|
# Load the 'party' package (conditional-inference trees, used by
# find_predictions), installing it first if absent from the local library.
if(!("party" %in% rownames(installed.packages())))
install.packages("party")
#install.packages("party")
library(party)
########
#This is an attempt to build together a benchmarking environment
#' Predict per-option error/P&L performance of one candidate pricing model.
#'
#' A conditional-inference tree is trained on `parameter_period` years of past
#' quotes, clustering options by NetMaturity and Moneyness; each cluster's mean
#' historical error (or realized P&L) is then assigned to every option of
#' `predict_year` that falls into that cluster.
#'
#' @param path_info single-row data.frame with columns model_path, file_name
#'   and price_column identifying the model's output files.
#' @param input_path root directory containing the per-model output folders.
#' @param underlying_asset ticker prefix of the data files (e.g. "SPX").
#' @param predict_year year whose options are scored.
#' @param parameter_period look-back horizon in years (must be >= 1).
#' @param method "arpe" or "mse" (requires benchmark "market") or "pnl"
#'   (requires benchmark "realizations").
#' @param benchmark "market" or "realizations".
#' @param refresh_period "yearly" fits one tree for the whole year; "monthly"
#'   refits each month on a rolling training window.
#' @param output_path directory where the yearly-refresh diagnostic dump goes.
#' @param progressOutput print progress messages?
#' @return numeric vector of predicted errors, one per prediction-year option
#'   (options with unknown expiration prices are dropped first).
find_predictions<-function(path_info,input_path,underlying_asset,predict_year,parameter_period,method,benchmark,refresh_period="monthly",output_path,progressOutput=TRUE){
  # Validate before doing any (slow) file I/O.
  if(parameter_period < 1){
    stop("Parameter period should be one or more, preferably less than 5.")
  }
  party_rules<-ctree_control(minbucket=7)
  if(progressOutput){
    print(paste0("Starting finding error predictions of ",path_info[,"file_name"]))
  }
  lookback<-seq_len(parameter_period)
  # Market quotes of the look-back years, newest year first.
  # NOTE(review): market data is always read from the fixed Dropbox
  # "Input Files" tree and ignores `input_path` — confirm this is intended.
  market_files<-paste0("~/Dropbox/PhD_Workshop/Input Files/Asset Options/",underlying_asset,"_",predict_year-lookback,"_options_filtered_A12.csv")
  market_data<-do.call(rbind,lapply(market_files,read.csv,header=TRUE))
  # Drop options whose expiration price is unknown (-1 sentinel).
  market_data<-market_data[market_data$ExpirationPrices != -1,]
  market_data$DataDate<-as.Date(market_data$DataDate)
  market_data$RealExpiration<-as.Date(market_data$RealExpiration)
  # Model prices for the same look-back years, filtered the same way so the
  # rows stay aligned with market_data.
  training_files<-paste0(input_path,path_info[,"model_path"],"/",underlying_asset,"_",predict_year-lookback,"_",path_info[,"file_name"],".csv")
  training_data<-do.call(rbind,lapply(training_files,read.csv,header=TRUE))
  training_data<-training_data[training_data$ExpirationPrices != -1,]
  training_data$DataDate<-as.Date(training_data$DataDate)
  if(nrow(training_data) != nrow(market_data)){
    stop("Market data and training data are not compatible!!!!")
  }
  # Historical performance measure, one value per training option.
  if(benchmark == "market"){
    if(method == "arpe"){
      # Absolute relative pricing error vs the quoted market price.
      est_error<-abs(training_data$Last-training_data[,path_info[,"price_column"]])/training_data$Last
    }else if(method == "mse"){
      est_error<-(training_data$Last-training_data[,path_info[,"price_column"]])^2
    }else{
      stop("Wrong method chosen. We have only a limited number of options.")
    }
  }else if(benchmark == "realizations"){
    if(method == "pnl"){
      # Realized payoff of the model-implied trade: short (-1) when the model
      # price exceeds the quote, long (+1) otherwise.
      est_error<- ifelse(training_data[,path_info[,"price_column"]]>market_data$Last,-1,1)*market_data$PDAbsolute
    }else{
      stop("Wrong method chosen. We have only a limited number of options.")
    }
  }else{
    stop("Choose a proper benchmark! Either market or realizations.")
  }
  # Options to be scored in the prediction year.
  prediction_data<-read.csv(paste0(input_path,path_info[,"model_path"],"/",underlying_asset,"_",predict_year,"_",path_info[,"file_name"],".csv"),header=TRUE)
  prediction_data<-prediction_data[prediction_data$ExpirationPrices != -1,]
  prediction_data$DataDate<-as.Date(prediction_data$DataDate)
  if(refresh_period == "yearly"){
    if(benchmark=="realizations"){
      # Only options that expired before the prediction year have a realized P&L.
      pass_index<-which(market_data$RealExpiration < as.Date(paste0(predict_year,"-01-01")))
      est_error<-est_error[pass_index]
      training_data<-training_data[pass_index,]
    }
    cluster_data<-data.frame(ErrorValues=est_error,NetMaturity=training_data$NetMaturity,Moneyness=training_data$Moneyness)
    cluster_learn<-ctree(ErrorValues ~ NetMaturity + Moneyness, data=cluster_data, controls=party_rules)
    cluster_nodes<-predict(cluster_learn,type="node")
    # Diagnostic dump: training data together with the fitted tree node ids.
    write.table(cbind(training_data,cluster_data$ErrorValues,cluster_nodes),paste0(output_path,underlying_asset,"_",predict_year,"_",method,"_",benchmark,"_",path_info[,"file_name"],"_with_lookback_",parameter_period,"y_errordata.csv"),sep=",",row.names=FALSE,append=FALSE)
    node_averages<-aggregate(cluster_data$ErrorValues,by=list(cluster_nodes),"mean")
    colnames(node_averages)<-c("node","average")
    # Route each prediction-year option to a tree node and use that node's mean
    # historical error as its predicted error.
    predict_nodes<-predict(cluster_learn, newdata=data.frame(NetMaturity=prediction_data$NetMaturity, Moneyness=prediction_data$Moneyness),type="node")
    predicted_errors<-node_averages[match(predict_nodes,node_averages$node),"average"]
    if(progressOutput){
      print("Done")
    }
    return(predicted_errors)
  }else if(refresh_period == "monthly"){
    predict_nodes<-rep(-1,nrow(prediction_data))      # -1 marks "not yet assigned"
    predicted_errors<-rep(-1,nrow(prediction_data))
    for(i in 1:12){
      month_start<-as.Date(paste0(predict_year,"-",i,"-01"))
      next_month_start<-if(i < 12) as.Date(paste0(predict_year,"-",i+1,"-01")) else as.Date(paste0(predict_year+1,"-01-01"))
      lookback_start<-as.Date(paste0(predict_year-parameter_period,"-",i,"-01"))
      # Options quoted during month i of the prediction year (this selection was
      # previously duplicated verbatim in both benchmark branches; hoisted).
      prediction_index<-which(prediction_data$DataDate >= month_start & prediction_data$DataDate < next_month_start)
      # Rolling training window; P&L training additionally requires the option
      # to have already expired so its payoff is known.
      if(benchmark=="realizations"){
        training_index<-which(training_data$DataDate < month_start & training_data$DataDate >= lookback_start & market_data$RealExpiration < month_start)
      }else{
        training_index<-which(training_data$DataDate < month_start & training_data$DataDate >= lookback_start)
      }
      cluster_data<-data.frame(ErrorValues=est_error[training_index],NetMaturity=training_data$NetMaturity[training_index],Moneyness=training_data$Moneyness[training_index])
      cluster_learn<-ctree(ErrorValues ~ NetMaturity + Moneyness, data=cluster_data, controls=party_rules)
      cluster_nodes<-predict(cluster_learn,type="node")
      # (The per-month diagnostic dump present in the yearly branch was
      # commented out here in the original and remains disabled.)
      node_averages<-aggregate(cluster_data$ErrorValues,by=list(cluster_nodes),"mean")
      colnames(node_averages)<-c("node","average")
      predict_nodes[prediction_index]<-predict(cluster_learn, newdata=data.frame(NetMaturity=prediction_data$NetMaturity[prediction_index], Moneyness=prediction_data$Moneyness[prediction_index]),type="node")
      predicted_errors[prediction_index]<-node_averages[match(predict_nodes[prediction_index],node_averages$node),"average"]
    }
    if(any(predict_nodes < 0)){
      stop("Some months are missing in prediction")
    }
    if(progressOutput){
      print("Done")
    }
    return(predicted_errors)
  }else{
    stop("Refresh period is not chosen properly.")
  }
}
table_benchmark_summary<-function(pnl_data,contenders){
benchmark_summary<-rep(0,nrow(contenders)+2)
names(benchmark_summary)<-c("selection",contenders[,"file_name"],"market_long")
#Long Calls
pnl_filter<-pnl_data[pnl_data$Type == "call",]
benchmark_summary<-rbind(benchmark_summary,"Long Calls" = c(colSums((pnl_filter[,c("selection",contenders[,"file_name"])]+pnl_filter[,"PDAbsolute"]) != 0),nrow(pnl_filter)))
#Short Calls
benchmark_summary<-rbind(benchmark_summary,"Short Calls" = c(colSums((pnl_filter[,c("selection",contenders[,"file_name"])]+pnl_filter[,"PDAbsolute"]) == 0),0))
#Long Puts
pnl_filter<-pnl_data[pnl_data$Type == "put",]
benchmark_summary<-rbind(benchmark_summary,"Long Puts" = c(colSums((pnl_filter[,c("selection",contenders[,"file_name"])]+pnl_filter[,"PDAbsolute"]) != 0),nrow(pnl_filter)))
#Short Puts
benchmark_summary<-rbind(benchmark_summary,"Short Puts" = c(colSums((pnl_filter[,c("selection",contenders[,"file_name"])]+pnl_filter[,"PDAbsolute"]) == 0),0))
#Total Longs
benchmark_summary<-rbind(benchmark_summary,"Total Longs" = colSums(benchmark_summary[c("Long Calls","Long Puts"),]))
#Total Shorts
benchmark_summary<-rbind(benchmark_summary,"Total Shorts" = colSums(benchmark_summary[c("Short Calls","Short Puts"),]))
#Total Long Investment
benchmark_summary<-rbind(benchmark_summary,"Total Long Investment" = c(colSums(pnl_data$Last*((pnl_data[,c("selection",contenders[,"file_name"])]+pnl_data[,"PDAbsolute"]) != 0)),sum(pnl_data$Last)))
#Total Short Capital
benchmark_summary<-rbind(benchmark_summary,"Total Short Capital" = c(colSums(pnl_data$Last*((pnl_data[,c("selection",contenders[,"file_name"])]+pnl_data[,"PDAbsolute"]) == 0)),0))
#Total Nonnegative Contracts
benchmark_summary<-rbind(benchmark_summary,"Total Nonnegative Contracts" = colSums(pnl_data[,c("selection",contenders[,"file_name"],"market_long")] >= 0))
#Total Profit
benchmark_summary<-rbind(benchmark_summary,"Total Profit" = colSums((pnl_data[,c("selection",contenders[,"file_name"],"market_long")] >= 0)*(pnl_data[,c("selection",contenders[,"file_name"],"market_long")])))
#Total Negative Contracts
benchmark_summary<-rbind(benchmark_summary,"Total Negative Contracts" = colSums(pnl_data[,c("selection",contenders[,"file_name"],"market_long")] < 0))
#Total Loss
benchmark_summary<-rbind(benchmark_summary,"Total Loss" = colSums((pnl_data[,c("selection",contenders[,"file_name"],"market_long")] < 0)*(pnl_data[,c("selection",contenders[,"file_name"],"market_long")])))
#PnL
benchmark_summary<-rbind(benchmark_summary,"PnL" = colSums(benchmark_summary[c("Total Profit","Total Loss"),]))
#Total Consensus
benchmark_summary<-rbind(benchmark_summary,"Total Consensus" = c(rep(sum(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 1),nrow(contenders)+1),sum(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 1 & pnl_data[,"market_long"] >= 0)))
#Total Wins
benchmark_summary<-rbind(benchmark_summary,
"Total Wins" = colSums((pnl_data[,c("selection",contenders[,"file_name"],"market_long")] >= 0) & (rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) < 1 & rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) > 0)))
benchmark_summary["Total Wins","market_long"]<-benchmark_summary["Total Wins","market_long"] + sum(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 0 & pnl_data[,"market_long"] >= 0)
#Total Losses
benchmark_summary<-rbind(benchmark_summary,
"Total Losses" = colSums((pnl_data[,c("selection",contenders[,"file_name"],"market_long")] < 0) & (rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) < 1 & rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) > 0)))
benchmark_summary["Total Losses","market_long"] <- benchmark_summary["Total Losses","market_long"] + sum(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 1 & pnl_data[,"market_long"] < 0)
#Total Doom
benchmark_summary<-rbind(benchmark_summary,"Total Doom" = c(rep(sum(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 0),nrow(contenders)+1),sum(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 0 & pnl_data[,"market_long"] < 0)))
#Total Consensus Dollars
benchmark_summary<-rbind(benchmark_summary,"Total Consensus Dollars" = c(rep(sum((rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 1)*pnl_data[,"selection"]),nrow(contenders)+1),sum(pnl_data[,"market_long"]*(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 1 & pnl_data[,"market_long"] >= 0))))
#Total Wins Dollars
benchmark_summary<-rbind(benchmark_summary,
"Total Wins Dollars" = colSums(pnl_data[,c("selection",contenders[,"file_name"],"market_long")]*((pnl_data[,c("selection",contenders[,"file_name"],"market_long")] >= 0) & (rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) < 1 & rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) > 0))))
benchmark_summary["Total Wins Dollars","market_long"]<-benchmark_summary["Total Wins Dollars","market_long"] + sum(pnl_data[,"market_long"]*(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 0 & pnl_data[,"market_long"] >= 0))
#Total Losses
benchmark_summary<-rbind(benchmark_summary,
"Total Losses Dollars" = colSums(pnl_data[,c("selection",contenders[,"file_name"],"market_long")]*((pnl_data[,c("selection",contenders[,"file_name"],"market_long")] < 0) & (rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) < 1 & rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) > 0))))
benchmark_summary["Total Losses Dollars","market_long"]<-benchmark_summary["Total Losses Dollars","market_long"] + sum(pnl_data[,"market_long"]*(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 1 & pnl_data[,"market_long"] < 0))
#Total Doom Dollars
benchmark_summary<-rbind(benchmark_summary,"Total Doom Dollars" = c(rep(sum((rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 0)*pnl_data[,"selection"]),nrow(contenders)+1),sum(pnl_data[,"market_long"]*(rowSums(pnl_data[,contenders[,"file_name"]] >= 0)/nrow(contenders) == 0 & pnl_data[,"market_long"] < 0))))
return(benchmark_summary[-1,])
}
# Mondrian model-selection benchmark for one underlying asset and year.
# For every contender pricing model it (1) predicts per-contract pricing
# errors via find_predictions (defined elsewhere in this project), (2) picks,
# per contract, the contender with the smallest predicted error, (3) converts
# the chosen model prices into long/short PnL against realised payoffs, and
# (4) writes error, price, PnL and summary CSVs.
#
# Args:
#   contenders       - data.frame with columns model_path, file_name, price_column.
#   seeds            - one RNG seed per contenders row (applied before each
#                      find_predictions call); mismatched lengths stop().
#   input_path       - root directory of the per-model price files.
#   method           - error metric forwarded to find_predictions (e.g. "arpe", "mse", "pnl").
#   benchmark        - reference forwarded to find_predictions ("market"/"realizations").
#   underlying_asset - ticker used to build input/output file names.
#   predict_year     - option-data year being benchmarked.
#   parameter_period - lookback length (years) of the error model.
#   refresh_period   - "monthly" or "yearly" error-model refresh.
#   output_path      - directory receiving the error/price/pnl CSVs.
#   progressOutput   - print stage messages when TRUE.
#
# Returns: the per-contract PnL data.frame (also written to disk).
benchmark_mondrian<-function(contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="arpe",benchmark="market",underlying_asset,predict_year=2013,parameter_period=2,refresh_period="yearly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE){
# Fail fast: exactly one RNG seed per contender model is required.
if(length(seeds) != nrow(contenders)){
stop("Seeds are not equal to contenders")
}
# Market option quotes (pre-filtered "A12" file) for the benchmark year.
market_prices<-read.csv(paste0("~/Dropbox/PhD_Workshop/Input Files/Asset Options/",underlying_asset,"_",predict_year,"_options_filtered_A12.csv"),header=TRUE)
if(progressOutput){
print("Predicting Errors")
}
# For each contender: seed the RNG, collect its predicted per-contract errors
# and its model price column; columns are accumulated side by side via cbind.
for(i in 1:nrow(contenders)){
set.seed(seeds[i])
if(i == 1){
error_sets<-find_predictions(path_info=contenders[i,1:3] , input_path=input_path,underlying_asset=underlying_asset,predict_year=predict_year,parameter_period=parameter_period,method=method,benchmark=benchmark,refresh_period=refresh_period,output_path=output_path,progressOutput=progressOutput)
price_file<-read.csv(paste0(input_path,contenders[i,"model_path"],"/",underlying_asset,"_",predict_year,"_",contenders[i,"file_name"],".csv"),header=TRUE)
price_info<-price_file[,contenders[i,"price_column"]]
}
else{
error_sets<-cbind(error_sets,find_predictions(path_info=contenders[i,1:3] , input_path=input_path,underlying_asset=underlying_asset,predict_year=predict_year,parameter_period=parameter_period,method=method,benchmark=benchmark,refresh_period=refresh_period,output_path=output_path,progressOutput=progressOutput))
price_file<-read.csv(paste0(input_path,contenders[i,"model_path"],"/",underlying_asset,"_",predict_year,"_",contenders[i,"file_name"],".csv"),header=TRUE)
price_info<-cbind(price_info,price_file[,contenders[i,"price_column"]])
}
}
colnames(error_sets)<-contenders[,"file_name"]
if(progressOutput){
print("Writing Error Data")
}
write.table(error_sets,paste0(output_path,underlying_asset,"_",predict_year,"_",method,"_with_",benchmark,"_",parameter_period,"y_lookback_errors.csv"),sep=",",row.names=FALSE,append=FALSE)
# Keep only contracts with an observed expiration price (-1 marks "missing").
# NOTE(review): this matrix subscript assumes >= 2 contenders (with a single
# contender price_info is a plain vector), and error_sets is NOT filtered the
# same way - confirm find_predictions already returns rows aligned with the
# filtered contracts.
price_info<-price_info[market_prices$ExpirationPrices != -1,]
# Per contract, the column index of the contender with the smallest predicted
# error (max.col of the negated matrix = per-row argmin).
selections<-max.col(-error_sets)
selections_prices<-rep(0,length(selections))
for(i in 1:nrow(contenders)){
selections_prices[selections==i]<-price_info[selections==i,i]
}
# Apply the same expiration filter to the market quotes.
market_prices<-market_prices[market_prices$ExpirationPrices != -1,]
price_info<-cbind(selections_prices,price_info,market_prices$Last)
colnames(price_info)<-c("selection",contenders[,"file_name"],"market_prices")
if(progressOutput){
print("Writing Price Data")
}
write.table(cbind(market_prices[,c("UnderlyingPrice","OptionSymbol","Type","DataDate","Expiration","RealExpiration","Moneyness","NetMaturity","Strike","ExpirationPrices","Last","ExpirationPayoff","PDAbsolute")],
price_info),paste0(output_path,underlying_asset,"_",predict_year,"_",method,"_with_",benchmark,"_",parameter_period,"y_lookback_",refresh_period,"_refresh_price_info.csv"),sep=",",row.names=FALSE,append=FALSE)
# return(price_info)
# Trading rule: go long when the selected model price exceeds the market Last,
# short otherwise; PDAbsolute supplies the payoff magnitude (sign convention
# assumed from usage - TODO confirm against the PDAbsolute definition).
pnl_info<-ifelse(price_info[,"selection"]>market_prices$Last,1,-1)*market_prices$PDAbsolute
# Same rule applied to each contender's own price column (offset +1 skips the
# "selection" column), plus an always-long market column appended last.
for(i in 1:nrow(contenders)){
pnl_info<-cbind(pnl_info,ifelse(price_info[,i+1]>market_prices$Last,1,-1)*market_prices$PDAbsolute)
}
pnl_info<-cbind(pnl_info,market_prices$PDAbsolute)
colnames(pnl_info)<-c("selection",contenders[,"file_name"],"market_long")
if(progressOutput){
print("Writing PnL Data")
}
pnl_data<-cbind(market_prices[,c("UnderlyingPrice","OptionSymbol","Type","DataDate","Expiration","RealExpiration","Moneyness","NetMaturity","Strike","ExpirationPrices","Last","ExpirationPayoff","PDAbsolute")],
pnl_info)
write.table(pnl_data,paste0(output_path,underlying_asset,"_",predict_year,"_",method,"_with_",benchmark,"_",parameter_period,"y_lookback_",refresh_period,"_refresh_pnl.csv"),sep=",",row.names=FALSE,append=FALSE)
if(progressOutput){
print("Writing Benchmark Summary")
}
# NOTE(review): the summary lands under input_path ("Benchmarks/Summaries/"),
# not output_path like every other file above - confirm this asymmetry is intentional.
write.table(table_benchmark_summary(pnl_data,contenders),paste0(input_path,"Benchmarks/Summaries/",underlying_asset,"_",predict_year,"_",method,"_with_",benchmark,"_",parameter_period,"y_lookback_",refresh_period,"_refresh_benchmark_summary.csv"),sep=",",row.names=TRUE,col.names=NA,append=FALSE)
return(pnl_data)
}
# MSE-vs-market Mondrian benchmarks for NDX: every prediction year from 2013
# back to 2010, crossed with refresh frequency and lookback length.
# Call order matches the original script: within each year, both lookbacks
# under "monthly" refresh first, then both under "yearly".
for (prediction.date in 2013:2010) {
  for (refresh in c("monthly", "yearly")) {
    for (lookback in 1:2) {
      benchmark_mondrian(contenders, seeds,
        input_path = "~/Dropbox/PhD_Workshop/Output Files/",
        method = "mse", benchmark = "market",
        underlying_asset = "NDX", predict_year = prediction.date,
        parameter_period = lookback, refresh_period = refresh,
        output_path = "~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/")
    }
  }
}
# ARPE-vs-market Mondrian benchmarks for NDX, 2013 back to 2010, crossed with
# refresh frequency and lookback length (same ordering as the MSE run above:
# per year, monthly refresh for lookbacks 1y/2y, then yearly refresh).
for (prediction.date in 2013:2010) {
  for (refresh in c("monthly", "yearly")) {
    for (lookback in 1:2) {
      benchmark_mondrian(contenders, seeds,
        input_path = "~/Dropbox/PhD_Workshop/Output Files/",
        method = "arpe", benchmark = "market",
        underlying_asset = "NDX", predict_year = prediction.date,
        parameter_period = lookback, refresh_period = refresh,
        output_path = "~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/")
    }
  }
}
# Realised-PnL Mondrian benchmarks for NDX (selection metric "pnl" scored
# against realizations rather than market quotes), 2013 back to 2010,
# crossed with refresh frequency and lookback length in the same order as
# the MSE/ARPE runs above.
for (prediction.date in 2013:2010) {
  for (refresh in c("monthly", "yearly")) {
    for (lookback in 1:2) {
      benchmark_mondrian(contenders, seeds,
        input_path = "~/Dropbox/PhD_Workshop/Output Files/",
        method = "pnl", benchmark = "realizations",
        underlying_asset = "NDX", predict_year = prediction.date,
        parameter_period = lookback, refresh_period = refresh,
        output_path = "~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/")
    }
  }
}
# print(colSums(pnl_tots[,-1]))
# Aggregate Mondrian benchmark results for one underlying over a span of
# prediction years (year_from down to year_to) and write the aggregate
# summary CSV.
#
# Modes:
#   only_read = FALSE - recompute each year's benchmark via benchmark_mondrian
#                       (which writes its own per-year files), stack the raw
#                       per-contract PnL rows, and summarise them with
#                       table_benchmark_summary.
#   only_read = TRUE  - reload previously written per-year summary tables and
#                       sum them element-wise; no recomputation.
#
# Args mirror benchmark_mondrian; year_from/year_to bound the (descending)
# range of prediction years. Returns invisibly (used for its file output).
pnl_summaries<-function(only_read=FALSE,year_from=2013,year_to=2011,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="pnl",benchmark="realizations",underlying_asset="SPX",parameter_period=1,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE){
  # Accumulator: raw PnL rows in recompute mode, element-wise summary sums in
  # only_read mode.
  pnl_tots<-NULL
  for(prediction.date in year_from:year_to){
    if(only_read){
      # Reload a previously written per-year benchmark summary.
      # row.names=1: summaries are written with row.names=TRUE/col.names=NA,
      # so the first CSV column carries row labels and must stay out of the sum.
      # NOTE(review): benchmark_mondrian writes these files under
      # paste0(input_path,"Benchmarks/Summaries/") - confirm output_path here
      # points at the same directory.
      pnl_vals<-read.csv(paste0(output_path,underlying_asset,"_",prediction.date,"_",method,"_with_",benchmark,"_",parameter_period,"y_lookback_",refresh_period,"_refresh_benchmark_summary.csv"),header=TRUE,row.names=1)
      # Bug fix: the original evaluated NULL + data.frame on the first
      # iteration, which cannot seed the accumulator; initialise from the
      # first year instead.
      if(is.null(pnl_tots)){
        pnl_tots<-pnl_vals
      }else{
        pnl_tots<-pnl_tots + pnl_vals
      }
    }else{
      if(progressOutput){
        print(paste0("Starting ",prediction.date," benchmarks."))
      }
      # Recompute the per-year benchmark; benchmark_mondrian returns the raw
      # per-contract PnL data.frame.
      pnl_vals<-benchmark_mondrian(contenders,seeds,input_path=input_path, method=method,benchmark=benchmark,underlying_asset=underlying_asset,predict_year=prediction.date,parameter_period=parameter_period,refresh_period=refresh_period,output_path=output_path,progressOutput=progressOutput)
      pnl_tots<-rbind(pnl_tots,pnl_vals)
    }
  }
  if(progressOutput){
    print("Getting Aggregate Summmary")
  }
  aggregate_file<-paste0(input_path,"Benchmarks/Summaries/",underlying_asset,"_from_",year_from,"_to_",year_to,"_",method,"_with_",benchmark,"_",parameter_period,"y_lookback_",refresh_period,"_refresh_benchmark_summary_aggregate.csv")
  if(only_read){
    # Bug fix: in only_read mode pnl_tots is already a summed summary table,
    # not raw per-contract PnL data, so passing it through
    # table_benchmark_summary (which indexes columns like "Type" and
    # "PDAbsolute") would fail; write the aggregate directly.
    write.table(pnl_tots,aggregate_file,sep=",",row.names=TRUE,col.names=NA,append=FALSE)
  }else{
    write.table(table_benchmark_summary(pnl_tots,contenders),aggregate_file,sep=",",row.names=TRUE,col.names=NA,append=FALSE)
  }
}
# contenders<-data.frame(model_path=c("HestonNandi_GARCH","HestonNandi_GARCH","HestonNandi_GARCH","Black_Scholes","Black_Scholes","GHYP_Europt","GHYP_Europt","GHYP_Europt","GHYP_Europt","GHYP_Europt","GHYP_Europt","GHYP_Europt","GHYP_Europt"),
# file_name=c("HN_data_withdiv_symm_2y","HN_data_withdiv_symm_5y","HN_data_withdiv_asym_5y","BS_data_withdiv_2y","BS_data_withdiv_5y",
# "Levy_GHYP_data_withdiv_Esscher_50000_iteration_2y_asymmetric","Levy_GHYP_data_withdiv_MCMM_50000_iteration_2y_asymmetric","Levy_GHYP_data_withdiv_Esscher_50000_iteration_2y_symmetric","Levy_GHYP_data_withdiv_MCMM_50000_iteration_2y_symmetric",
# "Levy_GHYP_data_withdiv_Esscher_50000_iteration_5y_asymmetric","Levy_GHYP_data_withdiv_MCMM_50000_iteration_5y_asymmetric","Levy_GHYP_data_withdiv_Esscher_50000_iteration_5y_symmetric","Levy_GHYP_data_withdiv_MCMM_50000_iteration_5y_symmetric"),
# price_column=c("HN_prices","HN_prices","HN_prices","BS.HV2y","BS.HV2y","LE_prices","LM_prices","LE_prices","LM_prices","LE_prices","LM_prices","LE_prices","LM_prices"),
# stringsAsFactors=FALSE)
# contenders<-data.frame(model_path=c("HestonNandi_GARCH","HestonNandi_GARCH","HestonNandi_GARCH","HestonNandi_GARCH","HestonNandi_GARCH","HestonNandi_GARCH"),
# file_name=c("HN_data_withdiv_asym_5y_000000","HN_data_withdiv_asym_5y_002000","HN_data_withdiv_asym_5y_010000","HN_data_withdiv_asym_5y_012000","HN_data_withdiv_asym_5y_020000","HN_data_withdiv_asym_5y_022000"),
# price_column=c("HN_prices","HN_prices","HN_prices","HN_prices","HN_prices","HN_prices"),
# stringsAsFactors=FALSE)
# Candidate pricing models for the benchmark: three Heston-Nandi GARCH
# variants and two Black-Scholes variants, each with the directory, file
# stem, and price column needed to locate its model-price CSV.
contenders <- data.frame(
  model_path   = c(rep("HestonNandi_GARCH", 3), rep("Black_Scholes", 2)),
  file_name    = c("HN_data_withdiv_symm_2y", "HN_data_withdiv_symm_5y",
                   "HN_data_withdiv_asym_5y", "BS_data_withdiv_2y",
                   "BS_data_withdiv_5y"),
  price_column = c(rep("HN_prices", 3), rep("BS.HV2y", 2)),
  stringsAsFactors = FALSE
)
#seeds<-c(7061414,4111447,515153,3504592,959323,872692,489137,506416,977659,927887,327129,274429,129331)
seeds<-c(7061414,4111447,515153,3504592,959323)
# Sweep the benchmark summaries over every combination of parameter history
# length (1 or 2 years), refresh period and error metric.  For the "pnl"
# metric the benchmark is realized payoffs; for "arpe"/"mse" it is market
# prices.  Each combination is run twice: summarising back to 2010 and,
# separately, back to 2011.
# NOTE(review): year_from=2013 with year_to=2010/2011 suggests
# pnl_summaries() iterates years downward -- confirm against its definition,
# which is outside this file section.
for(par_per in 1:2){
for(ref_per in c("monthly","yearly")){
for(met_name in c("pnl","arpe","mse")){
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2010,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method=met_name,benchmark=ifelse(met_name=="pnl","realizations","market"),underlying_asset="NDX",parameter_period=par_per,refresh_period=ref_per,output_path="~/Dropbox/PhD_Workshop/Output Files/Survey_Results/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2011,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method=met_name,benchmark=ifelse(met_name=="pnl","realizations","market"),underlying_asset="NDX",parameter_period=par_per,refresh_period=ref_per,output_path="~/Dropbox/PhD_Workshop/Output Files/Survey_Results/Benchmarks/Mondrian/",progressOutput=TRUE)
}
}
}
# Individual re-runs of selected metric / period combinations.
# NOTE(review): these largely duplicate combinations already covered by the
# loop above, and the output_path alternates between
# ".../Survey_Results/Benchmarks/Mondrian/" and ".../Benchmarks/Mondrian/"
# with no obvious pattern -- confirm which destination is intended.
# NOTE(review): the "mse" calls pair year_to=2011 with parameter_period=2 +
# refresh_period="monthly" and year_to=2010 with parameter_period=1 +
# refresh_period="yearly", unlike the "pnl"/"arpe" calls -- possibly a typo.
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2010,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="pnl",benchmark="realizations",underlying_asset="NDX",parameter_period=1,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Survey_Results/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2011,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="pnl",benchmark="realizations",underlying_asset="NDX",parameter_period=1,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2010,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="pnl",benchmark="realizations",underlying_asset="NDX",parameter_period=2,refresh_period="yearly",output_path="~/Dropbox/PhD_Workshop/Output Files/Survey_Results/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2011,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="pnl",benchmark="realizations",underlying_asset="NDX",parameter_period=2,refresh_period="yearly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2010,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="arpe",benchmark="market",underlying_asset="NDX",parameter_period=1,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2011,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="arpe",benchmark="market",underlying_asset="NDX",parameter_period=1,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2010,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="arpe",benchmark="market",underlying_asset="NDX",parameter_period=2,refresh_period="yearly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2011,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="arpe",benchmark="market",underlying_asset="NDX",parameter_period=2,refresh_period="yearly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2010,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="mse",benchmark="market",underlying_asset="NDX",parameter_period=1,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2011,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="mse",benchmark="market",underlying_asset="NDX",parameter_period=2,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2010,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="mse",benchmark="market",underlying_asset="NDX",parameter_period=1,refresh_period="yearly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
pnl_summaries(only_read=FALSE,year_from=2013,year_to=2011,contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="mse",benchmark="market",underlying_asset="NDX",parameter_period=2,refresh_period="yearly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",progressOutput=TRUE)
# pnl_2013<-benchmark_mondrian(contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="pnl",benchmark="realizations",underlying_asset="SPX",predict_year=2013,parameter_period=2,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/")
# pnl_2012<-benchmark_mondrian(contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="mse",benchmark="market",underlying_asset="SPX",predict_year=2012,parameter_period=2,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/")
# pnl_2011<-benchmark_mondrian(contenders,seeds,input_path="~/Dropbox/PhD_Workshop/Output Files/", method="mse",benchmark="market",underlying_asset="SPX",predict_year=2011,parameter_period=2,refresh_period="monthly",output_path="~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/")
# colSums(pnl_2013)
# colSums(pnl_2012)
# colSums(pnl_2011)
# colSums(pnl_2013) + colSums(pnl_2012) + colSums(pnl_2011)
########
# #This is the temporary code for classification and benchmarking of models
# data_path<-"~/Dropbox/PhD_Workshop/Output Files/"
# #data_path<-"~/Dropbox/PhD_Workshop/Output Files/"
# #Set the asset name
# underlying_asset<-"SPX"
# #Set the data year to be predicted
# data_year<-2013
# #Set random seed for replication
# set.seed(7061414)
# #Import training data from the previous two years
# hn_training<-read.csv(paste0(data_path,"HestonNandi_GARCH/",underlying_asset,"_",data_year-1,"_HN_data.csv"),header=TRUE)
# hn_training<-rbind(hn_training,read.csv(paste0(data_path,"HestonNandi_GARCH/",underlying_asset,"_",data_year-2,"_HN_data.csv"),header=TRUE))
# #120874 rows including far future options
# #Calculate ARPE from the market prices
# hn_arpe<-abs(hn_training$Last-hn_training$HN_prices)/hn_training$Last
# #Calculate AE from expiration payoff. Formula might be wrong
# #hn_arpe<-abs(hn_training$ExpirationPayoff-hn_training$HN_prices)
# #Set Conditional Inference Tree rules
# party_rules<-ctree_control(minbucket=0.005*nrow(hn_training))
# #The rules below are for demonstration only
# #party_rules<-ctree_control(minbucket=0.20*nrow(hn_training),maxdepth=2)
# #Compose the data frame required for classification
# hn_required<-data.frame(arpe=hn_arpe,NetMaturity=hn_training$NetMaturity,Moneyness=hn_training$Moneyness)
# #Use the conditional inference tree to create nodes
# hn_classification<-ctree(arpe ~ NetMaturity + Moneyness, data=hn_required, controls=party_rules)
# #Add nodes to the data frame
# hn_required<-data.frame(hn_required,nodes=predict(hn_classification,type="node"))
# #Calculate node averages
# hn_node_averages<-aggregate(hn_required$arpe,by=list(hn_required$nodes),"mean")
# colnames(hn_node_averages)<-c("node","average")
# #Get the prediction year's data
# hn_predict<-read.csv(paste0(data_path,"HestonNandi_GARCH/",underlying_asset,"_",data_year,"_HN_data.csv"),header=TRUE)
# #Assign nodes for different maturity-moneyness combinations
# hn_predict_nodes<-predict(hn_classification, newdata=data.frame(NetMaturity=hn_predict$NetMaturity, Moneyness=hn_predict$Moneyness),type="node")
# #Predict error performance of the new nodes with the previous data
# hn_predict_performance<-hn_node_averages[match(hn_predict_nodes,hn_node_averages$node),"average"]
# #Repeat for
# #BS
# set.seed(4141607)
# bs_training<-read.csv(paste0(data_path,"Black_Scholes/",underlying_asset,"_",data_year-1,"_BS_data.csv"),header=TRUE)
# bs_training<-rbind(bs_training,read.csv(paste0(data_path,"Black_Scholes/",underlying_asset,"_",data_year-2,"_BS_data.csv"),header=TRUE))
# #120874 rows including far future options
# party_rules<-ctree_control(minbucket=0.005*nrow(bs_training))
# #party_rules<-ctree_control(minbucket=0.2*nrow(bs_training),maxdepth=2)
# bs_arpe<-abs(bs_training$Last-bs_training$BS.HV2y)/bs_training$Last
# #bs_arpe<-abs(bs_training$ExpirationPayoff-bs_training$BS.HV2y)
# bs_required<-data.frame(arpe=bs_arpe,NetMaturity=bs_training$NetMaturity,Moneyness=bs_training$Moneyness)
# bs_classification<-ctree(arpe ~ NetMaturity + Moneyness, data=bs_required)
# bs_required<-data.frame(bs_required,nodes=predict(bs_classification,type="node"))
# bs_node_averages<-aggregate(bs_required$arpe,by=list(bs_required$nodes),"mean")
# colnames(bs_node_averages)<-c("node","average")
# bs_predict<-read.csv(paste0(data_path,"Black_Scholes/",underlying_asset,"_",data_year,"_BS_data.csv"),header=TRUE)
# bs_predict_nodes<-predict(bs_classification, newdata=data.frame(NetMaturity=bs_predict$NetMaturity, Moneyness=bs_predict$Moneyness),type="node")
# bs_predict_performance<-bs_node_averages[match(bs_predict_nodes,bs_node_averages$node),"average"]
# HN_or_BS<-ifelse(max.col(cbind(-hn_predict_performance,-bs_predict_performance))==1,"HN","BS")
# BetPrices<-rep(0,length(HN_or_BS))
# BetPrices[HN_or_BS=="HN"]<-hn_predict$HN_prices[HN_or_BS=="HN"]
# BetPrices[HN_or_BS=="BS"]<-bs_predict$BS.HV2y[HN_or_BS=="BS"]
# option_data<-read.csv(paste0("~/Dropbox/PhD_Workshop/Input Files/Asset Options/SPX_",data_year,"_options_filtered_A12.csv"),header=TRUE)
# #ifelse(option_data$Last<BetPrices,1,-1)*option_data$PDAbsolute
# final_df<-data.frame(OptionSymbol=option_data$OptionSymbol, Type=option_data$Type, UnderlyingPrice=option_data$UnderlyingPrice, RealExpiration=option_data$RealExpiration, NetMaturity=option_data$NetMaturity,
# Strike=option_data$Strike, Last=option_data$Last, ExpirationPrices=option_data$ExpirationPrices, Moneyness=option_data$Moneyness , ExpirationPayoff=option_data$ExpirationPayoff,
# PDAbsolute=option_data$PDAbsolute, HN_prices=hn_predict$HN_prices, HN_payoff=ifelse(option_data$Last<hn_predict$HN_prices,1,-1)*option_data$PDAbsolute, BS_prices=bs_predict$BS.HV2y,
# BS_payoff=ifelse(option_data$Last<bs_predict$BS.HV2y,1,-1)*option_data$PDAbsolute, HN_or_BS=HN_or_BS, Bet_prices=BetPrices, Bet_payoff=ifelse(option_data$Last<BetPrices,1,-1)*option_data$PDAbsolute
# )
# final_df<-final_df[(final_df$ExpirationPrices != -1),]
# write.table(final_df,paste0(data_path,"Benchmarks/",underlying_asset,"_",data_year,"_HN_vs_BS_benchmark_2710_marketarpe.csv"),row.names=FALSE,sep=",")
# #####
# nrow(final_df)
# sum(final_df$HN_payoff>=0 & final_df$BS_payoff>=0)
# sum(final_df$HN_payoff<0 & final_df$BS_payoff>=0)
# sum(final_df$HN_payoff>=0 & final_df$BS_payoff<0)
# sum(final_df$HN_payoff<0 & final_df$BS_payoff<0)
# sum(final_df$HN_payoff[final_df$HN_payoff>=0 & final_df$BS_payoff>=0])
# sum(final_df$HN_payoff[final_df$HN_payoff<0 & final_df$BS_payoff>=0])
# sum(final_df$HN_payoff[final_df$HN_payoff>=0 & final_df$BS_payoff<0])
# sum(final_df$HN_payoff[final_df$HN_payoff<0 & final_df$BS_payoff<0])
# sum(final_df$BS_payoff[final_df$HN_payoff>=0 & final_df$BS_payoff>=0])
# sum(final_df$BS_payoff[final_df$HN_payoff<0 & final_df$BS_payoff>=0])
# sum(final_df$BS_payoff[final_df$HN_payoff>=0 & final_df$BS_payoff<0])
# sum(final_df$BS_payoff[final_df$HN_payoff<0 & final_df$BS_payoff<0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff >= 0 & final_df$HN_payoff>=0 & final_df$BS_payoff>=0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff >= 0 & final_df$HN_payoff<0 & final_df$BS_payoff>=0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff >= 0 & final_df$HN_payoff>=0 & final_df$BS_payoff<0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff >= 0 & final_df$HN_payoff<0 & final_df$BS_payoff<0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff < 0 & final_df$HN_payoff>=0 & final_df$BS_payoff>=0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff < 0 & final_df$HN_payoff<0 & final_df$BS_payoff>=0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff < 0 & final_df$HN_payoff>=0 & final_df$BS_payoff<0])
# sum(final_df$Bet_payoff[final_df$Bet_payoff < 0 & final_df$HN_payoff<0 & final_df$BS_payoff<0])
# #####
# benchmark_table<-data.frame(Heston_Nandi=numeric(),Black_Scholes=numeric(),Model_Selection=numeric(),All_Long=numeric())
# #Long Calls
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_prices >= final_df$Last & final_df$Type == "call"),
# sum(final_df$BS_prices >= final_df$Last & final_df$Type == "call"),
# sum(final_df$Bet_prices >= final_df$Last & final_df$Type == "call"),
# sum(final_df$Type == "call")
# )
# )
# #Short Calls
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_prices < final_df$Last & final_df$Type == "call"),
# sum(final_df$BS_prices < final_df$Last & final_df$Type == "call"),
# sum(final_df$Bet_prices < final_df$Last & final_df$Type == "call"),
# 0
# )
# )
# #Long Puts
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_prices >= final_df$Last & final_df$Type == "put"),
# sum(final_df$BS_prices >= final_df$Last & final_df$Type == "put"),
# sum(final_df$Bet_prices >= final_df$Last & final_df$Type == "put"),
# sum(final_df$Type == "put")
# )
# )
# #Short Puts
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_prices < final_df$Last & final_df$Type == "put"),
# sum(final_df$BS_prices < final_df$Last & final_df$Type == "put"),
# sum(final_df$Bet_prices < final_df$Last & final_df$Type == "put"),
# 0
# )
# )
# #Total Longs
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_prices >= final_df$Last),
# sum(final_df$BS_prices >= final_df$Last),
# sum(final_df$Bet_prices >= final_df$Last),
# nrow(final_df)
# )
# )
# #Total Long Capital
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$Last[final_df$HN_prices >= final_df$Last]),
# sum(final_df$Last[final_df$BS_prices >= final_df$Last]),
# sum(final_df$Last[final_df$Bet_prices >= final_df$Last]),
# sum(final_df$Last)
# )
# )
# #Total Shorts
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_prices < final_df$Last),
# sum(final_df$BS_prices < final_df$Last),
# sum(final_df$Bet_prices < final_df$Last),
# 0
# )
# )
# #Total Short Capital
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$Last[final_df$HN_prices < final_df$Last]),
# sum(final_df$Last[final_df$BS_prices < final_df$Last]),
# sum(final_df$Last[final_df$Bet_prices < final_df$Last]),
# 0
# )
# )
# #Total positive contracts
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff>=0),
# sum(final_df$BS_payoff>=0),
# sum(final_df$Bet_payoff>=0),
# sum(final_df$PDAbsolute>=0)
# )
# )
# #Total negative contracts
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff<0),
# sum(final_df$BS_payoff<0),
# sum(final_df$Bet_payoff<0),
# sum(final_df$PDAbsolute<0)
# )
# )
# #Total profit
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff[final_df$HN_payoff>=0]),
# sum(final_df$BS_payoff[final_df$BS_payoff>=0]),
# sum(final_df$Bet_payoff[final_df$Bet_payoff>=0]),
# sum(final_df$PDAbsolute[final_df$PDAbsolute>=0])
# )
# )
# #Total loss
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff[final_df$HN_payoff<0]),
# sum(final_df$BS_payoff[final_df$BS_payoff<0]),
# sum(final_df$Bet_payoff[final_df$Bet_payoff<0]),
# sum(final_df$PDAbsolute[final_df$PDAbsolute<0])
# )
# )
# #P&L (Balance)
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff[final_df$HN_payoff>=0]) + sum(final_df$HN_payoff[final_df$HN_payoff<0]),
# sum(final_df$BS_payoff[final_df$BS_payoff>=0]) + sum(final_df$BS_payoff[final_df$BS_payoff<0]),
# sum(final_df$Bet_payoff[final_df$Bet_payoff>=0]) + sum(final_df$Bet_payoff[final_df$Bet_payoff<0]),
# sum(final_df$PDAbsolute[final_df$PDAbsolute>=0]) + sum(final_df$PDAbsolute[final_df$PDAbsolute<0])
# )
# )
# #Wins - Contract
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff>=0 & final_df$BS_payoff<0),
# sum(final_df$HN_payoff<0 & final_df$BS_payoff>=0),
# sum(final_df$Bet_payoff >= 0 & final_df$HN_payoff<0 & final_df$BS_payoff>=0) + sum(final_df$Bet_payoff >= 0 & final_df$HN_payoff>=0 & final_df$BS_payoff<0),
# NA
# )
# )
# #Losses - Contract
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff<0 & final_df$BS_payoff>=0),
# sum(final_df$HN_payoff>=0 & final_df$BS_payoff<0),
# sum(final_df$Bet_payoff < 0 & final_df$HN_payoff<0 & final_df$BS_payoff>=0) + sum(final_df$Bet_payoff < 0 & final_df$HN_payoff>=0 & final_df$BS_payoff<0),
# NA
# )
# )
# #Wins - Dollars
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff[final_df$HN_payoff>=0 & final_df$BS_payoff<0]),
# sum(final_df$BS_payoff[final_df$HN_payoff<0 & final_df$BS_payoff>=0]),
# sum(final_df$Bet_payoff[final_df$Bet_payoff >= 0 & final_df$HN_payoff<0 & final_df$BS_payoff>=0]) + sum(final_df$Bet_payoff[final_df$Bet_payoff >= 0 & final_df$HN_payoff>=0 & final_df$BS_payoff<0]),
# NA
# )
# )
# #Losses - Dollars
# benchmark_table<-rbind(benchmark_table,c(
# sum(final_df$HN_payoff[final_df$HN_payoff<0 & final_df$BS_payoff>=0]),
# sum(final_df$BS_payoff[final_df$HN_payoff>=0 & final_df$BS_payoff<0]),
# sum(final_df$Bet_payoff[final_df$Bet_payoff < 0 & final_df$HN_payoff<0 & final_df$BS_payoff>=0]) + sum(final_df$Bet_payoff[final_df$Bet_payoff < 0 & final_df$HN_payoff>=0 & final_df$BS_payoff<0]),
# NA
# )
# )
# colnames(benchmark_table)<-c("Heston Nandi", "Black Scholes", "Model Selection", "All Long")
# rownames(benchmark_table)<-c(
# "Long Calls - Contracts",
# "Short Calls - Contracts",
# "Long Puts - Contracts",
# "Short Puts - Contracts",
# "Total Longs - Contracts",
# "Total Longs - Dollars",
# "Total Shorts - Contracts",
# "Total Shorts - Dollars",
# "Total Nonnegative - Contracts",
# "Total Negative - Contracts",
# "Total Profit - Dollars",
# "Total Loss - Dollars",
# "PnL (Balance) - Dollars",
# "Wins - Contracts",
# "Losses - Contracts",
# "Wins - Dollars",
# "Losses - Dollars"
# )
# write.table(benchmark_table,paste0(data_path,"Benchmarks/",underlying_asset,"_",data_year,"_HN_vs_BS_benchmark_2710_marketarpe_summary.csv"),row.names=TRUE,sep=",")
# #####
# ####
# ggplot(bs_required,aes(x=NetMaturity,y=Moneyness,fill=arpe))+geom_tile()
# qplot(x=bs_required$NetMaturity,y=bs_required$Moneyness,geom="tile",fill=bs_required$arpe)
# ##
# hn_dectree<-tree(arpe ~ NetMaturity + Moneyness,data=hn_required)
# plot(hn_dectree)
########
### Plot P&L distribution
########
# Ensure ggplot2 is available (requireNamespace is the idiomatic, and much
# faster, availability check compared to scanning installed.packages()).
if (!requireNamespace("ggplot2", quietly = TRUE))
  install.packages("ggplot2")
library(ggplot2)

#' Read one year of Mondrian P&L benchmark results (selection strategy vs.
#' individual models, monthly refresh, 1-year lookback).
#' @param year Calendar year of the results file (e.g. 2013).
#' @return A data.frame with one row per option contract.
read_pnl_results <- function(year) {
  read.csv(paste0("~/Dropbox/PhD_Workshop/Output Files/Benchmarks/Mondrian/",
                  "SPX_", year,
                  "_pnl_with_realizations_1y_lookback_monthly_refresh_pnl.csv"))
}

a2013_results <- read_pnl_results(2013)
a2012_results <- read_pnl_results(2012)
a2011_results <- read_pnl_results(2011)
a2010_results <- read_pnl_results(2010)
# HN_data_withdiv_asym_5y
# Kernel densities of per-contract P&L, pooled over 2010-2013, for the
# model-selection strategy, selected individual models and the all-long
# market benchmark.
select_density <- density(c(a2013_results$selection, a2012_results$selection,
                            a2011_results$selection, a2010_results$selection))
# Same pooled-selection density under the name used by the plotting code
# below (previously recomputed from identical inputs).
select_density_selection <- select_density
select_density_HN_A_5y <- density(c(a2013_results$HN_data_withdiv_asym_5y,
                                    a2012_results$HN_data_withdiv_asym_5y,
                                    a2011_results$HN_data_withdiv_asym_5y,
                                    a2010_results$HN_data_withdiv_asym_5y))
select_density_BS_5y <- density(c(a2013_results$BS_data_withdiv_5y,
                                  a2012_results$BS_data_withdiv_5y,
                                  a2011_results$BS_data_withdiv_5y,
                                  a2010_results$BS_data_withdiv_5y))
select_density_Levy_S_M_2y <- density(c(a2013_results$Levy_GHYP_data_withdiv_MCMM_50000_iteration_2y_symmetric,
                                        a2012_results$Levy_GHYP_data_withdiv_MCMM_50000_iteration_2y_symmetric,
                                        a2011_results$Levy_GHYP_data_withdiv_MCMM_50000_iteration_2y_symmetric,
                                        a2010_results$Levy_GHYP_data_withdiv_MCMM_50000_iteration_2y_symmetric))
select_density_marketlong <- density(c(a2013_results$market_long,
                                       a2012_results$market_long,
                                       a2011_results$market_long,
                                       a2010_results$market_long))
# Common x-grid for all curves.  (A data-driven grid was previously computed
# here and immediately overwritten; the fixed [-50, 50] grid is what was
# actually used.)
my_x <- seq(-50, 50, length.out = 1000)
# Linearly re-sample every density onto the common grid.
selection_y <- approx(select_density_selection$x, select_density_selection$y, my_x)$y
HN_A_5y_y <- approx(select_density_HN_A_5y$x, select_density_HN_A_5y$y, my_x)$y
BS_5y_y <- approx(select_density_BS_5y$x, select_density_BS_5y$y, my_x)$y
Levy_S_M_2y_y <- approx(select_density_Levy_S_M_2y$x, select_density_Levy_S_M_2y$y, my_x)$y
marketlong_y <- approx(select_density_marketlong$x, select_density_marketlong$y, my_x)$y
benchy <- data.frame(pnl = my_x, selection = selection_y, HN_A_5y = HN_A_5y_y,
                     BS_5y = BS_5y_y, Levy_M_S_2y = Levy_S_M_2y_y,
                     marketlong = marketlong_y)
# Map the data-frame columns (the previous code mapped the global vectors of
# the same values, which works by accident but bypasses `data = benchy`).
pl2 <- ggplot(data = benchy, aes(x = pnl)) +
  geom_line(aes(y = selection, color = "selection")) +
  geom_line(aes(y = HN_A_5y, color = "HN_A_5y")) +
  geom_line(aes(y = Levy_M_S_2y, color = "Levy_M_S_2y")) +
  geom_line(aes(y = BS_5y, color = "BS_5y")) +
  geom_line(aes(y = marketlong, color = "marketlong")) +
  labs(color = "Models") + xlab("Net Profit") + ylab("Density")
ggsave("~/Dropbox/PhD_Workshop/Reports/Progress Report 3 - 20150120/images/pnldensities_models_large.png", pl2, width = 8, height = 5)
# Quick-look base plot of the pooled selection-strategy density.
plot(select_density)
#select_density<-density(some_results$selection)
# Re-sample the market-long density on a wider grid and plot it.
my_x <- seq(-100, 100, length.out = 1000)
# BUG FIX: marketlong_y is a plain numeric vector (the $y returned by
# approx() above), so marketlong_y$x / marketlong_y$y would error with
# "$ operator is invalid for atomic vectors".  Interpolate from the density
# object itself.
my_y <- approx(select_density_marketlong$x, select_density_marketlong$y, my_x)$y
plot(my_x, my_y)
|
# Auto-generated fuzzing/regression case (valgrind harness) for
# DLMtool::LBSPRgen.  The extreme and nonsensical values (huge doubles,
# negative counts, empty vectors) are intentional fuzz inputs, not realistic
# biological parameters -- do not "fix" them.
testlist <- list(Beta = 0, CVLinf = -1.64624770369554e+260, FM = 6.10100294542834e+199, L50 = 0, L95 = 0, LenBins = c(6.32155852617032e-227, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = -2.93744652054299e-306, SL95 = 6.07857795936184e+199, nage = -168430236L, nlen = 788529653L, rLens = numeric(0))
# Invoke the compiled routine with the fuzzed argument list and show the
# structure of whatever it returns (the harness only checks it does not
# crash or leak).
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615829698-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 529
|
r
|
# Auto-generated fuzzing/regression case (valgrind harness) for
# DLMtool::LBSPRgen; the extreme values are deliberate fuzz inputs.
testlist <- list(Beta = 0, CVLinf = -1.64624770369554e+260, FM = 6.10100294542834e+199, L50 = 0, L95 = 0, LenBins = c(6.32155852617032e-227, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = -2.93744652054299e-306, SL95 = 6.07857795936184e+199, nage = -168430236L, nlen = 788529653L, rLens = numeric(0))
# Run the routine and inspect the result's structure (crash/leak check only).
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
#----------------------------------------------#
#----------- Crop Parameters Function ---------#
#-----------       DSSAT/CANEGRO      ---------#
#----------------------------------------------#
#--- Builds the DSSAT/CANEGRO crop parameter files (.CUL and .ECO) from the
#--- master parameter CSV databases.
#--- Working directory with parameters values and master parameter file.
#--- NOTE(review): machine-specific Windows path -- adjust per installation.
wd <- "C:/Users/Dropbox/MuriloVianna/Modeling/DSSAT_CANEGRO/Setup/crop_par"
#--- set the WD (output files below are written here)
setwd(wd)
#--- load the cultivar and ecotype parameter databases
culpar_dbfnm <- "dssat_canegro_culpar.csv"
ecopar_dbfnm <- "dssat_canegro_ecopar.csv"
culpar_db <- read.csv(file = file.path(wd, culpar_dbfnm))
ecopar_db <- read.csv(file = file.path(wd, ecopar_dbfnm))
#--- load the functions that format the .CUL and .ECO file contents
source("dspar_sccan_f.R")
#--- write file.cul and file.eco into the working directory
write(dscul_sccan047(culpar_db), file = "file.cul")
write(dseco_sccan047(ecopar_db), file = "file.eco")
|
/dspar_sccan_f/usage.R
|
no_license
|
Murilodsv/R-scripts
|
R
| false
| false
| 903
|
r
|
#----------------------------------------------#
#----------- Crop Parameters Function ---------#
#-----------       DSSAT/CANEGRO      ---------#
#----------------------------------------------#
#--- Builds the DSSAT/CANEGRO crop parameter files (.CUL and .ECO) from the
#--- master parameter CSV databases.
#--- Working directory with parameters values and master parameter file.
#--- NOTE(review): machine-specific Windows path -- adjust per installation.
wd <- "C:/Users/Dropbox/MuriloVianna/Modeling/DSSAT_CANEGRO/Setup/crop_par"
#--- set the WD (output files below are written here)
setwd(wd)
#--- load the cultivar and ecotype parameter databases
culpar_dbfnm <- "dssat_canegro_culpar.csv"
ecopar_dbfnm <- "dssat_canegro_ecopar.csv"
culpar_db <- read.csv(file = file.path(wd, culpar_dbfnm))
ecopar_db <- read.csv(file = file.path(wd, ecopar_dbfnm))
#--- load the functions that format the .CUL and .ECO file contents
source("dspar_sccan_f.R")
#--- write file.cul and file.eco into the working directory
write(dscul_sccan047(culpar_db), file = "file.cul")
write(dseco_sccan047(ecopar_db), file = "file.eco")
|
# Negated %in%: TRUE for each element of x that is NOT found in y.
`%ni%` <- function(x, y) {
  !(x %in% y)
}
|
/R/negate_util.R
|
no_license
|
lhenneman/hyspdisp
|
R
| false
| false
| 38
|
r
|
# Set complement of %in%: element-wise "not a member of y".
`%ni%` <- function(x, y) !(match(x, y, nomatch = 0L) > 0L)
|
context("test-extent-example.R")

# Small rectangular extent used as the triangulation input for every test.
library(sf)
library(anglr)
library(raster)

my_extent <- st_as_sf(as(extent(c(153.185183093, 153.19443135,
                                  -27.705328446, -27.6967222119999)),
                         "SpatialPolygons"))

test_that("setting max area makes more triangles", {
  ## max_area values larger than the extent leave the triangulation unchanged
  coarse_count <- nrow(DEL(my_extent, max_area = 0.008)$triangle)
  expect_equal(nrow(DEL(my_extent, max_area = 0.0008)$triangle), coarse_count)
  ## a much smaller max_area must force additional triangles
  expect_true(nrow(DEL(my_extent, max_area = 0.000008)$triangle) > coarse_count)
})

test_that("dataframes with only geometry are handled", {
  ## DEL on a geometry-only sf object still yields a TRI model
  expect_that(DEL(my_extent, max_area = 0.008), is_a("TRI"))
})
|
/tests/testthat/test-extent-example.R
|
no_license
|
DrRoad/anglr
|
R
| false
| false
| 711
|
r
|
context("test-extent-example.R")

# Build the extent that all the tests below triangulate.
library(sf)
library(anglr)
library(raster)

my_extent <- st_as_sf(as(extent(c(153.185183093, 153.19443135,
                                  -27.705328446, -27.6967222119999)),
                         "SpatialPolygons"))

test_that("setting max area makes more triangles", {
  ## triangle counts for three decreasing max_area settings
  n_large <- nrow(DEL(my_extent, max_area = 0.008)$triangle)
  n_mid   <- nrow(DEL(my_extent, max_area = 0.0008)$triangle)
  n_fine  <- nrow(DEL(my_extent, max_area = 0.000008)$triangle)
  ## coarse settings are indistinguishable; the fine one adds triangles
  expect_equal(n_mid, n_large)
  expect_true(n_fine > n_large)
})

test_that("dataframes with only geometry are handled", {
  expect_that(DEL(my_extent, max_area = 0.008), is_a("TRI"))
})
|
## plot5.R -- course project question 5:
## how have motor-vehicle emissions in Baltimore City changed over time?
library(dplyr)
## Read the NEI emissions summary and the source classification table
nei_data <- readRDS("./summarySCC_PM25.rds")
scc_data <- readRDS("./Source_Classification_Code.rds")
## Restrict to Baltimore City records (fips code 24510)
baltimore <- nei_data[nei_data$fips == "24510", ]
## Join the classification columns, keyed on SCC
merged <- merge(baltimore, scc_data, by = "SCC")
## Keep just the columns used below
slim <- transmute(merged, year, Emissions, SCC.Level.Two)
## Retain vehicle-related sources only
vehicles <- slim[grepl("Vehicle", slim$SCC.Level.Two), ]
## Total emissions per year
plot_data <- aggregate(Emissions ~ year, data = vehicles, sum)
## Render the line chart into a PNG device
png("./plot5.png")
plot(plot_data$year, plot_data$Emissions, type = "l",
     main = "Emissions from motor vehicles in Baltimore City",
     xlab = "year", ylab = "Emissions", ylim = c(100, 450), col = "darkred", lwd = 2)
dev.off()
|
/plot5.R
|
no_license
|
jjedrzejowski/ExData_courseproject
|
R
| false
| false
| 794
|
r
|
## plot5.R -- assignment question 5: motor-vehicle emissions in Baltimore City
library(dplyr)
## Load the emissions data and its source classification codes
emissions <- readRDS("./summarySCC_PM25.rds")
source_codes <- readRDS("./Source_Classification_Code.rds")
## Baltimore City only (fips == "24510")
balt <- emissions[emissions$fips == "24510", ]
## Merge classification details onto the emissions rows via SCC
joined <- merge(balt, source_codes, by = "SCC")
## Reduce to the three columns needed for the plot
needed <- transmute(joined, year, Emissions, SCC.Level.Two)
## Filter down to vehicle sources
veh <- needed[grepl("Vehicle", needed$SCC.Level.Two), ]
## Aggregate emissions by year
yearly <- aggregate(Emissions ~ year, data = veh, sum)
## Plot to a PNG file
png("./plot5.png")
plot(yearly$year, yearly$Emissions, type = "l",
     main = "Emissions from motor vehicles in Baltimore City",
     xlab = "year", ylab = "Emissions", ylim = c(100, 450), col = "darkred", lwd = 2)
dev.off()
|
#' Collect the variables a probability expression depends on.
#'
#' Recursively walks the expression tree `P` and returns the union of every
#' variable appearing in a leaf term: the main variable(s) (`var`), the
#' conditioning set (`cond`) and the intervention set (`do`).
#'
#' @param P A probability-expression object: a list with logical flags
#'   `fraction`, `sum` and `product`, plus `num`/`den` (fraction nodes),
#'   `children` (sum/product nodes) or `var`/`cond`/`do` (leaf terms).
#' @return A character vector of unique variable names.
dependencies <- function(P) {
  # Fraction node: depends on whatever numerator and denominator depend on.
  if (P$fraction) return(union(dependencies(P$num), dependencies(P$den)))
  # Sum and product nodes: union over all children.  seq_along() (rather
  # than 1:length()) iterates zero times for an empty child list instead of
  # indexing with c(1, 0) and erroring.
  if (P$sum || P$product) {
    dep <- c()
    for (i in seq_along(P$children)) {
      dep <- union(dep, dependencies(P$children[[i]]))
    }
    return(dep)
  }
  # Leaf node: its own variable plus conditioning and intervention sets.
  unique(c(P$var, P$cond, P$do))
}
|
/R/dependencies.R
|
no_license
|
cran/causaleffect
|
R
| false
| false
| 447
|
r
|
#' Return the unique variables an expression tree `P` depends on.
#'
#' Fraction nodes recurse into `num`/`den`; sum and product nodes take the
#' union over `children`; leaf terms contribute `var`, `cond` and `do`.
#'
#' @param P Probability-expression list (flags `fraction`/`sum`/`product`).
#' @return Character vector of unique variable names.
dependencies <- function(P) {
  if (P$fraction) return(union(dependencies(P$num), dependencies(P$den)))
  # seq_along() fixes the 1:length() footgun: an empty `children` list now
  # yields an empty dependency set instead of an out-of-bounds error.
  if (P$sum || P$product) {
    dep <- c()
    for (i in seq_along(P$children)) {
      dep <- union(dep, dependencies(P$children[[i]]))
    }
    return(dep)
  }
  unique(c(P$var, P$cond, P$do))
}
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
# Verify that h2o.getAutoML() re-fetches a finished AutoML run by project
# name with the same leader model and leaderboard as the original handle.
automl.get.automl.test <- function() {
  # Load data and split into train, valid and test sets
  train <- h2o.uploadFile(locate("smalldata/testng/higgs_train_5k.csv"),
                          destination_frame = "higgs_train_5k")
  test <- h2o.uploadFile(locate("smalldata/testng/higgs_test_5k.csv"),
                         destination_frame = "higgs_test_5k")
  ss <- h2o.splitFrame(test, seed = 1)
  valid <- ss[[1]]
  # BUG FIX: `test` was also assigned ss[[1]], so the split had no effect and
  # valid/test referenced the same frame.  Use the second partition instead.
  test <- ss[[2]]
  y <- "response"
  x <- setdiff(names(train), y)
  # Binary classification: coerce the response to a factor in both frames.
  train[, y] <- as.factor(train[, y])
  test[, y] <- as.factor(test[, y])
  max_models <- 3
  aml1 <- h2o.automl(y = y,
                     training_frame = train,
                     project_name = "r_aml1",
                     stopping_rounds = 3,
                     stopping_tolerance = 0.001,
                     stopping_metric = "AUC",
                     max_models = max_models,
                     seed = 1234)
  # Use h2o.getAutoML to get previous automl instance by project name.
  get_aml1 <- h2o.getAutoML(aml1@project_name)
  print("Leader model ID/project_name for original automl object")
  print(aml1@leader@model_id)
  print(aml1@project_name)
  print("Leader model ID/project_name after fetching original automl object")
  print(get_aml1@leader@model_id)
  print(get_aml1@project_name)
  # The fetched handle must be indistinguishable from the original.
  expect_equal(aml1@project_name, get_aml1@project_name)
  expect_equal(aml1@leader@model_id, get_aml1@leader@model_id)
  expect_equal(aml1@leaderboard, get_aml1@leaderboard)
}
doTest("AutoML h2o.getAutoML Test", automl.get.automl.test)
|
/h2o-r/tests/testdir_algos/automl/runit_automl_get_automl.R
|
permissive
|
KR8T3R/h2o-3
|
R
| false
| false
| 1,675
|
r
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
# Regression test: h2o.getAutoML(project_name) must return a handle whose
# leader, project name and leaderboard match the original AutoML object.
automl.get.automl.test <- function() {
  # Load data and split into train, valid and test sets
  train <- h2o.uploadFile(locate("smalldata/testng/higgs_train_5k.csv"),
                          destination_frame = "higgs_train_5k")
  test <- h2o.uploadFile(locate("smalldata/testng/higgs_test_5k.csv"),
                         destination_frame = "higgs_test_5k")
  ss <- h2o.splitFrame(test, seed = 1)
  valid <- ss[[1]]
  # BUG FIX: previously `test <- ss[[1]]` duplicated the validation split;
  # the test partition is the second element of the split.
  test <- ss[[2]]
  y <- "response"
  x <- setdiff(names(train), y)
  # Coerce the binary response to a factor for classification.
  train[, y] <- as.factor(train[, y])
  test[, y] <- as.factor(test[, y])
  max_models <- 3
  aml1 <- h2o.automl(y = y,
                     training_frame = train,
                     project_name = "r_aml1",
                     stopping_rounds = 3,
                     stopping_tolerance = 0.001,
                     stopping_metric = "AUC",
                     max_models = max_models,
                     seed = 1234)
  # Use h2o.getAutoML to get previous automl instance
  get_aml1 <- h2o.getAutoML(aml1@project_name)
  print("Leader model ID/project_name for original automl object")
  print(aml1@leader@model_id)
  print(aml1@project_name)
  print("Leader model ID/project_name after fetching original automl object")
  print(get_aml1@leader@model_id)
  print(get_aml1@project_name)
  expect_equal(aml1@project_name, get_aml1@project_name)
  expect_equal(aml1@leader@model_id, get_aml1@leader@model_id)
  expect_equal(aml1@leaderboard, get_aml1@leaderboard)
}
doTest("AutoML h2o.getAutoML Test", automl.get.automl.test)
|
#' DB to memory
#'
#' Read every relevant table (and view) from the database and either assign
#' each one to the global environment or return them as a named list.
#'
#' @param return_list Return the objects as a list, rather than assignation to global environment
#' @return Invisibly nothing when assigning to the global environment;
#'   otherwise a named list with one element per table/view.
#' @export
db_to_memory <- function(return_list = FALSE){
  out_list <- list()
  # Read in all tables
  # tables <- unique(dbListTables(pool))
  tables <- c('cities',
              'people',
              'trip_meetings',
              'trips',
              'user_action_log',
              'venue_events',
              'venue_types',
              'view_all_trips_people_meetings_venues',
              'users')#,
  # 'venue_events',
  # 'venue_types'
  # )
  # Add the views to the tables
  conn <- db_get_connection()
  # Release the connection even if one of the reads below errors out
  on.exit(db_release_connection(conn), add = TRUE)
  tables <- c(tables, 'view_trip_coincidences',
              # 'events',
              'view_trips_and_meetings')
  # seq_along() is safer than 1:length() should `tables` ever be empty
  for (i in seq_along(tables)){
    this_table <- tables[i]
    message(paste0('Reading in the ', this_table, ' from the database and assigning to global environment.'))
    x <- get_data(tab = this_table,
                  schema = 'pd_wbgtravel',
                  connection_object = conn)
    # Re-shape events before assigning to global environment
    if(this_table == 'events'){
      message(paste0('Restructuring events table'))
      x <- x %>%
        # Restructure like the events table
        dplyr::rename(Person = short_name,
                      Organization = organization,
                      `City of visit` = city_name,
                      `Country of visit` = country_name,
                      Counterpart = trip_reason,
                      `Visit start` = trip_start_date,
                      `Visit end` = trip_end_date,
                      Lat = latitude,
                      Long = longitude,
                      Event = meeting_topic) %>%
        dplyr::select(Person, Organization, `City of visit`, `Country of visit`,
                      Counterpart, `Visit start`, `Visit end`, Lat, Long, Event) %>%
        distinct(Person, Organization, `City of visit`, `Country of visit`,
                 Counterpart, `Visit start`, `Visit end`, Event, .keep_all = TRUE)
    }
    if(return_list){
      out_list[[i]] <- x
      names(out_list)[i] <- this_table
    } else {
      # Side effect: create one global object per table, named after it
      assign(this_table,
             x,
             envir = .GlobalEnv)
    }
  }
  if(return_list){
    return(out_list)
  }
}
|
/R/db_to_memory.R
|
no_license
|
databrew/traveldash
|
R
| false
| false
| 2,484
|
r
|
#' DB to memory
#'
#' Assign the data from the database to the global environment
#' @param return_list Return the objects as a list, rather than assignation to global environment
#' @return Objects assigned to global environment
#' @export
db_to_memory <- function(return_list = FALSE){
  out_list <- list()
  # Read in all tables
  # tables <- unique(dbListTables(pool))
  # Hard-coded list of tables to load (dbListTables was abandoned, see above)
  tables <- c('cities',
              'people',
              'trip_meetings',
              'trips',
              'user_action_log',
              'venue_events',
              'venue_types',
              'view_all_trips_people_meetings_venues',
              'users')#,
  # 'venue_events',
  # 'venue_types'
  # )
  # Add the views to the tables
  conn <- db_get_connection()
  tables <- c(tables, 'view_trip_coincidences',
              # 'events',
              'view_trips_and_meetings')
  for (i in 1:length(tables)){
    this_table <- tables[i]
    message(paste0('Reading in the ', this_table, ' from the database and assigning to global environment.'))
    # get_data() is a project helper that SELECTs the table from the schema
    x <- get_data(tab = this_table,
                  schema = 'pd_wbgtravel',
                  connection_object = conn)
    # Re-shape events before assigning to global environment
    # (dead branch while 'events' is commented out of `tables` above)
    if(this_table == 'events'){
      message(paste0('Restructuring events table'))
      x <- x %>%
        # Restructure like the events table
        dplyr::rename(Person = short_name,
                      Organization = organization,
                      `City of visit` = city_name,
                      `Country of visit` = country_name,
                      Counterpart = trip_reason,
                      `Visit start` = trip_start_date,
                      `Visit end` = trip_end_date,
                      Lat = latitude,
                      Long = longitude,
                      Event = meeting_topic) %>%
        dplyr::select(Person, Organization, `City of visit`, `Country of visit`,
                      Counterpart, `Visit start`, `Visit end`, Lat, Long, Event) %>%
        distinct(Person, Organization, `City of visit`, `Country of visit`,
                 Counterpart, `Visit start`, `Visit end`, Event, .keep_all = TRUE)
    }
    if(return_list){
      out_list[[i]] <- x
      names(out_list)[i] <- this_table
    } else {
      # Side effect: create one global object per table, named after it
      assign(this_table,
             x,
             envir = .GlobalEnv)
    }
  }
  db_release_connection(conn)
  if(return_list){
    return(out_list)
  }
}
|
# Read a Hacker Cup input file, solve each case with answerFunc() and write
# one "Case #k: answer" line per case to the output file.
# Connections are closed on exit, even if a case errors out.
processFile = function(inFilePath, outFilePath) {
  inCon = file(inFilePath, "r")
  outCon = file(outFilePath, "w")
  # Guarantee cleanup of both connections on any exit path
  on.exit({close(inCon); close(outCon)}, add = TRUE)
  # First line: number of test cases
  T = as.integer(readLines(inCon, n = 1))
  # seq_len() handles T == 0 correctly, unlike 1:T
  for (casenum in seq_len(T)){
    # Case line 1: "<hitpoints> <numSpells>"
    info = unlist(strsplit(readLines(inCon, n = 1), " "))
    hitpoints = as.numeric(info[1])
    numSpells = as.integer(info[2])
    # Case line 2: space-separated dice expressions such as "2d6+1"
    spells = unlist(strsplit(readLines(inCon, n = 1)," "))
    answer = answerFunc(hitpoints, spells)
    writeOut = paste0("Case #", casenum, ": ", answer)
    writeLines(writeOut, outCon)
  }
}
# Return the best probability, over all offered spells, of rolling a damage
# total of at least `hitpoints`.  Each spell is a dice expression such as
# "2d6+1"; dice::getSumProbs() supplies the distribution of the sum.
answerFunc = function(hitpoints, spells){
  answer = -1  # sentinel lower than any valid probability
  for (spell in spells){
    spellStats = getSpellStats(spell)
    out = getSumProbs(
      ndicePerRoll = spellStats$numDice
      ,nsidesPerDie = spellStats$diceType
      ,sumModifier = spellStats$addOn
      ,perDieMinOfOne = FALSE)
    # Probability mass of all sums meeting or exceeding the hitpoints
    idx = which(out$probabilities[,'Sum'] >= hitpoints)
    prob = sum(out$probabilities[,'Probability'][idx])
    if (prob > answer){
      answer = prob
    }
  }
  return (answer)
}
# Parse a dice expression of the form "XdY", "XdY+Z" or "XdY-Z" into its
# components: number of dice, sides per die, and additive modifier
# (0 when no modifier is present).
getSpellStats = function(spell){
  parts = unlist(strsplit(spell, "d|\\+|\\-"))
  nDice = as.integer(parts[1])
  nSides = as.integer(parts[2])
  # A "+" anywhere in the expression means a positive modifier
  sign = if (grepl("\\+", spell)) 1 else -1
  modifier = as.numeric(parts[3]) * sign
  if (is.na(modifier)) modifier = 0
  return (list(numDice = nDice, diceType = nSides, addOn = modifier))
}
# dice provides getSumProbs(), used by answerFunc()
library(dice)
inFilePath = './input/fighting_the_zombie.txt'
outFilePath = './output/fighting_the_zombie.txt'
processFile(inFilePath, outFilePath)
|
/Prelim/3.Zombie/script.R
|
no_license
|
dtfoster/FacebookHackCup2017
|
R
| false
| false
| 1,600
|
r
|
# Facebook Hacker Cup: "Fighting the Zombie".
# Reads T cases; each case gives the zombie's hitpoints and a list of dice
# spells, and we output the best probability of dealing >= hitpoints damage.
processFile = function(inFilePath, outFilePath) {
  inCon = file(inFilePath, "r")
  outCon = file(outFilePath, "w")
  # First line: number of test cases
  T = as.integer(readLines(inCon, n = 1))
  for (casenum in 1:T){
    # Case line 1: "<hitpoints> <numSpells>"
    info = unlist(strsplit(readLines(inCon, n = 1), " "))
    hitpoints = as.numeric(info[1])
    numSpells = as.integer(info[2])  # parsed but not used; spells are split directly
    # Case line 2: space-separated dice expressions such as "2d6+1"
    spells = unlist(strsplit(readLines(inCon, n = 1)," "))
    answer = answerFunc(hitpoints, spells)
    writeOut = paste0("Case #", casenum, ": ", answer)
    writeLines(writeOut, outCon)
  }
  close(inCon)
  close(outCon)
}

# Best probability over all spells of rolling a sum >= hitpoints.
answerFunc = function(hitpoints, spells){
  answer = -1  # sentinel lower than any valid probability
  for (spell in spells){
    spellStats = getSpellStats(spell)
    out = getSumProbs(
      ndicePerRoll = spellStats$numDice
      ,nsidesPerDie = spellStats$diceType
      ,sumModifier = spellStats$addOn
      ,perDieMinOfOne = FALSE)
    # Probability mass of all sums meeting or exceeding the hitpoints
    idx = which(out$probabilities[,'Sum'] >= hitpoints)
    prob = sum(out$probabilities[,'Probability'][idx])
    if (prob > answer){
      answer = prob
    }
  }
  return (answer)
}

# Parse a dice expression "XdY", "XdY+Z" or "XdY-Z" into its components.
getSpellStats = function(spell){
  tmp = unlist(strsplit(spell, "d|\\+|\\-"))
  numDice = as.integer(tmp[1])
  diceType = as.integer(tmp[2])
  # A "+" anywhere in the expression means a positive modifier
  addOnSign = ifelse(grepl("\\+", spell), 1, -1)
  addOn = as.numeric(tmp[3]) * addOnSign
  addOn = ifelse(is.na(addOn),0,addOn)  # no modifier present -> 0
  return (list(numDice = numDice, diceType = diceType, addOn = addOn))
}

# dice provides getSumProbs(), used by answerFunc()
library(dice)
inFilePath = './input/fighting_the_zombie.txt'
outFilePath = './output/fighting_the_zombie.txt'
processFile(inFilePath, outFilePath)
|
\name{training}
\alias{training}
\title{
creates an object of class training
}
\description{
Takes a list of activity objects and an object of class athlete, and creates a new object of class training.
}
\usage{
training(actls, ath)
}
\arguments{
\item{actls}{
list with activity class objects
}
\item{ath}{
object of class athlete
}
}
\value{
\item{tr}{
the new object of class training
}
}
|
/TRimpTest/man/training.Rd
|
no_license
|
Alefie/trimp
|
R
| false
| false
| 441
|
rd
|
\name{training}
\alias{training}
\title{
creates an object of class training
}
\description{
Takes a list of activity objects and an object of class athlete, and creates a new object of class training.
}
\usage{
training(actls, ath)
}
\arguments{
\item{actls}{
list with activity class objects
}
\item{ath}{
object of class athlete
}
}
\value{
\item{tr}{
the new object of class training
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_fclust.R
\name{plot_motifs_infos}
\alias{plot_motifs_infos}
\title{Plot reference graphs for checking that plot of sorted data are right}
\usage{
plot_motifs_infos(res, opt.sort = "performance")
}
\arguments{
\item{res}{the result of a functional clustering
obtained using the function \code{\link{fclust}}.}
\item{opt.sort}{a string,
that specifies the way for sorting the motifs.
It can be \code{"performance"} or \code{"name"},
indicating a sorting by motif performances,
or a sorting by motif names.}
}
\value{
Nothing. It is a procedure.
}
\description{
Plot two reference graphs for checking
that plot of sorted data are right:
the non-sorted assembly motifs
and assembly motifs sorted by decreasing mean observed performances.
}
\details{
This function was originally useful only for setting up the R-code.
It is now useful for users to check the results,
and to be confident that the plots are correct.
The written values are (from top to bottom):
names of assembly motifs, their effectifs,
the mean observed performance, the order
and the symbols (colour x symbol) systematically associated
with each assembly motif in all plots produced
by the package \code{functClust}.
}
\keyword{internal}
|
/man/plot_motifs_infos.Rd
|
no_license
|
cran/functClust
|
R
| false
| true
| 1,313
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_fclust.R
\name{plot_motifs_infos}
\alias{plot_motifs_infos}
\title{Plot reference graphs for checking that plot of sorted data are right}
\usage{
plot_motifs_infos(res, opt.sort = "performance")
}
\arguments{
\item{res}{the result of a functional clustering
obtained using the function \code{\link{fclust}}.}
\item{opt.sort}{a string,
that specifies the way for sorting the motifs.
It can be \code{"performance"} or \code{"name"},
indicating a sorting by motif performances,
or a sorting by motif names.}
}
\value{
Nothing. It is a procedure.
}
\description{
Plot two reference graphs for checking
that plot of sorted data are right:
the non-sorted assembly motifs
and assembly motifs sorted by decreasing mean observed performances.
}
\details{
This function was originally useful only for setting up the R-code.
It is now useful for users to check the results,
and to be confident that the plots are correct.
The written values are (from top to bottom):
names of assembly motifs, their effectifs,
the mean observed performance, the order
and the symbols (colour x symbol) systematically associated
with each assembly motif in all plots produced
by the package \code{functClust}.
}
\keyword{internal}
|
# Guard test: the phytoplankton water-quality factor must contain only the
# four expected quality levels.
test_that("Checking levels in phytoplankton data", {
  observed <- levels(rsphydat$valqual)
  allowed <- c('Very low', 'Low', 'Medium', 'High')
  expect_false(any(!observed %in% allowed))
})
|
/tests/testthat/test-phylevs.R
|
permissive
|
tbep-tech/piney-point
|
R
| false
| false
| 182
|
r
|
# Guard test: the phytoplankton water-quality factor must contain only the
# four expected quality levels.
test_that("Checking levels in phytoplankton data", {
  chk <- levels(rsphydat$valqual)
  # TRUE if any observed level falls outside the allowed set
  chk <- any(!chk %in% c('Very low', 'Low', 'Medium', 'High'))
  expect_false(chk)
})
|
## Figure 6.2 -- compare three common smoothing kernels K_lambda(x0, x)
t <- seq(-3, 3, by = 0.05)
support <- abs(t) <= 1
# Epanechnikov kernel: 3/4 * (1 - t^2) inside the unit interval, 0 outside
D.e <- ifelse(support, 0.75 * (1 - t^2), 0)
# Tri-cube kernel: (1 - |t|^3)^3 inside the unit interval, 0 outside
D.t <- ifelse(support, (1 - abs(t)^3)^3, 0)
# Gaussian kernel: standard normal density, unbounded support
D.g <- dnorm(t)
plot(t, D.t, type = "b", lty = 1, pch = 16, col = "black",
     xlab = "t", ylab = expression(K[lambda](x[0], x)))
lines(t, D.e, type = "b", lty = 1, pch = 16, col = "red")
lines(t, D.g, type = "b", lty = 1, pch = 16, col = "yellow")
legend(1.5, 0.9, legend = c("Tri-cube","Epanechnikov","Gaussian"),
       fill = c("black","red","yellow"))
|
/Other/Kernel Smoothing Mehods/kernel03.R
|
no_license
|
chengong8225/Coded-by-Myself
|
R
| false
| false
| 491
|
r
|
## Figure 6.2: three common smoothing kernels K_lambda(x0, x)
t <- seq(-3,3,0.05)
# Epanechnikov: quadratic kernel with compact support on |t| <= 1
D.e <- ifelse(abs(t)<=1,3/4*(1-t^2),0)
# Tri-cube: compact support, flatter near the centre
D.t <- ifelse(abs(t)<=1,(1-(abs(t))^3)^3,0)
# Gaussian: standard normal density, unbounded support
D.g <- dnorm(t,0,1)
plot(t, D.t, lty=1, type = "b", pch=16 ,col="black",xlab = "t" ,ylab = expression(K[lambda](x[0],x)))
lines(t, D.e, lty=1, type = "b", pch=16 ,col="red")
lines(t, D.g, lty=1, type = "b", pch=16 ,col="yellow")
legend(1.5,0.9, legend=c("Tri-cube","Epanechnikov","Gaussian"), fill = c("black","red","yellow"))
|
# Map and network plots of author addresses extracted with the refnet package.
library(ggmap)
library(tidyverse)
library(stringr)
library(stringi)
library(refnet)
load("./output/eb_refined.Rdata")  # loads eb_refined into the workspace
world <- map_data("world")
# Geocode the address strings to latitude/longitude
zz <- address_lat_long(data=eb_refined)
plot_addresses_points(data=zz)
plot_addresses_country(data=zz)
# Co-authorship networks at three aggregation levels
s <- net_plot_coauthor(data=zz)
s$data
q <- net_plot_coauthor_country(data=zz)
q$plot
p <- net_plot_coauthor_address(data=zz)
p$plot
|
/using_refnet_troubleshooting/mapping_addresses.R
|
no_license
|
aurielfournier/refnet_materials
|
R
| false
| false
| 387
|
r
|
# Map and network plots of author addresses extracted with the refnet package.
library(ggmap)
library(tidyverse)
library(stringr)
library(stringi)
library(refnet)
load("./output/eb_refined.Rdata")  # loads eb_refined into the workspace
world <- map_data("world")
# Geocode the address strings to latitude/longitude
zz <- address_lat_long(data=eb_refined)
plot_addresses_points(data=zz)
plot_addresses_country(data=zz)
# Co-authorship networks at three aggregation levels
s <- net_plot_coauthor(data=zz)
s$data
q <- net_plot_coauthor_country(data=zz)
q$plot
p <- net_plot_coauthor_address(data=zz)
p$plot
|
# Load the processed Q10 dataset: blank strings become NA, temperature-range
# factor levels are re-ordered (reorder_temp_levels() is a project helper),
# and category labels are normalised to lower case.
Q10_data = read.csv("data/processed/Q10_data.csv", na.strings = "") %>%
  reorder_temp_levels(.) %>%
  mutate(Ecosystem_type = tolower(Ecosystem_type),
         Biome = tolower(Biome))

# One horizontal segment per distinct incubation temperature range
# (red = start, black = stop), facetted by gas species and incubation type.
plot_temperature_ranges = function(Q10_data){
  Q10_data_temps =
    Q10_data %>%
    distinct(Temp_range_old, Species, Incubation) %>%
    # "start_stop" strings -> numeric start/stop columns
    separate(Temp_range_old, sep = "_", into = c("temp_start", "temp_stop")) %>%
    mutate(temp_start = as.numeric(temp_start),
           temp_stop = as.numeric(temp_stop))

  Q10_data_temps %>%
    filter(!is.na(Species) & !is.na(Incubation)) %>%
    arrange(temp_start, temp_stop) %>%
    # NOTE(review): rownames_to_column() wrapped inside mutate() looks suspect;
    # `.` refers to the pipe input here, not the arranged data.  Presumably a
    # plain `rownames_to_column("y")` pipeline step was intended -- confirm.
    mutate(rownames_to_column(., "y")) %>%
    ggplot(aes(y = y))+
    geom_point(aes(x = temp_start), color = "red")+
    geom_point(aes(x = temp_stop), color = "black")+
    geom_segment(aes(x = temp_start, xend = temp_stop, yend = y))+
    theme_bw()+
    scale_x_continuous(minor_breaks = seq(-20, 50, 5))+
    theme(axis.text.y = element_blank(),
          axis.ticks = element_blank(),
          axis.title = element_blank(),
          axis.line.y = element_blank())+
    labs(title = "all data, all temperature ranges")+
    facet_grid(Species ~ Incubation)
}

# Scatter of mean annual temperature vs mean annual precipitation,
# coloured by gas species.
plot_mat_map = function(Q10_data){
  Q10_data %>%
    filter(!is.na(Species)) %>%
    ggplot(aes(x = MAT_C, y = MAP_mm))+
    geom_point(aes(color = Species), size = 2)+
    theme_classic()
}
# --- climate-data exploration (script section) ---

# University of Delaware gridded climate summary (0.5-degree resolution)
UDel_summarized_climate = read.csv("UDel_summarized_climate.csv")
UDel_summarized_climate %>%
  ggplot(aes(x = Longitude, y = Latitude, color = MAT))+
  geom_point()

# Snap each site's coordinates to the nearest 0.5-degree grid-cell centre
# (the +/- 0.25 offset re-centres onto the climate grid), then join the
# UDel climate summary and the Koeppen-Geiger classification.
# NOTE(review): KoeppenGeigerASCII is only defined further down this script;
# run the blocks below first or this left_join fails -- confirm intended order.
Q10_MAT =
  Q10_data %>%
  mutate(Latitude2 = round(Latitude*2)/2,
         Longitude2 = round(Longitude*2)/2,
         Lat_dif = ifelse(Latitude2 - Latitude >=0, 0.25, -0.25),
         Lon_dif = ifelse(Longitude2 - Longitude >=0, 0.25, -0.25),
         Latitude2 = Latitude2 - Lat_dif,
         Longitude2 = Longitude2 - Lon_dif) %>%
  dplyr::select(-Lat_dif, -Lon_dif) %>%
  left_join(UDel_summarized_climate, by=c("Latitude2"="Latitude", "Longitude2"="Longitude")) %>%
  left_join(KoeppenGeigerASCII, by=c("Latitude2"="Latitude", "Longitude2"="Longitude"))

# MAT/MAP space for the two main gas species, coloured by climate class
Q10_MAT %>%
  filter(!is.na(Species)) %>%
  filter(Species %in% c("CO2", "CH4")) %>%
  ggplot(aes(x = MAT, y = MAP))+
  geom_point(aes(color = ClimateTypes_2), size = 2)+
  facet_wrap(~Species)+
  theme_classic()

# Koeppen-Geiger climate classes per grid cell
KoeppenGeigerASCII = readxl::read_xlsx("KoeppenGeigerASCII.xlsx")
KoeppenGeigerASCII %>%
  ggplot(aes(x = Longitude, y = Latitude, color = ClimateTypes))+
  geom_point()

# Collapse the detailed Koeppen-Geiger codes to their five main groups
# (first letter A-E)
KoeppenGeigerASCII =
  KoeppenGeigerASCII %>%
  mutate(ClimateTypes_2 = case_when(grepl("A", ClimateTypes) ~ "equatorial",
                                    grepl("B", ClimateTypes) ~ "arid",
                                    grepl("C", ClimateTypes) ~ "temperate",
                                    grepl("D", ClimateTypes) ~ "snow",
                                    grepl("E", ClimateTypes) ~ "polar"))
KoeppenGeigerASCII %>%
  ggplot(aes(x = Longitude, y = Latitude, color = ClimateTypes_2))+
  geom_point()

# Climate classes in MAT/MAP space
KoeppenGeigerASCII_2 =
  KoeppenGeigerASCII %>%
  left_join(UDel_summarized_climate)
KoeppenGeigerASCII_2 %>%
  ggplot(aes(x = MAT, y = MAP))+
  geom_point(aes(color = ClimateTypes_2))

# CO2 Q10 by incubation, per climate class; the "snow" class is plotted
# separately (and the extreme Q10 >= 300 values are dropped elsewhere)
nonsnow =
  Q10_MAT %>%
  filter(Species == "CO2" & ClimateTypes_2 != "snow") %>%
  filter(Q10 < 300) %>%
  ggplot(aes(x = Incubation, y = Q10, color = Incubation, group = Incubation))+
  geom_jitter(width = 0.2, size = 1)+
  facet_wrap(~ClimateTypes_2, ncol = 4)
snow =
  Q10_MAT %>%
  filter(Species == "CO2" & ClimateTypes_2 == "snow") %>%
  ggplot(aes(x = Incubation, y = Q10, color = Incubation, group = Incubation))+
  geom_jitter(width = 0.2, size = 1)+
  facet_wrap(~ClimateTypes_2, ncol = 4)

# Combine the two panels with a shared legend
library(patchwork)
nonsnow + snow +
  plot_layout(widths = c(4, 1),
              guides = "collect") &
  theme(legend.position = "top") &
  labs(x = "")
|
/code/3a-functions-data_exploration.R
|
no_license
|
kaizadp/field_lab_q10
|
R
| false
| false
| 3,908
|
r
|
# Load the processed Q10 dataset: blank strings become NA, temperature-range
# factor levels are re-ordered (reorder_temp_levels() is a project helper),
# and category labels are normalised to lower case.
Q10_data = read.csv("data/processed/Q10_data.csv", na.strings = "") %>%
  reorder_temp_levels(.) %>%
  mutate(Ecosystem_type = tolower(Ecosystem_type),
         Biome = tolower(Biome))

# One horizontal segment per distinct incubation temperature range
# (red = start, black = stop), facetted by gas species and incubation type.
plot_temperature_ranges = function(Q10_data){
  Q10_data_temps =
    Q10_data %>%
    distinct(Temp_range_old, Species, Incubation) %>%
    # "start_stop" strings -> numeric start/stop columns
    separate(Temp_range_old, sep = "_", into = c("temp_start", "temp_stop")) %>%
    mutate(temp_start = as.numeric(temp_start),
           temp_stop = as.numeric(temp_stop))

  Q10_data_temps %>%
    filter(!is.na(Species) & !is.na(Incubation)) %>%
    arrange(temp_start, temp_stop) %>%
    # NOTE(review): rownames_to_column() wrapped inside mutate() looks suspect;
    # `.` refers to the pipe input here, not the arranged data.  Presumably a
    # plain `rownames_to_column("y")` pipeline step was intended -- confirm.
    mutate(rownames_to_column(., "y")) %>%
    ggplot(aes(y = y))+
    geom_point(aes(x = temp_start), color = "red")+
    geom_point(aes(x = temp_stop), color = "black")+
    geom_segment(aes(x = temp_start, xend = temp_stop, yend = y))+
    theme_bw()+
    scale_x_continuous(minor_breaks = seq(-20, 50, 5))+
    theme(axis.text.y = element_blank(),
          axis.ticks = element_blank(),
          axis.title = element_blank(),
          axis.line.y = element_blank())+
    labs(title = "all data, all temperature ranges")+
    facet_grid(Species ~ Incubation)
}

# Scatter of mean annual temperature vs mean annual precipitation,
# coloured by gas species.
plot_mat_map = function(Q10_data){
  Q10_data %>%
    filter(!is.na(Species)) %>%
    ggplot(aes(x = MAT_C, y = MAP_mm))+
    geom_point(aes(color = Species), size = 2)+
    theme_classic()
}
# --- climate-data exploration (script section) ---

# University of Delaware gridded climate summary (0.5-degree resolution)
UDel_summarized_climate = read.csv("UDel_summarized_climate.csv")
UDel_summarized_climate %>%
  ggplot(aes(x = Longitude, y = Latitude, color = MAT))+
  geom_point()

# Snap each site's coordinates to the nearest 0.5-degree grid-cell centre
# (the +/- 0.25 offset re-centres onto the climate grid), then join the
# UDel climate summary and the Koeppen-Geiger classification.
# NOTE(review): KoeppenGeigerASCII is only defined further down this script;
# run the blocks below first or this left_join fails -- confirm intended order.
Q10_MAT =
  Q10_data %>%
  mutate(Latitude2 = round(Latitude*2)/2,
         Longitude2 = round(Longitude*2)/2,
         Lat_dif = ifelse(Latitude2 - Latitude >=0, 0.25, -0.25),
         Lon_dif = ifelse(Longitude2 - Longitude >=0, 0.25, -0.25),
         Latitude2 = Latitude2 - Lat_dif,
         Longitude2 = Longitude2 - Lon_dif) %>%
  dplyr::select(-Lat_dif, -Lon_dif) %>%
  left_join(UDel_summarized_climate, by=c("Latitude2"="Latitude", "Longitude2"="Longitude")) %>%
  left_join(KoeppenGeigerASCII, by=c("Latitude2"="Latitude", "Longitude2"="Longitude"))

# MAT/MAP space for the two main gas species, coloured by climate class
Q10_MAT %>%
  filter(!is.na(Species)) %>%
  filter(Species %in% c("CO2", "CH4")) %>%
  ggplot(aes(x = MAT, y = MAP))+
  geom_point(aes(color = ClimateTypes_2), size = 2)+
  facet_wrap(~Species)+
  theme_classic()

# Koeppen-Geiger climate classes per grid cell
KoeppenGeigerASCII = readxl::read_xlsx("KoeppenGeigerASCII.xlsx")
KoeppenGeigerASCII %>%
  ggplot(aes(x = Longitude, y = Latitude, color = ClimateTypes))+
  geom_point()

# Collapse the detailed Koeppen-Geiger codes to their five main groups
# (first letter A-E)
KoeppenGeigerASCII =
  KoeppenGeigerASCII %>%
  mutate(ClimateTypes_2 = case_when(grepl("A", ClimateTypes) ~ "equatorial",
                                    grepl("B", ClimateTypes) ~ "arid",
                                    grepl("C", ClimateTypes) ~ "temperate",
                                    grepl("D", ClimateTypes) ~ "snow",
                                    grepl("E", ClimateTypes) ~ "polar"))
KoeppenGeigerASCII %>%
  ggplot(aes(x = Longitude, y = Latitude, color = ClimateTypes_2))+
  geom_point()

# Climate classes in MAT/MAP space
KoeppenGeigerASCII_2 =
  KoeppenGeigerASCII %>%
  left_join(UDel_summarized_climate)
KoeppenGeigerASCII_2 %>%
  ggplot(aes(x = MAT, y = MAP))+
  geom_point(aes(color = ClimateTypes_2))

# CO2 Q10 by incubation, per climate class; the "snow" class is plotted
# separately (and extreme Q10 >= 300 values are dropped for the others)
nonsnow =
  Q10_MAT %>%
  filter(Species == "CO2" & ClimateTypes_2 != "snow") %>%
  filter(Q10 < 300) %>%
  ggplot(aes(x = Incubation, y = Q10, color = Incubation, group = Incubation))+
  geom_jitter(width = 0.2, size = 1)+
  facet_wrap(~ClimateTypes_2, ncol = 4)
snow =
  Q10_MAT %>%
  filter(Species == "CO2" & ClimateTypes_2 == "snow") %>%
  ggplot(aes(x = Incubation, y = Q10, color = Incubation, group = Incubation))+
  geom_jitter(width = 0.2, size = 1)+
  facet_wrap(~ClimateTypes_2, ncol = 4)

# Combine the two panels with a shared legend
library(patchwork)
nonsnow + snow +
  plot_layout(widths = c(4, 1),
              guides = "collect") &
  theme(legend.position = "top") &
  labs(x = "")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iccf_functions.R
\name{iccf_core}
\alias{iccf_core}
\title{Compute the one-way Interpolated Cross-Correlation Function (ICCF)}
\usage{
iccf_core(t.1, x.1, t.2, x.2, tau, local.est = FALSE, cov = FALSE)
}
\arguments{
\item{t.1, x.1}{time and value for time series 1}
\item{t.2, x.2}{time and value for time series 2}
\item{tau}{(vector) list of lags at which to compute the CCF.}
\item{local.est}{(logical) use 'local' (not 'global') means and variances?}
\item{cov}{(logical) if \code{TRUE} then compute covariance, not correlation coefficient.}
}
\value{
A list with components
\item{r}{(array) A one dimensional array containing the correlation
coefficients at each lag.}
\item{n}{(array) A one dimensional array containing the number of pairs of
points used at each lag.}
}
\description{
\code{iccf_core} returns the basic interpolated correlation coefficients.
}
\details{
The main loop for the ICCF. In this part we take time series 1, \code{x.1} at
\code{t.1}, pair them with values from time series 2, \code{x.2} at
\code{t.1-tau[i]} produced by linearly interpolating between the nearest
values of \code{x.2}. At a given \code{tau[i]} we sum the product of the
paired \code{x.1} and \code{x.2} values \code{r[i] = (1/n) * sum(x.1 * x.2) /
(sd.1 * sd.2)} In the simplest case \code{n}, \code{sd.1} and \code{sd.2} are
constant and are the number of pairs at \code{lag=0} and the total
\code{sqrt(var)} of each time series. If \code{local.est = TRUE} then
\code{n}, \code{sd.1} and \code{sd.2} are evaluated 'locally', i.e. they
vary for each lag \code{tau[i]}. In this case they are the number of good
pairs at lag \code{tau[i]}, and the \code{sqrt(vars)} of just the \code{x.1}
and \code{x.2} data points involved. We assume \code{x.1} and \code{x.2} have
zero sample mean.
}
\section{Notes}{
We assume that the input data \code{x.1} and \code{x.2} have been
mean-subtracted.
}
\examples{
## Example using NGC 5548 data
t1 <- cont$t
y1 <- cont$y - mean(cont$y)
t2 <- hbeta$t
y2 <- hbeta$y - mean(hbeta$y)
tau <- seq(-150, 150)
result <- iccf_core(t1, y1, t2, y2, tau = tau)
plot(tau, result$r, type = "l")
}
\seealso{
\code{\link{cross_correlate}}, \code{\link{iccf}}
}
|
/man/iccf_core.Rd
|
no_license
|
svdataman/sour
|
R
| false
| true
| 2,273
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iccf_functions.R
\name{iccf_core}
\alias{iccf_core}
\title{Compute the one-way Interpolated Cross-Correlation Function (ICCF)}
\usage{
iccf_core(t.1, x.1, t.2, x.2, tau, local.est = FALSE, cov = FALSE)
}
\arguments{
\item{t.1, x.1}{time and value for time series 1}
\item{t.2, x.2}{time and value for time series 2}
\item{tau}{(vector) list of lags at which to compute the CCF.}
\item{local.est}{(logical) use 'local' (not 'global') means and variances?}
\item{cov}{(logical) if \code{TRUE} then compute covariance, not correlation coefficient.}
}
\value{
A list with components
\item{r}{(array) A one dimensional array containing the correlation
coefficients at each lag.}
\item{n}{(array) A one dimensional array containing the number of pairs of
points used at each lag.}
}
\description{
\code{iccf_core} returns the basic interpolated correlation coefficients.
}
\details{
The main loop for the ICCF. In this part we take time series 1, \code{x.1} at
\code{t.1}, pair them with values from time series 2, \code{x.2} at
\code{t.1-tau[i]} produced by linearly interpolating between the nearest
values of \code{x.2}. At a given \code{tau[i]} we sum the product of the
paired \code{x.1} and \code{x.2} values \code{r[i] = (1/n) * sum(x.1 * x.2) /
(sd.1 * sd.2)} In the simplest case \code{n}, \code{sd.1} and \code{sd.2} are
constant and are the number of pairs at \code{lag=0} and the total
\code{sqrt(var)} of each time series. If \code{local.est = TRUE} then
\code{n}, \code{sd.1} and \code{sd.2} are evaluated 'locally', i.e. they
vary for each lag \code{tau[i]}. In this case they are the number of good
pairs at lag \code{tau[i]}, and the \code{sqrt(vars)} of just the \code{x.1}
and \code{x.2} data points involved. We assume \code{x.1} and \code{x.2} have
zero sample mean.
}
\section{Notes}{
We assume that the input data \code{x.1} and \code{x.2} have been
mean-subtracted.
}
\examples{
## Example using NGC 5548 data
t1 <- cont$t
y1 <- cont$y - mean(cont$y)
t2 <- hbeta$t
y2 <- hbeta$y - mean(hbeta$y)
tau <- seq(-150, 150)
result <- iccf_core(t1, y1, t2, y2, tau = tau)
plot(tau, result$r, type = "l")
}
\seealso{
\code{\link{cross_correlate}}, \code{\link{iccf}}
}
|
#T2 gaza indicators
##### TO DO: create new function or adjust the IS_Gaza portion of the code to make it
#### create these new vars
# For each woman, record the first ultrasound date (usedd_<k>) whose matching
# gestational age (usgestage_<k>) lies between 1 and 22 weeks.
nam <- names(d)[stringr::str_detect(names(d),"^usedd_[0-9]*$")]
num <- stringr::str_replace(nam,"usedd_","")
d[,first_1_21_usedd:=as.Date(NA)]
for(i in num ){
  print(i)
  var_usedd <- sprintf("usedd_%s",i)
  # BUGFIX: the original hard-coded sprintf("usgestage_%s", 1), so every
  # usedd_<k> date was gated on usgestage_1; pair each scan date with its
  # own gestational-age column instead.
  var_usgestage <- sprintf("usgestage_%s",i)
  d[!is.na(get(var_usedd)) &
      !is.na(get(var_usgestage)) &
      get(var_usgestage) > 0 &
      get(var_usgestage) < 23 &
      is.na(first_1_21_usedd),
    first_1_21_usedd:=as.Date(get(var_usedd),format="%Y-%m-%d")]
}
# define data set
t2reboot <- d[bookdate>"2019-12-01" & ident_TRIAL_2_and_3==T,]
t2reboot[,smsyes:=areyouwillingtoreceivesmstextmessagesandremindersaboutyourvisits]
####### creating variables we need #######
# making bookgestage cats
t2reboot[,bookgestagecat:=cut(bookgestage,
breaks=c(0,14,17,22,23,28,30,33,34,37,40),
include.lowest=T)]
#### gestage variable today (days) for attendance ####
today <- lubridate::today()
t2reboot[,gAtoday_days:=as.numeric(NA)]
t2reboot[!is.na(first_1_21_usedd),
gAtoday_days:=as.numeric(
difftime(first_1_21_usedd,today, units="days"))]
t2reboot[is.na(first_1_21_usedd),
gAtoday_days:=as.numeric(difftime(lubridate::today(),booklmp,units="days"))]
# timely anc variables
t2reboot[,ancbefore15:=as.logical(NA)]
t2reboot[,anc15to17:=as.logical(NA)]
t2reboot[,anc18to22:=as.logical(NA)]
t2reboot[,anc23:=as.logical(NA)]
t2reboot[,anc24to28:=as.logical(NA)]
t2reboot[,anc29to30:=as.logical(NA)]
t2reboot[,anc31to33:=as.logical(NA)]
t2reboot[,anc34:=as.logical(NA)]
t2reboot[,anc35to37:=as.logical(NA)]
t2reboot[,anc38to40:=as.logical(NA)]
vars_gestage <- stringr::str_subset(names(t2reboot),"^angestage_[0-9]+")
vars_anevent <- stringr::str_subset(names(t2reboot),"^anevent_[0-9]+")
for(i in vars_gestage){
t2reboot[get(vars_gestage)>0 & get(vars_gestage)<=14 &
!is.na(vars_anevent),ancbefore15:=TRUE]
t2reboot[get(vars_gestage)>=15 & get(vars_gestage)<=17 &
!is.na(vars_anevent),anc15to17:=TRUE]
t2reboot[get(vars_gestage)>=18 & get(vars_gestage)<=22 &
!is.na(vars_anevent),anc18to22:=TRUE]
t2reboot[get(vars_gestage)==23 &!is.na(vars_anevent),anc23:=TRUE]
t2reboot[get(vars_gestage)>=24 & get(vars_gestage)<=28 &
!is.na(vars_anevent),anc24to28:=TRUE]
t2reboot[get(vars_gestage)>=29 & get(vars_gestage)<=30 &
!is.na(vars_anevent),anc29to30:=TRUE]
t2reboot[get(vars_gestage)>=31 & get(vars_gestage)<=33 &
!is.na(vars_anevent),anc31to33:=TRUE]
t2reboot[get(vars_gestage)==34 & !is.na(vars_anevent),anc34:=TRUE]
t2reboot[get(vars_gestage)>=35 & get(vars_gestage)<=37 &
!is.na(vars_anevent),anc35to37:=TRUE]
t2reboot[get(vars_gestage)>=38 & get(vars_gestage)<=40 &
!is.na(vars_anevent),anc38to40:=TRUE]
}
t2nums <- t2reboot[,.(N=.N,
Booked=sum(ident_dhis2_booking==T, na.rm=T),
ANCvisits=sum(ident_dhis2_an==T, na.rm=T),
BookedSMSyes=sum(smsyes==1,na.rm=T),
BookedSMSMno=sum(smsyes==0, na.rm=T),
BookedSMSmiss=sum(is.na(smsyes)),
BookedSMSonly=sum(ident_TRIAL_2==T &
ident_TRIAL_3==F, na.rm=T),
BookedinSMSclinic=sum(ident_TRIAL_2==T, na.rm=T),
BookedQIDonly=sum(ident_TRIAL_2==F & ident_TRIAL_3==T),
BookdQID=sum(ident_TRIAL_3==T, na.rm=T),
BookedBoth=sum(ident_TRIAL_3==T & ident_TRIAL_2==T),
BookedControl=sum(ident_TRIAL_2_3_Control, na.rm=T)
)]
openxlsx::write.xlsx(t2nums,file.path(FOLDER_DATA_RESULTS_GAZA,
"T2",
sprintf("%s_recruit_update_by_arm.xlsx",
lubridate::today())))
# bookgestage cats
t2bookcats <- t2reboot[,.(N=.N),
keyby=.(bookgestagecat)]
openxlsx::write.xlsx(t2bookcats,file.path(FOLDER_DATA_RESULTS_GAZA,
"T2",
sprintf(
"%s_recruit_update_bookgestage.xlsx",
lubridate::today())))
#anc visits timely and not timely
vars <- names(t2reboot)[stringr::str_detect(names(t2reboot),"^anevent_[0-9]+")]
t2reboot[,anevent_x:=0]
print(vars)
for(i in vars){
t2reboot[!is.na(get(i)), anevent_x:=anevent_x + 1]
}
sum(t2reboot[ident_dhis2_control==F]$anevent_x,na.rm=T)
# visits per clinic
t2visits <- t2reboot[,.(N=.N,
bookingvisits=sum(!is.na(bookevent), na.rm=T),
expectedtohavedelivered=sum(gAtoday_days>=280 &
gAtoday_days<=300),
ancvisits=sum(anevent_x, na.rm=T),
ancb415=sum(ancbefore15, na.rm=T),
anc15to17=sum(anc15to17, na.rm=T),
anc18to22=sum(anc18to22, na.rm=T),
anc23=sum(anc23, na.rm=T),
anc24to28=sum(anc24to28, na.rm=T),
anc29to30=sum(anc29to30, na.rm=T),
anc31to33=sum(anc31to33, na.rm=T),
anc34=sum(anc34, na.rm=T),
anc35to37=sum(anc35to37, na.rm=T)),
keyby=.(ident_TRIAL_2_3_Control,str_TRIAL_2_Cluster)]
openxlsx::write.xlsx(t2visits,file.path(FOLDER_DATA_RESULTS_GAZA,
"T2",
sprintf(
"%s_recruit_update_visits_by_clinic.xlsx",
lubridate::today())))
t2visits <- t2reboot[,.(N=.N,
bookingvisits=sum(!is.na(bookevent), na.rm=T),
ancvisits=sum(anevent_x, na.rm=T),
ancb415=sum(ancbefore15, na.rm=T),
anc15to17=sum(anc15to17, na.rm=T),
anc18to22=sum(anc18to22, na.rm=T),
anc23=sum(anc23, na.rm=T),
anc24to28=sum(anc24to28, na.rm=T),
anc29to30=sum(anc29to30, na.rm=T),
anc31to33=sum(anc31to33, na.rm=T),
anc34=sum(anc34, na.rm=T),
anc35to37=sum(anc35to37, na.rm=T))]
openxlsx::write.xlsx(t2visits,file.path(FOLDER_DATA_RESULTS_GAZA,
"T2",
sprintf("%s_recruit_update_visits.xlsx",
lubridate::today())))
# by bookgestage
t2visits <- t2reboot[,.(N=.N,
bookingvisits=sum(!is.na(bookevent), na.rm=T),
expectedtohavedelivered=sum(gAtoday_days>=280 &
gAtoday_days<=300),
ancvisits=sum(anevent_x, na.rm=T),
ancb415=sum(ancbefore15, na.rm=T),
anc15to17=sum(anc15to17, na.rm=T),
anc18to22=sum(anc18to22, na.rm=T),
anc23=sum(anc23, na.rm=T),
anc24to28=sum(anc24to28, na.rm=T),
anc29to30=sum(anc29to30, na.rm=T),
anc31to33=sum(anc31to33, na.rm=T),
anc34=sum(anc34, na.rm=T),
anc35to37=sum(anc35to37, na.rm=T)),
keyby=.(bookgestagecat)]
openxlsx::write.xlsx(t2visits,file.path(FOLDER_DATA_RESULTS_GAZA,
"T2",
sprintf(
"%s_recruit_update_visits_by_bookgestage.xlsx",
lubridate::today())))
####################### Process outcomes #######################
t2reboot[,bookgestagedays_cats:=cut(bookgestagedays,
breaks=c(-500,0,104,
125,160,167,202,
216,237,244,265,293),
include.lowest=T)]
# MAKE BOOK VISIT FOR ANEMIA
t2reboot[,booklabhb:=as.numeric(NA)]
t2reboot[abs(labT1gestagedays_1-bookgestagedays)<7,booklabhb:=labhb_1]
# MAKE BOOK VISIT FOR Laburglu
t2reboot[,booklaburglu:=as.character(NA)]
t2reboot[abs(labT1gestagedays_1-bookgestagedays)<7 & laburglu_1%in%c("NEG","POS"),
booklaburglu:=laburglu_1]
t2reboot[,booklaburglu:=NULL]
t2reboot[abs(labT1gestagedays_1-bookgestagedays)<7,
booklaburglu:=laburglu_1]
xtabs(~t2reboot$booklaburglu)
str(t2reboot$booklaburglu)
unique(t2reboot$booklaburglu)
t2reboot[,booklaburglu:=NULL]
t2reboot[abs(labT1gestagedays_1-bookgestagedays)<7 & laburglu_1%in%c("NEG","POS"),
booklaburglu:=laburglu_1]
xtabs(~t2reboot$booklaburglu)
# MAKE BOOK VISIT FOR LABBLOODGLU
t2reboot[,booklabbloodglu:=as.integer(NA)]
t2reboot[abs(labT1gestagedays_1-bookgestagedays)<7,booklabbloodglu:=labbloodglu_1]
xtabs(~t2reboot$booklabbloodglu, addNA=T)
# MAKE BOOK VISIT FOR LABBLOODGLU_HIGH
t2reboot[,booklabbloodglu_high:=as.logical(NA)]
t2reboot[!is.na(booklabbloodglu),booklabbloodglu_high:=FALSE]
t2reboot[booklabbloodglu>=140 & booklabbloodglu<500,booklabbloodglu_high:=TRUE]
xtabs(~t2reboot$booklabbloodglu_high, addNA=T)
# MAKE BOOK VISIT FOR LABFASTBLOODGLU
t2reboot[,booklabfastbloodglu:=as.numeric(NA)]
t2reboot[abs(labT1gestagedays_1-bookgestagedays)<7,booklabfastbloodglu:=labfastbloodglu_1]
xtabs(~t2reboot$booklabfastbloodglu)
# MAKE BOOK VISIT FOR LABfastBLOODGLU_HIGH
t2reboot[,booklabfastbloodglu_high:=as.logical(NA)]
t2reboot[!is.na(booklabfastbloodglu),booklabfastbloodglu_high:=FALSE]
t2reboot[booklabfastbloodglu>126 ,booklabfastbloodglu_high:=TRUE]
xtabs(~t2reboot$booklabfastbloodglu_high, addNA=T)
# Discrepancy Variable anexamsfh variable
# For every visit suffix, sfhDiscrep_<i> = |anexamsfh_<i> - angestage_<i>|,
# i.e. the absolute gap (in weeks) between palpated symphysis-fundal height
# and the recorded gestational age; the booking visit is exposed as "_0".
t2reboot[,anexamsfh_0:=bookexamsfh]
t2reboot[,angestage_0:=bookgestage]
vars <- stringr::str_subset(names(t2reboot), "^anexamsfh_")
vars <- stringr::str_remove(vars, "anexamsfh_")
#anexamsfh stuff
for(i in vars){
  print(i)
  anexamsfh <-sprintf("anexamsfh_%s",i)
  angestage <- sprintf("angestage_%s",i)
  sfhDiscrep <- sprintf("sfhDiscrep_%s",i)
  t2reboot[,(sfhDiscrep):=as.numeric(NA)]
  # only compute where both inputs are present; everyone else stays NA
  t2reboot[!is.na(get(angestage)) &
             !is.na(get(anexamsfh)), (sfhDiscrep):=abs(get(anexamsfh)-get(angestage))]
}
# SFH discrepancy with ancongestagesizevisitweek
# sfhDiscrepCon_<i> = |consultant-recorded gestational age at visit (weeks)
# - SFH-derived angestage_<i>|.
vars <- stringr::str_subset(names(t2reboot), "^anconancgestationaageatvisitweeks_")
vars <- stringr::str_remove(vars, "anconancgestationaageatvisitweeks_")
#anconancgestationaageatvisitweeks var
for(i in vars){
  print(i)
  anconangestageweeks <-sprintf("anconancgestationaageatvisitweeks_%s",i)
  angestage <- sprintf("angestage_%s",i)
  sfhDiscrepCon <- sprintf("sfhDiscrepCon_%s",i)
  t2reboot[,(sfhDiscrepCon):=as.numeric(NA)]
  t2reboot[!is.na(get(angestage)) &
             !is.na(get(anconangestageweeks)),
           (sfhDiscrepCon):=abs(get(anconangestageweeks)-get(angestage))]
}
# NOTE(review): this span originally repeated, verbatim, the sfhDiscrepCon_*
# loop immediately above. Re-running the identical loop recomputed exactly
# the same column values (the assignments are idempotent), so the duplicate
# was removed; the resulting data are unchanged.
# anT1 in weeks to calculate sfhDiscrep via anexamsfh and anT1gestagedays to weeks
# Convert each per-visit T1 gestational age from days to completed weeks
# (floor division), booking visit exposed as "_0".
# NOTE(review): anT1gestagedays_0 is created here but not deleted afterwards,
# unlike the later blocks that create/drop it around each VisitVariables
# call -- confirm this is intentional.
t2reboot[,anT1gestagedays_0:=bookgestagedays]
vars <- stringr::str_subset(names(t2reboot), "^anT1gestagedays_")
vars <- stringr::str_remove(vars, "^anT1gestagedays_")
for (i in vars){
  anT1gestagedays <- sprintf("anT1gestagedays_%s",i)
  anT1gAweeks <- sprintf("anT1gAweeks_%s",i)
  t2reboot[, (anT1gAweeks):=floor(get(anT1gestagedays)/7)]
}
# Discrepancy between palpated SFH (anexamsfh_*, weeks) and the T1 gestational
# age converted to completed weeks (anT1gAweeks_*):
#   sfhDiscrepAnt1gas_<i>    = |anexamsfh_<i> - anT1gAweeks_<i>|
#   sfhDiscrepAnt1gasCat_<i> = TRUE when that discrepancy exceeds 2 weeks
t2reboot[,anexamsfh_0:=bookexamsfh]
vars <- stringr::str_subset(names(t2reboot), "^anexamsfh_")
vars <- stringr::str_remove(vars, "anexamsfh_")
# Loop directly over the visit suffixes (robust to zero matches, unlike
# 1:length on an empty vector).
for(i in vars){
  print(i)
  anexamsfh <-sprintf("anexamsfh_%s",i)
  anT1gAweeks <- sprintf("anT1gAweeks_%s",i)
  sfhDiscrepAnt1gas <- sprintf("sfhDiscrepAnt1gas_%s",i)
  sfhDiscrepAnt1gasCat <- sprintf("sfhDiscrepAnt1gasCat_%s",i)
  # BUG FIX: the original tested !is.na(anT1gAweeks) on the *name string*
  # (always TRUE) rather than the column. Using get() excludes rows with a
  # missing gestational age explicitly instead of relying on NA arithmetic
  # to leave them missing (final values are the same, intent is now clear).
  t2reboot[!is.na(get(anT1gAweeks)) &
             !is.na(get(anexamsfh)), (sfhDiscrepAnt1gas):=abs(get(anexamsfh)-get(anT1gAweeks))]
  # discrepancy of more than 2 weeks flagged as a categorical mismatch
  t2reboot[!is.na(get(anT1gAweeks)) &
             !is.na(get(anexamsfh)), (sfhDiscrepAnt1gasCat):=abs(get(anexamsfh)-get(anT1gAweeks))>2]
}
# Collapse per-visit antenatal-exam palpation columns (anexampalp_*) into
# per-visit malpresentation flags: "Yes" when palpation found transverse or
# breech lie. "Trasverse" is the (misspelled) value actually stored in the
# data, so it is matched as-is.
# BUG FIX: source names were taken from `d` while the detection ran on
# `t2reboot`; both now consistently use t2reboot so the source/outcome
# vectors cannot go out of sync (behavior is unchanged when the two objects
# share column names/order -- verify no caller relied on `d` differing).
vars_source <- names(t2reboot)[stringr::str_detect(names(t2reboot),"^anexampalp_")]
vars_outcome <- stringr::str_replace(vars_source, "anexampalp", "malpresanexam_")
for(i in seq_along(vars_source)){
  var_source <- vars_source[i]
  var_outcome <- vars_outcome[i]
  t2reboot[get(var_source) %in% c("Trasverse", "Breech"), (var_outcome):="Yes"]
}
# Collapse per-visit ultrasound presentation columns (uspres_*) into
# per-visit malpresentation flags (us_malpres_*): "Yes" for transverse or
# breech on ultrasound ("Trasverse" matches the misspelling in the data).
# BUG FIX: source names were taken from `d` but detected on `t2reboot`;
# both now use t2reboot so the two vectors stay aligned.
vars_source <- names(t2reboot)[stringr::str_detect(names(t2reboot),"^uspres_")]
vars_outcome <- stringr::str_replace(vars_source, "uspres_", "us_malpres_")
for(i in seq_along(vars_source)){
  var_source <- vars_source[i]
  var_outcome <- vars_outcome[i]
  t2reboot[get(var_source) %in% c("Trasverse", "Breech"), (var_outcome):="Yes"]
}
VisitVariables <- function(t2reboot,days,variableOfInterestName,variableOfInterestPattern,TruevaluesMin=NULL,TruevaluesMax=NULL,TruevaluesDiscrete=NULL,gestagedaysVariable="anT1gestagedays" ){
  # For each gestational-age window in `days`, create a logical column
  # TrialOne_<variableOfInterestName>_<window name> on t2reboot:
  #   NA    - women without a booking record (ident_dhis2_booking is NA)
  #   TRUE  - ANY visit whose <gestagedaysVariable>_<suffix> falls inside the
  #           window has a qualifying value of interest, i.e. within
  #           [TruevaluesMin, TruevaluesMax] (continuous mode) or one of
  #           TruevaluesDiscrete (discrete mode)
  #   FALSE - otherwise
  #
  # Args:
  #   t2reboot                  - data.table, one row per woman. Modified by
  #                               reference AND returned.
  #   days                      - named list; each element is an integer
  #                               vector of gestational-age days, each name
  #                               labels the output window (e.g. "24_28").
  #   variableOfInterestName    - label used in the output column names.
  #   variableOfInterestPattern - per-visit column stem of the inspected value.
  #   TruevaluesMin/Max         - inclusive numeric bounds (continuous mode).
  #   TruevaluesDiscrete        - vector of qualifying values (discrete mode).
  #   gestagedaysVariable       - per-visit column stem holding gest. age days.
  #
  # Exactly one mode must be supplied: either both Min and Max, or Discrete.
  # (The original check only rejected "all three set" / "all three NULL",
  # silently accepting mixed misuse; scalar && replaces vectorized &.)
  discrete_mode <- !is.null(TruevaluesDiscrete)
  range_mode <- !is.null(TruevaluesMin) && !is.null(TruevaluesMax)
  if(discrete_mode && (!is.null(TruevaluesMin) || !is.null(TruevaluesMax))){
    stop("Supply either TruevaluesDiscrete or TruevaluesMin/Max, not both")
  }
  if(!discrete_mode && !range_mode){
    stop("Supply TruevaluesDiscrete, or both TruevaluesMin and TruevaluesMax")
  }
  # pull out a list of all of the gestage variables and pair each with its
  # same-suffix variable-of-interest column (paired by position)
  gestagedaysVariablewithcarrot <- sprintf("^%s",gestagedaysVariable)
  listOfGestAgeVars <- names(t2reboot)[stringr::str_detect(names(t2reboot),gestagedaysVariablewithcarrot)]
  listOfInterestVars <- stringr::str_replace(listOfGestAgeVars, gestagedaysVariable,variableOfInterestPattern)
  for(i in seq_along(days)){
    # name of new variable
    var <- sprintf("TrialOne_%s_%s",variableOfInterestName,names(days)[i])
    # initialize as FALSE for everyone with a booking record; others stay NA
    t2reboot[!is.na(ident_dhis2_booking),(var):=FALSE]
    # scan every visit: a single qualifying visit is enough to flip the flag
    for(j in seq_along(listOfGestAgeVars)){
      gestageVar <- listOfGestAgeVars[j]
      interestVar <- listOfInterestVars[j]
      if(discrete_mode){
        t2reboot[!is.na(get(var)) & get(gestageVar) %in% days[[i]] & !is.na(get(interestVar)) & get(interestVar) %in% TruevaluesDiscrete ,(var):=TRUE]
      }else{
        t2reboot[!is.na(get(var)) & get(gestageVar) %in% days[[i]] & !is.na(get(interestVar)) & get(interestVar)>=TruevaluesMin & get(interestVar)<=TruevaluesMax, (var):=TRUE]
      }
    }
  }
  return(t2reboot)
}
###### identifying outcomes #######
# categories we want
# Gestational-age windows, in days. The first group are the clinical ANC
# visit windows; the "ww_ww" single-week windows are used by the management
# loops below. NOTE: "34_34" appears twice with identical day ranges
# (c(238:244) == 34*7+c(0:6)); VisitVariables iterates positionally, so the
# duplicate only recomputes the same column. The weekly "23_23" is commented
# out because the c(161:167) window above covers exactly the same days.
days <- list(
  "00_14"=c(-500:104),
  "15_17"=c(105:125),
  "18_22"=c(126:160),
  "23_23"=c(161:167),
  "24_28"=c(168:202),
  "29_30"=c(203:216),
  "31_33"=c(217:237),
  "34_34"=c(238:244),
  "35_37"=c(245:265),
  "38_41"=c(266:293),
  #using below vectors for managementsinstead of using two seperate vectors
  "00_00"=0*7+c(0:6),
  "01_01"=1*7+c(0:6),
  "02_02"=2*7+c(0:6),
  "03_03"=3*7+c(0:6),
  "04_04"=4*7+c(0:6),
  "05_05"=5*7+c(0:6),
  "06_06"=6*7+c(0:6),
  "07_07"=7*7+c(0:6),
  "08_08"=8*7+c(0:6),
  "09_09"=9*7+c(0:6),
  "10_10"=10*7+c(0:6),
  "11_11"=11*7+c(0:6),
  "12_12"=12*7+c(0:6),
  "13_13"=13*7+c(0:6),
  "14_14"=14*7+c(0:6),
  "15_15"=15*7+c(0:6),
  "16_16"=16*7+c(0:6),
  "17_17"=17*7+c(0:6),
  "18_18"=18*7+c(0:6),
  "19_19"=19*7+c(0:6), # BUG FIX: was 9*7+c(0:6), which duplicated week 09
  "20_20"=20*7+c(0:6),
  "21_21"=21*7+c(0:6),
  "22_22"=22*7+c(0:6),
  #"23_23"=23*7+c(0:6),
  "24_24"=24*7+c(0:6),
  "25_25"=25*7+c(0:6),
  "26_26"=26*7+c(0:6),
  "27_27"=27*7+c(0:6),
  "28_28"=28*7+c(0:6),
  "29_29"=29*7+c(0:6),
  "30_30"=30*7+c(0:6),
  "31_31"=31*7+c(0:6),
  "32_32"=32*7+c(0:6),
  "33_33"=33*7+c(0:6),
  "34_34"=34*7+c(0:6),
  "35_35"=35*7+c(0:6),
  "36_36"=36*7+c(0:6),
  "37_37"=37*7+c(0:6),
  "38_38"=38*7+c(0:6),
  "39_39"=39*7+c(0:6),
  "40_40"=40*7+c(0:6),
  "41_41"=41*7+c(0:6),
  "42_42"=42*7+c(0:6)
)
###ANC Visits####
# TrialOne_anvisitnew_<window>: TRUE if any ANC visit (gestational age up to
# 260 days; -500 sentinel included) falls inside the window. The booking
# visit is temporarily exposed as visit "_0" so VisitVariables picks it up,
# then the helper column is dropped again -- this create/drop pattern repeats
# around every call below.
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anvisitnew",
  variableOfInterestPattern="anT1gestagedays",
  TruevaluesMin=-500,
  TruevaluesMax=260,
  gestagedaysVariable="anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
xtabs(~t2reboot$TrialOne_anvisitnew_00_00)
###ANC BP SYT ####
# BP SYST Present
# Any plausible systolic BP (60-170) recorded in the window.
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpsyst_0:=bookbpsyst]
t2reboot<-VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpsyst_present",
  variableOfInterestPattern="anbpsyst",
  TruevaluesMin=60,
  TruevaluesMax=170,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpsyst_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpsyst_present_00_00)
# BP Diast Present
# Any plausible diastolic BP (40-170) recorded in the window.
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpdiast_0:=bookbpdiast]
t2reboot<- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpdiast_present",
  variableOfInterestPattern="anbpdiast",
  TruevaluesMin=40,
  TruevaluesMax=170,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpdiast_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpdiast_present_00_14)
# BP Syst High
# Systolic hypertension categories (upper bound 170 is the plausibility cap
# used by the "present" check above): high >=140, mild 140-149,
# moderate/severe >=150.
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpsyst_0:=bookbpsyst]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpsyst_high",
  variableOfInterestPattern="anbpsyst",
  TruevaluesMin=140,
  TruevaluesMax=170,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpsyst_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpsyst_high_00_14)
# BP Syst MildHTN
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpsyst_0:=bookbpsyst]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpsyst_mildHTN",
  variableOfInterestPattern="anbpsyst",
  TruevaluesMin=140,
  TruevaluesMax=149,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpsyst_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpsyst_mildHTN_00_14)
# BP Syst ModSevHTN
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpsyst_0:=bookbpsyst]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpsyst_modSevHTN",
  variableOfInterestPattern="anbpsyst",
  TruevaluesMin=150,
  TruevaluesMax=170,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpsyst_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpsyst_modSevHTN_00_14)
# BP Diast High
# Diastolic hypertension categories: high >=90, mild 90-99,
# moderate/severe >=100 (upper cap 200).
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpdiast_0:=bookbpdiast]
t2reboot <-VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpdiast_high",
  variableOfInterestPattern="anbpdiast",
  TruevaluesMin=90,
  TruevaluesMax=200,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpdiast_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpdiast_high_00_14)
# BP Diast MildHTN
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpdiast_0:=bookbpdiast]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpdiast_mildHTN",
  variableOfInterestPattern="anbpdiast",
  TruevaluesMin=90,
  TruevaluesMax=99,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpdiast_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpdiast_mildHTN_00_14)
# BP Diast Mod/SevHTN
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpdiast_0:=bookbpdiast]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpdiast_modSevHTN",
  variableOfInterestPattern="anbpdiast",
  TruevaluesMin=100,
  TruevaluesMax=200,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpdiast_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpdiast_modSevHTN_00_14)
### ANC Anemia ####
# Hb categories per window, thresholds as used in this script:
# exists 1-20 g/dL, normal >=11, severe anemia <7, mild/moderate 7-10.9.
# lab hb exists
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labhb_0:=booklabhb]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labhb_exists",
  variableOfInterestPattern="labhb",
  TruevaluesMin=1,
  TruevaluesMax=20,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
# sanity check against the raw visit-1 column
nrow(t2reboot[labhb_1>=4 & labhb_1<=20])
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labhb_0:=NULL]
xtabs(~t2reboot$TrialOne_labhb_exists_15_17)
#normal hb
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labhb_0:=booklabhb]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labhb_normal",
  variableOfInterestPattern="labhb",
  TruevaluesMin=11,
  TruevaluesMax=20,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labhb_0:=NULL]
xtabs(~t2reboot$TrialOne_labhb_normal_15_17, addNA=T)
# sev anemia
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labhb_0:=booklabhb]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labhb_anemia_sev",
  variableOfInterestPattern="labhb",
  TruevaluesMin=1,
  TruevaluesMax=6.9,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
nrow(t2reboot[labhb_1>=1 & labhb_1<7])
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labhb_0:=NULL]
xtabs(~t2reboot$TrialOne_labhb_anemia_sev_15_17)
# mild and moderate anemia
# Hb 7-10.9 g/dL in the window.
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labhb_0:=booklabhb]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labhb_anemia_mild_mod",
  variableOfInterestPattern="labhb",
  TruevaluesMin=7,
  TruevaluesMax=10.9,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
nrow(t2reboot[labhb_1>=7 & labhb_1<11])
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labhb_0:=NULL]
# Sanity check for the 15-17 week window.
# BUG FIX: the first condition originally read labgestage_1<=15, which
# combined with labgestage_1<=17 collapses to <=17; the intended window
# lower bound is >=15.
nrow(t2reboot[labgestage_1>=15 & labgestage_1<=17 & labhb_1>7 & labhb_1<11])
xtabs(~t2reboot$TrialOne_labhb_anemia_mild_mod_15_17, addNA=T)
### Lab RBS Normal ####
# Urine and random blood glucose per window. "exists" means a valid result
# was recorded (POS/NEG for urine; 50-500 for RBS); "high" means RBS >=140.
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,laburglu_0:=booklaburglu]
# normal urine glucose
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="laburglu_exists",
  variableOfInterestPattern="laburglu",
  TruevaluesMin=NULL,
  TruevaluesMax=NULL,
  TruevaluesDiscrete = c("POS", "NEG"),
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,laburglu_0:=NULL]
xtabs(~t2reboot$TrialOne_laburglu_exists_15_17)
# lab urglu pos
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,laburglu_0:=booklaburglu]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="laburglu_pos",
  variableOfInterestPattern="laburglu",
  TruevaluesMin=NULL,
  TruevaluesMax=NULL,
  TruevaluesDiscrete =c("POS"),
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,laburglu_0:=NULL]
nrow(t2reboot[laburglu_1=="POS" & labgestage_1>0 & labgestage_1<=14])
xtabs(~t2reboot$TrialOne_laburglu_pos_00_14)
# labbloodglu exist
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labbloodglu_0:=booklabbloodglu]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labbloodglu_exists",
  variableOfInterestPattern="labbloodglu",
  TruevaluesMin=50,
  TruevaluesMax=500,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labbloodglu_0:=NULL]
xtabs(~t2reboot$TrialOne_labbloodglu_exists_15_17)
# high blood glucose
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labbloodglu_0:=booklabbloodglu]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labbloodglu_high",
  variableOfInterestPattern="labbloodglu",
  TruevaluesMin=140,
  TruevaluesMax=500,
  TruevaluesDiscrete =NULL,
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labbloodglu_0:=NULL]
xtabs(~t2reboot$TrialOne_labbloodglu_high_00_14)
xtabs(~t2reboot$TrialOne_labbloodglu_high_18_22)
# Lab FBS exists
# Fasting blood sugar per window: exists 50-200, normal 71-91, likely GDM
# 92-125, high (overt diabetes range) >=126.
#http://perinatology.com/Reference/Reference%20Ranges/Glucose,%20fasting.htm
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labfastbloodglu_0:=booklabfastbloodglu]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labfastbloodglu_exists",
  variableOfInterestPattern="labfastbloodglu",
  TruevaluesMin=50,
  TruevaluesMax=200,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labfastbloodglu_0:=NULL]
xtabs(~t2reboot$TrialOne_labfastbloodglu_exists_15_17)
# Lab FBS Normal
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labfastbloodglu_0:=booklabfastbloodglu]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labfastbloodglu_normal",
  variableOfInterestPattern="labfastbloodglu",
  TruevaluesMin=71,
  TruevaluesMax=91,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labfastbloodglu_0:=NULL]
xtabs(~t2reboot$TrialOne_labfastbloodglu_normal_15_17)
# Lab FBS likely GDM
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labfastbloodglu_0:=booklabfastbloodglu]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labfastbloodglu_likelyGDM",
  variableOfInterestPattern="labfastbloodglu",
  TruevaluesMin=92,
  TruevaluesMax=125,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labfastbloodglu_0:=NULL]
xtabs(~t2reboot$TrialOne_labfastbloodglu_likelyGDM_24_28)
# Lab FBS High
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labfastbloodglu_0:=booklabfastbloodglu]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labfastbloodglu_high",
  variableOfInterestPattern="labfastbloodglu",
  TruevaluesMin=126,
  TruevaluesMax=500,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labfastbloodglu_0:=NULL]
xtabs(~t2reboot$TrialOne_labfastbloodglu_high_24_28)
#### US visits ####
# Has US visit
# TrialOne_us_exists_*: any ultrasound with a plausible gestational age
# (10-300 days) in the window. (No booking "_0" helper here: ultrasound
# visits have no booking counterpart.)
t2reboot <-VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="us_exists",
  variableOfInterestPattern="usT1gestagedays",
  TruevaluesMin=10,
  TruevaluesMax=300,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable ="usT1gestagedays")
xtabs(~t2reboot$TrialOne_us_exists_00_14)
# US suspected IUGR
t2reboot <-VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="us_iugrSuspected",
  variableOfInterestPattern="usiugr",
  TruevaluesMin=NULL,
  TruevaluesMax=NULL,
  TruevaluesDiscrete = 1,
  gestagedaysVariable ="usT1gestagedays")
xtabs(~t2reboot$TrialOne_us_iugrSuspected_00_14)
# US expected LGA
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="us_lgaSuspected",
  variableOfInterestPattern="uslga",
  TruevaluesMin=1,
  TruevaluesMax=1,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "usT1gestagedays")
xtabs(~t2reboot$TrialOne_us_lgaSuspected_00_14)
# US pres-malpresentation
# Uses the derived us_malpres_* ("Yes") columns built earlier in the script.
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="us_malpres",
  variableOfInterestPattern="us_malpres",
  TruevaluesMin=NULL,
  TruevaluesMax=NULL,
  TruevaluesDiscrete="Yes",
  gestagedaysVariable = "usT1gestagedays")
xtabs(~t2reboot$TrialOne_us_malpres_00_14)
# US pres-malpresentation
# Same outcome derived directly from the raw uspres_* values ("Trasverse"
# is the spelling actually stored in the data).
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="us_malpresvar",
  variableOfInterestPattern="uspres",
  TruevaluesMin=NULL,
  TruevaluesMax=NULL,
  TruevaluesDiscrete= c("Trasverse","Breech"),
  gestagedaysVariable = "usT1gestagedays")
xtabs(~t2reboot$TrialOne_us_malpresvar_00_14)
#uspres_checked
# Any presentation value at all was recorded on ultrasound in the window.
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="us_pres_checked",
  variableOfInterestPattern="uspres",
  TruevaluesMin=NULL,
  TruevaluesMax=NULL,
  TruevaluesDiscrete= c("Trasverse","Breech","Cephalic","Unknown"),
  gestagedaysVariable = "usT1gestagedays")
xtabs(~t2reboot$TrialOne_us_pres_checked_00_14, addNA=T)
### removed sfh discrepancies and anexampalp code from here
####Referrals####
# Referral outcomes per window, from the management-entry type columns
# (mantypex_*) keyed on the management gestational age (manT1gestagedays_*).
# Ref to HR
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="refHR",
  variableOfInterestPattern="mantypex",
  TruevaluesMin=NULL,
  TruevaluesMax=NULL,
  TruevaluesDiscrete ="RefHighRisk",
  gestagedaysVariable = "manT1gestagedays")
# sanity checks against the raw visit-1 columns, split by trial arm
nrow(t2reboot[mantypex_1=="RefHighRisk" & manT1gestagedays_1>=15 & manT1gestagedays_1<=17])
nrow(t2reboot[mantypex_1=="RefHighRisk" & mangestage_1>=0 & mangestage_1<=14])
xtabs(~t2reboot[ident_dhis2_control==T]$TrialOne_refHR_00_14)
xtabs(~t2reboot[ident_dhis2_control==F]$TrialOne_refHR_00_14)
xtabs(~t2reboot$TrialOne_refHR_35_37)
# Ref to Hosp
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="refHosp",
  variableOfInterestPattern="mantypex",
  TruevaluesMin=NULL,
  TruevaluesMax=NULL,
  TruevaluesDiscrete ="RefHosp",
  gestagedaysVariable = "manT1gestagedays")
nrow(t2reboot[mantypex_1=="RefHosp" & mangestage_1>=0 & mangestage_1<=14])
xtabs(~t2reboot[ident_dhis2_control==T]$TrialOne_refHosp_00_14)
xtabs(~t2reboot[ident_dhis2_control==F]$mantypex_1, addNA=T)
# RefDiabetes
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="refDiab",
  variableOfInterestPattern="mantypex",
  TruevaluesMin=NULL,
  TruevaluesMax=NULL,
  TruevaluesDiscrete ="RefDiabetes",
  gestagedaysVariable = "manT1gestagedays")
nrow(t2reboot[mantypex_1=="RefDiabetes" & mangestage_1>=0 & mangestage_1<=14])
xtabs(~t2reboot$TrialOne_refDiab_00_14)
# Management Performed
# Any management entry at all (manperf_* == 1) within each window.
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="manperf",
  variableOfInterestPattern="manperf",
  TruevaluesMin=1,
  TruevaluesMax=1,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "manT1gestagedays")
xtabs(~t2reboot$TrialOne_manperf_18_22)
######### Managements ############
# Management-outcome loops. Shared pattern for each loop below:
#   1. Women with a "bad" finding in week i get temp flags initialized FALSE
#      (everyone else stays NA).
#   2. If a qualifying follow-up action/recheck occurs in any of the
#      `weeks_later` windows, the temp flag flips to TRUE.
#   3. Final outcome: control arm = recheck flag only; intervention arm =
#      recheck AND management-performed flag.
# take into account the 4 weeks after 37
#sev anemia
# Severe anemia (Hb<7): qualifying follow-up is a hospital referral in the
# same or next week (both temp flags use the same refHosp check here).
for(i in 0:37){
  #i=23
  # make sure everything has 2 digits (with 0 in front)
  week_current <- formatC(i, width=2, flag="0")
  weeks_later <- formatC(i+c(0:1), width=2, flag="0")
  #output variable
  var_manhb <- sprintf("TrialOne_manhb_%s_%s", week_current, week_current)
  var_temp_manperf <- "temp_manperf"
  var_temp_manhb <- "temp_manhb"
  #id source
  var_badhb <- sprintf("TrialOne_labhb_anemia_sev_%s_%s", week_current, week_current)
  # no one has anything
  t2reboot[,(var_temp_manperf):=as.logical(NA)]
  t2reboot[,(var_temp_manhb):=as.logical(NA)]
  # is false, if you have a bad hb
  t2reboot[get(var_badhb)==TRUE, (var_temp_manperf):=FALSE]
  t2reboot[get(var_badhb)==TRUE, (var_temp_manhb):=FALSE]
  for(week_later in weeks_later){
    # working only on manerf check
    var_secondcheck <- sprintf("TrialOne_refHosp_%s_%s",
                               week_later,
                               week_later)
    # flip "bad management" to "good" when the follow-up action happened
    t2reboot[get(var_temp_manperf)==FALSE &
               get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
    # same flip for the recheck flag
    t2reboot[get(var_temp_manhb)==FALSE & get(var_secondcheck)==TRUE, (var_temp_manhb):=TRUE]
  }
  #making var for sev anemia
  t2reboot[,(var_manhb):=as.logical(NA)]
  #control
  t2reboot[ident_dhis2_control==T,(var_manhb):=get(var_temp_manhb)]
  #intervention
  t2reboot[ident_dhis2_control==F,(var_manhb):=get(var_temp_manhb) & get(var_temp_manperf)]
  #delete these variables because will use them in the subsequent loops we make
  t2reboot[,(var_temp_manperf):=NULL]
  t2reboot[,(var_temp_manhb):=NULL]
}
xtabs(~t2reboot$TrialOne_manhb_24_24)
#mild_mod anemia retest after one month
# Mild/moderate anemia (Hb 7-10.9): qualifying follow-up is a repeat Hb
# result 3-5 weeks later.
# NOTE(review): both temp flags use the same labhb_exists check (the
# alternative check is commented out below), so the intervention arm's
# "manperf" component is effectively the same retest condition -- confirm
# this is intended.
for(i in 0:37){
  # make sure everything has 2 digits (with 0 in front)
  week_current <- formatC(i, width=2, flag="0")
  weeks_later <- formatC(i+c(3:5), width=2, flag="0")
  #output variable
  var_manhb <- sprintf("TrialOne_manhb_mildmodhbret_%s_%s", week_current, week_current)
  var_temp_manperf <- "temp_manperf"
  var_temp_manhb <- "temp_manhb"
  #id source
  var_badhb <- sprintf("TrialOne_labhb_anemia_mild_mod_%s_%s", week_current, week_current)
  # no one has anything
  t2reboot[,(var_temp_manperf):=as.logical(NA)]
  t2reboot[,(var_temp_manhb):=as.logical(NA)]
  # is false, if you have a bad hb
  t2reboot[get(var_badhb)==TRUE, (var_temp_manperf):=FALSE]
  t2reboot[get(var_badhb)==TRUE, (var_temp_manhb):=FALSE]
  for(week_later in weeks_later){
    # working only on manerf check
    var_secondcheck <- sprintf("TrialOne_labhb_exists_%s_%s",
                               week_later,
                               week_later)
    # flip "bad management" to "good" when a retest happened
    t2reboot[get(var_temp_manperf)==FALSE &
               get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
    # working only on second anemia check
    #var_secondcheck <- sprintf("TrialOne_labhb_exists_%s_%s",
    #                           week_later,
    #                           week_later)
    # same flip for the recheck flag
    t2reboot[get(var_temp_manhb)==FALSE & get(var_secondcheck)==TRUE, (var_temp_manhb):=TRUE]
  }
  #making var for sev anemia
  t2reboot[,(var_manhb):=as.logical(NA)]
  #control
  t2reboot[ident_dhis2_control==T,(var_manhb):=get(var_temp_manhb)]
  #intervention
  t2reboot[ident_dhis2_control==F,(var_manhb):=get(var_temp_manhb) & get(var_temp_manperf)]
  #delete these variables because will use them in the subsequent loops we make
  t2reboot[,(var_temp_manperf):=NULL]
  t2reboot[,(var_temp_manhb):=NULL]
}
xtabs(~t2reboot$TrialOne_manhb_mildmodhbret_32_32)
#mild htn
#Urine stick AND LFT AND KFT AND ultrasound within a week
#refer to hospital if proteinuria
#ModsevGHTbpsyst
# Moderate/severe systolic hypertension: manperf flag flips on a hospital
# referral, recheck flag flips on a repeat systolic BP, both within
# 3-4 weeks after the finding.
for(i in 0:37){
  #i=23
  # make sure everything has 2 digits (with 0 in front)
  week_current <- formatC(i, width=2, flag="0")
  weeks_later <- formatC(i+c(3:4), width=2, flag="0")
  #output variable
  var_manght <- sprintf("TrialOne_manhtn_ModSev_%s_%s", week_current, week_current)
  var_temp_manperf <- "temp_manperf"
  var_temp_manght <- "temp_manght"
  #id source
  var_badght <- sprintf("TrialOne_anbpsyst_modSevHTN_%s_%s", week_current, week_current)
  # no one has anything
  t2reboot[,(var_temp_manperf):=as.logical(NA)]
  t2reboot[,(var_temp_manght):=as.logical(NA)]
  # is false, if you have a bad hb
  t2reboot[get(var_badght)==TRUE, (var_temp_manperf):=FALSE]
  t2reboot[get(var_badght)==TRUE, (var_temp_manght):=FALSE]
  for(week_later in weeks_later){
    # working only on manerf check
    var_secondcheck <- sprintf("TrialOne_refHosp_%s_%s",
                               week_later,
                               week_later)
    # flip manperf to TRUE when a hospital referral happened in the window
    t2reboot[get(var_temp_manperf)==FALSE &
               get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
    # working only on second anemia check
    var_secondcheck <- sprintf("TrialOne_anbpsyst_present_%s_%s",
                               week_later,
                               week_later)
    # flip the recheck flag when a repeat systolic BP exists in the window
    t2reboot[get(var_temp_manght)==FALSE & get(var_secondcheck)==TRUE, (var_temp_manght):=TRUE]
  }
  #making var for sev anemia
  t2reboot[,(var_manght):=as.logical(NA)]
  #control
  t2reboot[ident_dhis2_control==T,(var_manght):=get(var_temp_manght)]
  #intervention
  t2reboot[ident_dhis2_control==F,(var_manght):=get(var_temp_manght) & get(var_temp_manperf)]
  #delete these variables because will use them in the subsequent loops we make
  t2reboot[,(var_temp_manperf):=NULL]
  t2reboot[,(var_temp_manght):=NULL]
}
xtabs(~t2reboot$TrialOne_manhtn_ModSev_18_18)
# High RBG, RefHosp
# High random blood glucose (>=140): qualifying follow-up is a hospital
# referral in the same or next week (both temp flags share the refHosp
# check; the repeat-RBS alternative is commented out below).
for(i in 0:37){
  # make sure everything has 2 digits (with 0 in front)
  week_current <- formatC(i, width=2, flag="0")
  weeks_later <- formatC(i+c(0:1), width=2, flag="0")
  #output variable
  var_mangdm <- sprintf("TrialOne_manRBGHigh_Hosp_%s_%s", week_current, week_current)
  var_temp_manperf <- "temp_manperf"
  var_temp_mangdm <- "temp_mangdm"
  #id source
  var_badgdm <- sprintf("TrialOne_labbloodglu_high_%s_%s", week_current, week_current)
  # no one has anything
  t2reboot[,(var_temp_manperf):=as.logical(NA)]
  t2reboot[,(var_temp_mangdm):=as.logical(NA)]
  # is false, if you have a bad hb
  t2reboot[get(var_badgdm)==TRUE, (var_temp_manperf):=FALSE]
  t2reboot[get(var_badgdm)==TRUE, (var_temp_mangdm):=FALSE]
  for(week_later in weeks_later){
    # working only on manerf check
    var_secondcheck <- sprintf("TrialOne_refHosp_%s_%s",
                               week_later,
                               week_later)
    # flip "bad management" to "good" when the referral happened
    t2reboot[get(var_temp_manperf)==FALSE &
               get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
    # working only on second check
    # var_secondcheck <- sprintf("TrialOne_labbloodglu_exists_%s_%s",
    #                            week_later,
    #                            week_later)
    # same flip for the recheck flag
    t2reboot[get(var_temp_mangdm)==FALSE & get(var_secondcheck)==TRUE, (var_temp_mangdm):=TRUE]
  }
  #making var for high blood glu
  t2reboot[,(var_mangdm):=as.logical(NA)]
  #control
  t2reboot[ident_dhis2_control==T,(var_mangdm):=get(var_temp_mangdm)]
  #intervention
  t2reboot[ident_dhis2_control==F,(var_mangdm):=get(var_temp_mangdm) & get(var_temp_manperf)]
  #delete these variables because will use them in the subsequent loops we make
  t2reboot[,(var_temp_manperf):=NULL]
  t2reboot[,(var_temp_mangdm):=NULL]
}
xtabs(~t2reboot$TrialOne_manRBGHigh_Hosp_24_24)
# High RBG, RefHR
# Same as the RefHosp loop above, but the qualifying follow-up action is a
# high-risk clinic referral (TrialOne_refHR_*).
for(i in 0:37){
  # make sure everything has 2 digits (with 0 in front)
  week_current <- formatC(i, width=2, flag="0")
  weeks_later <- formatC(i+c(0:1), width=2, flag="0")
  #output variable
  var_mangdm <- sprintf("TrialOne_manRBGHigh_HR_%s_%s", week_current, week_current)
  var_temp_manperf <- "temp_manperf"
  var_temp_mangdm <- "temp_mangdm"
  #id source
  var_badgdm <- sprintf("TrialOne_labbloodglu_high_%s_%s", week_current, week_current)
  # no one has anything
  t2reboot[,(var_temp_manperf):=as.logical(NA)]
  t2reboot[,(var_temp_mangdm):=as.logical(NA)]
  # is false, if you have a bad hb
  t2reboot[get(var_badgdm)==TRUE, (var_temp_manperf):=FALSE]
  t2reboot[get(var_badgdm)==TRUE, (var_temp_mangdm):=FALSE]
  for(week_later in weeks_later){
    # working only on manerf check
    var_secondcheck <- sprintf("TrialOne_refHR_%s_%s",
                               week_later,
                               week_later)
    # flip "bad management" to "good" when the referral happened
    t2reboot[get(var_temp_manperf)==FALSE &
               get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
    # working only on second check
    # var_secondcheck <- sprintf("TrialOne_labbloodglu_exists_%s_%s",
    #                            week_later,
    #                            week_later)
    # same flip for the recheck flag
    t2reboot[get(var_temp_mangdm)==FALSE & get(var_secondcheck)==TRUE, (var_temp_mangdm):=TRUE]
  }
  #making var for high blood glu
  t2reboot[,(var_mangdm):=as.logical(NA)]
  #control
  t2reboot[ident_dhis2_control==T,(var_mangdm):=get(var_temp_mangdm)]
  #intervention
  t2reboot[ident_dhis2_control==F,(var_mangdm):=get(var_temp_mangdm) & get(var_temp_manperf)]
  #delete these variables because will use them in the subsequent loops we make
  t2reboot[,(var_temp_manperf):=NULL]
  t2reboot[,(var_temp_mangdm):=NULL]
}
xtabs(~t2reboot$TrialOne_manRBGHigh_HR_24_24)
# High RBG, RefDIAB
# Same pattern as the RefHosp/RefHR loops above, but the qualifying
# follow-up action is a diabetes referral (TrialOne_refDiab_*) in the same
# or next week; both temp flags share that check.
for(i in 0:37){
  # make sure everything has 2 digits (with 0 in front)
  week_current <- formatC(i, width=2, flag="0")
  weeks_later <- formatC(i+c(0:1), width=2, flag="0")
  # output variable
  var_mangdm <- sprintf("TrialOne_manRBGHigh_Diab_%s_%s", week_current, week_current)
  var_temp_manperf <- "temp_manperf"
  var_temp_mangdm <- "temp_mangdm"
  # id source
  var_badgdm <- sprintf("TrialOne_labbloodglu_high_%s_%s", week_current, week_current)
  # no one has anything yet
  t2reboot[,(var_temp_manperf):=as.logical(NA)]
  t2reboot[,(var_temp_mangdm):=as.logical(NA)]
  # FALSE for anyone with a high RBS in week i
  t2reboot[get(var_badgdm)==TRUE, (var_temp_manperf):=FALSE]
  t2reboot[get(var_badgdm)==TRUE, (var_temp_mangdm):=FALSE]
  for(week_later in weeks_later){
    # working only on manperf check
    var_secondcheck <- sprintf("TrialOne_refDiab_%s_%s",
                               week_later,
                               week_later)
    # flip "bad management" to "good" when the diabetes referral happened
    t2reboot[get(var_temp_manperf)==FALSE &
               get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
    # same flip for the recheck flag
    t2reboot[get(var_temp_mangdm)==FALSE & get(var_secondcheck)==TRUE, (var_temp_mangdm):=TRUE]
  }
  # making var for high blood glu
  t2reboot[,(var_mangdm):=as.logical(NA)]
  # control arm: recheck flag only
  t2reboot[ident_dhis2_control==T,(var_mangdm):=get(var_temp_mangdm)]
  # intervention arm: recheck AND management performed
  t2reboot[ident_dhis2_control==F,(var_mangdm):=get(var_temp_mangdm) & get(var_temp_manperf)]
  # delete the temp columns; they are reused by the subsequent loops
  t2reboot[,(var_temp_manperf):=NULL]
  t2reboot[,(var_temp_mangdm):=NULL]
}
# BUG FIX: this tabulation originally repeated the *_HR_* variable from the
# previous loop; report the Diab outcome this loop just created.
xtabs(~t2reboot$TrialOne_manRBGHigh_Diab_24_24)
# malpresentation: us_malpres
# Ultrasound-detected malpresentation (TrialOne_us_malpresvar_*): qualifying
# follow-up is a hospital referral in the same or next week (both temp
# flags share the refHosp check).
for(i in 0:37){
  # make sure everything has 2 digits (with 0 in front)
  week_current <- formatC(i, width=2, flag="0")
  weeks_later <- formatC(i+c(0:1), width=2, flag="0")
  #output variable
  var_manpres <- sprintf("TrialOne_manmalpres_us_%s_%s", week_current, week_current)
  var_temp_manperf <- "temp_manperf"
  var_temp_manpres <- "temp_manpres"
  #id source
  var_badpres <- sprintf("TrialOne_us_malpresvar_%s_%s", week_current, week_current)
  # no one has anything
  t2reboot[,(var_temp_manperf):=as.logical(NA)]
  t2reboot[,(var_temp_manpres):=as.logical(NA)]
  # is false, if you have a bad hb
  t2reboot[get(var_badpres)==TRUE, (var_temp_manperf):=FALSE]
  t2reboot[get(var_badpres)==TRUE, (var_temp_manpres):=FALSE]
  for(week_later in weeks_later){
    # working only on manerf check
    var_secondcheck <- sprintf("TrialOne_refHosp_%s_%s",
                               week_later,
                               week_later)
    # flip "bad management" to "good" when the referral happened
    t2reboot[get(var_temp_manperf)==FALSE &
               get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
    # working only on second check
    # var_secondcheck <- sprintf("TrialOne_labbloodglu_exists_%s_%s",
    #                            week_later,
    #                            week_later)
    # same flip for the recheck flag
    t2reboot[get(var_temp_manpres)==FALSE & get(var_secondcheck)==TRUE, (var_temp_manpres):=TRUE]
  }
  #making var for high blood glu
  t2reboot[,(var_manpres):=as.logical(NA)]
  #control
  t2reboot[ident_dhis2_control==T,(var_manpres):=get(var_temp_manpres)]
  #intervention
  t2reboot[ident_dhis2_control==F,(var_manpres):=get(var_temp_manpres) & get(var_temp_manperf)]
  #delete these variables because will use them in the subsequent loops we make
  t2reboot[,(var_temp_manperf):=NULL]
  t2reboot[,(var_temp_manpres):=NULL]
}
xtabs(~t2reboot$TrialOne_manmalpres_us_36_36)
# malpresentation: anexampalpmal
for(i in 0:37){
# make sure everything has 2 digits (with 0 in front)
week_current <- formatC(i, width=2, flag="0")
weeks_later <- formatC(i+c(0:1), width=2, flag="0")
#output variable
var_manpres <- sprintf("TrialOne_manmalpres_anexam_%s_%s", week_current, week_current)
var_temp_manperf <- "temp_manperf"
var_temp_manpres <- "temp_manpres"
#id source
var_badpres <- sprintf("TrialOne_anexampalpmal_%s_%s", week_current, week_current)
# no one has anything
t2reboot[,(var_temp_manperf):=as.logical(NA)]
t2reboot[,(var_temp_manpres):=as.logical(NA)]
# is false, if you have a bad hb
t2reboot[get(var_badpres)==TRUE, (var_temp_manperf):=FALSE]
t2reboot[get(var_badpres)==TRUE, (var_temp_manpres):=FALSE]
for(week_later in weeks_later){
# working only on manerf check
var_secondcheck <- sprintf("TrialOne_refHosp_%s_%s",
week_later,
week_later)
# if they have “bad management” (currently) and “good second check” then turn their management into “good management”
t2reboot[get(var_temp_manperf)==FALSE &
get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
# working only on second check
# var_secondcheck <- sprintf("TrialOne_labbloodglu_exists_%s_%s",
# week_later,
# week_later)
# if they have “bad management” (currently) and “good second check” then turn their management into “good management”
t2reboot[get(var_temp_manpres)==FALSE & get(var_secondcheck)==TRUE, (var_temp_manpres):=TRUE]
}
#making var for high blood glu
t2reboot[,(var_manpres):=as.logical(NA)]
#control
t2reboot[ident_dhis2_control==T,(var_manpres):=get(var_temp_manpres)]
#intervention
t2reboot[ident_dhis2_control==F,(var_manpres):=get(var_temp_manpres) & get(var_temp_manperf)]
#delete these variables because will use them in the subsequent loops we make
t2reboot[,(var_temp_manperf):=NULL]
t2reboot[,(var_temp_manpres):=NULL]
}
xtabs(~t2reboot$TrialOne_manmalpres_anexam_35_35)
### iugr and lga stuff for managements was removed from here
########################## Referred for any management ##########################
############ Ref Hosp ####################
# Per-week "referred to hospital" management flag TrialOne_manRef_Hosp_ww_ww:
#   control arm:      TRUE if a hospital referral was recorded that week
#   intervention arm: additionally requires TrialOne_manperf of the SAME week
# (weeks_later uses c(0:0), i.e. only the current week is checked here).
for(i in 0:37){
# make sure everything has 2 digits (with 0 in front)
week_current <- formatC(i, width=2, flag="0")
weeks_later <- formatC(i+c(0:0), width=2, flag="0")
#output variable
var_refHosp <- sprintf("TrialOne_manRef_Hosp_%s_%s", week_current, week_current)
var_temp_manperf <- "temp_manperf"
var_temp_refHosp <- "temp_refHosp"
#id source
var_refHospsource <- sprintf("TrialOne_refHosp_%s_%s", week_current, week_current)
# no one has anything
t2reboot[,(var_temp_manperf):=as.logical(NA)]
t2reboot[,(var_temp_refHosp):=as.logical(NA)]
# is false, if you have a referral
# intervention
t2reboot[get(var_refHospsource)==TRUE, (var_temp_manperf):=FALSE]
# everyone
#t2reboot[!is.na(get(var_refHospsource)), (var_temp_refHosp):=FALSE]
# control
t2reboot[get(var_refHospsource)==TRUE, (var_temp_refHosp):=TRUE]
for(week_later in weeks_later){
# working only on manperf check
var_manperf <- sprintf("TrialOne_manperf_%s_%s",
                       week_later,
                       week_later)
# if they have “bad management” (currently) and “good second check” then turn their management into “good management”
t2reboot[get(var_temp_manperf)==FALSE &
           get(var_manperf)==TRUE, (var_temp_manperf):=TRUE]
}
#making the final referred-to-hospital var for this week
t2reboot[,(var_refHosp):=as.logical(NA)]
#control
t2reboot[ident_dhis2_control==T,(var_refHosp):=get(var_temp_refHosp)]
#intervention
t2reboot[ident_dhis2_control==F,(var_refHosp):=get(var_temp_manperf) &
           get(var_temp_refHosp)]
#delete these variables because will use them in the subsequent loops we make
t2reboot[,(var_temp_manperf):=NULL]
t2reboot[,(var_temp_refHosp):=NULL]
}
# spot checks by arm
xtabs(~t2reboot[ident_dhis2_control==T]$TrialOne_manRef_Hosp_35_35)
xtabs(~t2reboot[ident_dhis2_control==F]$TrialOne_manRef_Hosp_35_35)
xtabs(~t2reboot[ident_dhis2_control==T]$TrialOne_manRef_Hosp_32_32)
xtabs(~t2reboot[ident_dhis2_control==F]$TrialOne_manRef_Hosp_32_32)
# manual-inspection extract for week 32 (intervention arm only)
checkHosp <- t2reboot[!is.na(TrialOne_manRef_Hosp_32_32) &
                        ident_dhis2_control==F, c("TrialOne_manperf_32_32",
                                                  "TrialOne_refHosp_32_32",
                                                  "TrialOne_manRef_Hosp_32_32")]
########## Ref HR for any reason at any time point #########
# Per-week "referred to high-risk clinic" flag TrialOne_manRef_HR_ww_ww,
# built exactly like TrialOne_manRef_Hosp above but from TrialOne_refHR.
for(i in 0:37){
# make sure everything has 2 digits (with 0 in front)
week_current <- formatC(i, width=2, flag="0")
weeks_later <- formatC(i+c(0:0), width=2, flag="0")
#output variable
var_refHR <- sprintf("TrialOne_manRef_HR_%s_%s", week_current, week_current)
var_temp_manperf <- "temp_manperf"
var_temp_refHR <- "temp_refHR"
#id source
var_refHRsource <- sprintf("TrialOne_refHR_%s_%s", week_current, week_current)
# no one has anything
t2reboot[,(var_temp_manperf):=as.logical(NA)]
t2reboot[,(var_temp_refHR):=as.logical(NA)]
# is false, if you have a referral
# intervention
t2reboot[get(var_refHRsource)==TRUE, (var_temp_manperf):=FALSE]
# control
t2reboot[get(var_refHRsource)==TRUE, (var_temp_refHR):=TRUE]
for(week_later in weeks_later){
# working only on manperf check
var_manperf <- sprintf("TrialOne_manperf_%s_%s",
                       week_later,
                       week_later)
# if they have “bad management” (currently) and “good second check” then turn their management into “good management”
t2reboot[get(var_temp_manperf)==FALSE &
           get(var_manperf)==TRUE, (var_temp_manperf):=TRUE]
}
#making the final referred-to-HR var for this week
t2reboot[,(var_refHR):=as.logical(NA)]
#control
t2reboot[ident_dhis2_control==T,(var_refHR):=get(var_temp_refHR)]
#intervention
t2reboot[ident_dhis2_control==F,(var_refHR):=get(var_temp_manperf) &
           get(var_temp_refHR)]
#delete these variables because will use them in the subsequent loops we make
t2reboot[,(var_temp_manperf):=NULL]
t2reboot[,(var_temp_refHR):=NULL]
}
# spot checks by arm
xtabs(~t2reboot[ident_dhis2_control==T]$TrialOne_manRef_HR_35_35)
xtabs(~t2reboot[ident_dhis2_control==F]$TrialOne_manRef_HR_35_35)
xtabs(~t2reboot[ident_dhis2_control==T]$TrialOne_manRef_HR_20_20)
xtabs(~t2reboot[ident_dhis2_control==F]$TrialOne_manRef_HR_20_20)
# manual-inspection extract for week 20 (intervention arm only)
checkHR <- t2reboot[!is.na(TrialOne_manRef_HR_20_20) &
                      ident_dhis2_control==F, c("TrialOne_manperf_20_20",
                                                "TrialOne_refHR_20_20",
                                                "TrialOne_manRef_HR_20_20")]
##################### Process Outcomes #################
########## Anemia ##########
# Anaemia-screening opportunities, one indicator column per window.
# Each column is NA (no opportunity) or 1 (opportunity); the HbonTime_*
# success flags below are tallied against these denominators.

# Window 1: booked before 24 weeks of gestation.
t2reboot[, Opportunity_anemia_screening_1 := NA_real_]
t2reboot[bookgestagedays_cats %in% c("(0,104]", "(104,125]", "(125,160]", "(160,167]"),
         Opportunity_anemia_screening_1 := 1]
xtabs(~t2reboot$Opportunity_anemia_screening_1, addNA = TRUE)

# Window 2: booked at 24-28 weeks, or attended an ANC visit in that window.
t2reboot[, Opportunity_anemia_screening_2 := NA_real_]
t2reboot[bookgestagedays_cats %in% c("(167,202]") | TrialOne_anvisitnew_24_28 == TRUE,
         Opportunity_anemia_screening_2 := 1]
xtabs(~t2reboot$Opportunity_anemia_screening_2, addNA = TRUE)

# Window 3: booked at 29-34 weeks.
t2reboot[, Opportunity_anemia_screening_3 := NA_real_]
t2reboot[bookgestagedays_cats %in% c("(202,216]", "(216,237]", "(237,244]"),
         Opportunity_anemia_screening_3 := 1]
xtabs(~t2reboot$Opportunity_anemia_screening_3, addNA = TRUE)

# Window 4: booked at 35-37 weeks, or attended an ANC visit then.
t2reboot[, Opportunity_anemia_screening_4 := NA_real_]
t2reboot[bookgestagedays_cats %in% c("(244,265]") | TrialOne_anvisitnew_35_37 == TRUE,
         Opportunity_anemia_screening_4 := 1]
xtabs(~t2reboot$Opportunity_anemia_screening_4, addNA = TRUE)

# Window 5: severe anaemia found at booking or at any visit outside 24-28/35-37.
t2reboot[, Opportunity_anemia_screening_5 := NA_real_]
t2reboot[TrialOne_labhb_anemia_sev_00_14 == TRUE |
           TrialOne_labhb_anemia_sev_15_17 == TRUE |
           TrialOne_labhb_anemia_sev_18_22 == TRUE |
           TrialOne_labhb_anemia_sev_23_23 == TRUE,
         Opportunity_anemia_screening_5 := 1]
xtabs(~t2reboot$Opportunity_anemia_screening_5, addNA = TRUE)

# Window 6: mild/moderate anaemia in the same periods.
t2reboot[, Opportunity_anemia_screening_6 := NA_real_]
t2reboot[TrialOne_labhb_anemia_mild_mod_00_14 == TRUE |
           TrialOne_labhb_anemia_mild_mod_15_17 == TRUE |
           TrialOne_labhb_anemia_mild_mod_18_22 == TRUE |
           TrialOne_labhb_anemia_mild_mod_23_23 == TRUE,
         Opportunity_anemia_screening_6 := 1]
xtabs(~t2reboot$Opportunity_anemia_screening_6, addNA = TRUE)
# ADJUSTING OPPORTUNITIES FOR THOSE WHO HAVE BEEN REFERRED
## Before 24 weeks
# manhbsev: severe-anaemia management recorded in ANY single week 00-23.
# Reduce(`|`, ...) over the weekly columns is equivalent to the long
# TrialOne_manhb_00_00 | ... | TrialOne_manhb_23_23 chain, including NA
# propagation.
t2reboot[, manhbsev := Reduce(`|`, mget(sprintf("TrialOne_manhb_%02d_%02d", 0:23, 0:23)))]
xtabs(~t2reboot$manhbsev, addNA = TRUE)
# RefHr: NA outside the before-24-weeks opportunity, FALSE inside it, and
# flipped to TRUE wherever a high-risk referral (TrialOne_manRef_HR_ww_ww)
# was recorded in any week 00-23.
t2reboot[, RefHr := as.logical(NA)]
t2reboot[Opportunity_anemia_screening_1 == 1, RefHr := FALSE]
# Any-week referral indicator; `%in% TRUE` maps NA to FALSE, which matches
# the original behaviour of an OR chain used as a data.table filter (NA rows
# are simply not updated).
refhr_early <- t2reboot[, Reduce(`|`, .SD) %in% TRUE,
                        .SDcols = sprintf("TrialOne_manRef_HR_%02d_%02d", 0:23, 0:23)]
t2reboot[refhr_early, RefHr := TRUE]
xtabs(~t2reboot$RefHr, addNA = TRUE)
## At 24-28 weeks
# Withdraw the 24-28-week opportunity when the woman was already referred to
# the high-risk clinic before (RefHr) or in the weeks preceding her visit.
# FIX: the visit/referral disjunction is now wrapped in one set of
# parentheses so the Opportunity_anemia_screening_2==1 gate applies to EVERY
# disjunct (R's `&` binds tighter than `|`). Net values are unchanged here
# because the column is only ever NA or 1 (and NA-1 stays NA), but the
# gating is now explicit and matches the correctly bracketed Opp_3/Opp_4/
# Opp_5 adjustments in the attendance section.
t2reboot[Opportunity_anemia_screening_2==1 &
           ((TrialOne_anvisitnew_24_24 &
               (RefHr==T))|
              (TrialOne_anvisitnew_25_25 &
                 (RefHr==T|TrialOne_manRef_HR_24_24==T))|
              (TrialOne_anvisitnew_26_26 &
                 (RefHr==T|TrialOne_manRef_HR_24_24==T|
                    TrialOne_manRef_HR_25_25==T))|
              (TrialOne_anvisitnew_27_27 &
                 (RefHr==T|TrialOne_manRef_HR_24_24==T|
                    TrialOne_manRef_HR_25_25==T|
                    TrialOne_manRef_HR_26_26==T))|
              (TrialOne_anvisitnew_28_28 &
                 (RefHr==T|
                    TrialOne_manRef_HR_24_24==T|
                    TrialOne_manRef_HR_25_25==T|
                    TrialOne_manRef_HR_26_26==T|
                    TrialOne_manRef_HR_27_27==T))),
         Opportunity_anemia_screening_2:=Opportunity_anemia_screening_2-1]
xtabs(~t2reboot$Opportunity_anemia_screening_2, addNA=T)
# 35-37 weeks
# Same parenthesisation fix for the 35-37-week opportunity adjustment.
t2reboot[Opportunity_anemia_screening_4==1 &
           ((TrialOne_anvisitnew_29_30==T &
               (RefHr==T|
                  TrialOne_manRef_HR_24_24==T|
                  TrialOne_manRef_HR_25_25==T|
                  TrialOne_manRef_HR_26_26==T|
                  TrialOne_manRef_HR_27_27==T|
                  TrialOne_manRef_HR_28_28==T))|
              (TrialOne_anvisitnew_31_33==T &
                 (RefHr==T|
                    TrialOne_manRef_HR_24_24==T|
                    TrialOne_manRef_HR_25_25==T|
                    TrialOne_manRef_HR_26_26==T|
                    TrialOne_manRef_HR_27_27==T|
                    TrialOne_manRef_HR_28_28==T|
                    TrialOne_manRef_HR_29_29==T|
                    TrialOne_manRef_HR_30_30==T))|
              (TrialOne_anvisitnew_34_34==T &
                 (RefHr==T|
                    TrialOne_manRef_HR_24_24==T|
                    TrialOne_manRef_HR_25_25==T|
                    TrialOne_manRef_HR_26_26==T|
                    TrialOne_manRef_HR_27_27==T|
                    TrialOne_manRef_HR_28_28==T|
                    TrialOne_manRef_HR_29_29==T|
                    TrialOne_manRef_HR_30_30==T|
                    TrialOne_manRef_HR_31_31==T|
                    TrialOne_manRef_HR_32_32==T|
                    TrialOne_manRef_HR_33_33==T))),
         Opportunity_anemia_screening_4:=Opportunity_anemia_screening_4-1]
xtabs(~t2reboot$Opportunity_anemia_screening_4, addNA=T)
#define different time cats for success
# HbonTime_* encode screening success per opportunity window:
#   NA = no opportunity, FALSE = opportunity but (not yet) successful,
#   TRUE = success. Suffixes: a = screened with normal Hb,
#   b = severe anaemia managed, c = mild/moderate anaemia retested.
t2reboot[, HbonTime_1a:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_1==1, HbonTime_1a:=FALSE]
t2reboot[, HbonTime_1b:= as.logical(NA)]
# NOTE(review): 1b counts booklabhb>=2 as severe; the 29-34-week analogue
# (HbonTime_3b below) uses >2 — confirm which boundary is intended.
t2reboot[Opportunity_anemia_screening_1==1 &
           booklabhb<7 & booklabhb>=2,HbonTime_1b:=FALSE]
t2reboot[, HbonTime_1c:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_1==1 &
           booklabhb>=7 & booklabhb<11,HbonTime_1c:=FALSE ]
# Hbontime_2
t2reboot[,HbonTime_2a:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_2==1, HbonTime_2a:=FALSE]
t2reboot[, HbonTime_2b:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_2==1 &
           TrialOne_labhb_anemia_sev_24_28==T, HbonTime_2b:=FALSE]
t2reboot[,HbonTime_2c:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_2==1 &
           TrialOne_labhb_anemia_mild_mod_24_28==T, HbonTime_2c:=FALSE]
# 29-34 weeks
# Hbontime_3
t2reboot[, HbonTime_3a:= as.logical(NA)]
# NOTE(review): unlike 1a, 3a additionally requires a non-missing booking Hb
# before opening the success flag — confirm this asymmetry is intended.
t2reboot[Opportunity_anemia_screening_3==1 &
           (!is.na(booklabhb)), HbonTime_3a:=FALSE]
t2reboot[, HbonTime_3b:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_3==1 &
           (booklabhb<7 & booklabhb>2), HbonTime_3b:=FALSE]
t2reboot[,HbonTime_3c:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_3==1 &
           (booklabhb<11 & booklabhb>=7), HbonTime_3c:=FALSE]
# Hbontime_4 (35-37 weeks)
t2reboot[,HbonTime_4a:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_4==1, HbonTime_4a:=FALSE]
t2reboot[,HbonTime_4b:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_4==1 &
           TrialOne_labhb_anemia_sev_35_37==T,HbonTime_4b:=FALSE]
t2reboot[,HbonTime_4c:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_4==1 &
           TrialOne_labhb_anemia_mild_mod_35_37==T, HbonTime_4c:=FALSE]
# Windows 5 and 6 (anaemia found outside the main screening windows)
t2reboot[,HbonTime_5:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_5==1, HbonTime_5:=FALSE]
t2reboot[,HbonTime_6:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_6==1, HbonTime_6:=FALSE]
#hb on time 1, 2, 3, vars
#Screen at bookings before 24 weeks??
#check booklabhb values if normal etc
# booked before 24 weeks
# 1a success: booking Hb in the normal range 11-18.
t2reboot[HbonTime_1a==F & booklabhb>=11 &
           booklabhb<=18, HbonTime_1a:=TRUE]
xtabs(~t2reboot$HbonTime_1a, addNA=T)
# 1b success: severe anaemia managed at any point before 24 weeks.
t2reboot[HbonTime_1b==F &
           manhbsev==T,HbonTime_1b:=TRUE]
xtabs(~t2reboot$HbonTime_1b, addNA=T)
# 1c success: mild/moderate anaemia retest recorded in any week 00-23.
t2reboot[HbonTime_1c==F &
           (TrialOne_manhb_mildmodhbret_00_00==T|
              TrialOne_manhb_mildmodhbret_01_01==T|
              TrialOne_manhb_mildmodhbret_02_02==T|
              TrialOne_manhb_mildmodhbret_03_03==T|
              TrialOne_manhb_mildmodhbret_04_04==T|
              TrialOne_manhb_mildmodhbret_05_05==T|
              TrialOne_manhb_mildmodhbret_06_06==T|
              TrialOne_manhb_mildmodhbret_07_07==T|
              TrialOne_manhb_mildmodhbret_08_08==T|
              TrialOne_manhb_mildmodhbret_09_09==T|
              TrialOne_manhb_mildmodhbret_10_10==T|
              TrialOne_manhb_mildmodhbret_11_11==T|
              TrialOne_manhb_mildmodhbret_12_12==T|
              TrialOne_manhb_mildmodhbret_13_13==T|
              TrialOne_manhb_mildmodhbret_14_14==T|
              TrialOne_manhb_mildmodhbret_15_15==T|
              TrialOne_manhb_mildmodhbret_16_16==T|
              TrialOne_manhb_mildmodhbret_17_17==T|
              TrialOne_manhb_mildmodhbret_18_18==T|
              TrialOne_manhb_mildmodhbret_19_19==T|
              TrialOne_manhb_mildmodhbret_20_20==T|
              TrialOne_manhb_mildmodhbret_21_21==T|
              TrialOne_manhb_mildmodhbret_22_22==T|
              TrialOne_manhb_mildmodhbret_23_23==T),HbonTime_1c:=TRUE]
xtabs(~t2reboot$HbonTime_1c, addNA=T)
#24-28 screenings
# Window-2 successes.
t2reboot[HbonTime_2a==F &
           TrialOne_labhb_normal_24_28==T, HbonTime_2a:=TRUE]
# FIX: the management OR-chains are now parenthesised. In the original,
# `HbonTime_2b==F & manhb_24_24==T | manhb_25_25==T | ...` parsed as
# `(HbonTime_2b==F & manhb_24_24==T) | manhb_25_25==T | ...` because R's `&`
# binds tighter than `|` — so women with NO 24-28 opportunity (HbonTime_2b
# still NA) were credited a success whenever any of weeks 25-28 had
# management. Compare the correctly bracketed HbonTime_1c setter above.
t2reboot[HbonTime_2b==F &
           (TrialOne_manhb_24_24==T|
              TrialOne_manhb_25_25==T|
              TrialOne_manhb_26_26==T|
              TrialOne_manhb_27_27==T|
              TrialOne_manhb_28_28==T), HbonTime_2b:=TRUE]
t2reboot[HbonTime_2c==F &
           (TrialOne_manhb_mildmodhbret_24_24==T|
              TrialOne_manhb_mildmodhbret_25_25==T|
              TrialOne_manhb_mildmodhbret_26_26==T|
              TrialOne_manhb_mildmodhbret_27_27==T|
              TrialOne_manhb_mildmodhbret_28_28==T), HbonTime_2c:=TRUE]
#booked 29-30, 31-33, 34
# Window-3 successes (booked 29-34 weeks).
# NOTE(review): 3a counts booklabhb>11 as normal while the <24-week analogue
# (HbonTime_1a) uses >=11 — confirm which boundary is intended.
t2reboot[HbonTime_3a==F & Opportunity_anemia_screening_3==1 &
           (booklabhb<=18 & booklabhb>11), HbonTime_3a:=TRUE]
# FIX: the original guard read `HbonTime_3c==1` (i.e. ==TRUE). At this point
# HbonTime_3c is only ever NA or FALSE, so the condition never matched and
# the mild/moderate-anaemia retest success could never be recorded. `==F`
# matches the sibling HbonTime_3b / HbonTime_1c setters.
t2reboot[HbonTime_3c==F &
           (TrialOne_manhb_mildmodhbret_29_29==T|
              TrialOne_manhb_mildmodhbret_30_30==T|
              TrialOne_manhb_mildmodhbret_31_31==T|
              TrialOne_manhb_mildmodhbret_32_32==T|
              TrialOne_manhb_mildmodhbret_33_33==T|
              TrialOne_manhb_mildmodhbret_34_34==T),
         HbonTime_3c:=TRUE]
# Severe anaemia at booking, managed in weeks 29-34.
t2reboot[HbonTime_3b==F &
           (TrialOne_manhb_29_29==T|
              TrialOne_manhb_30_30==T|
              TrialOne_manhb_31_31==T|
              TrialOne_manhb_32_32==T|
              TrialOne_manhb_33_33==T|
              TrialOne_manhb_34_34==T),
         HbonTime_3b:=TRUE]
# 35-37 screenings
# Window-4 successes.
t2reboot[HbonTime_4a==F &
           TrialOne_labhb_normal_35_37==T, HbonTime_4a:=TRUE]
# FIX: OR-chains parenthesised — same `&`-over-`|` precedence bug as in the
# 24-28-week setters: without the brackets, rows outside the opportunity
# (HbonTime_4b/4c still NA) were flipped to TRUE by weeks 36-37 alone.
t2reboot[HbonTime_4b==F &
           (TrialOne_manhb_35_35==T|
              TrialOne_manhb_36_36==T|
              TrialOne_manhb_37_37==T), HbonTime_4b:=TRUE]
t2reboot[HbonTime_4c==F &
           (TrialOne_manhb_mildmodhbret_35_35==T|
              TrialOne_manhb_mildmodhbret_36_36==T|
              TrialOne_manhb_mildmodhbret_37_37==T), HbonTime_4c:=TRUE]
# severe anemia outside of time windows
# Window-5 success: severe-anaemia management recorded in any week 00-23 or
# 29-34 (i.e. outside the 24-28 and 35-37 screening windows).
t2reboot[HbonTime_5==F &
           (TrialOne_manhb_00_00==T|
              TrialOne_manhb_01_01==T|
              TrialOne_manhb_02_02==T|
              TrialOne_manhb_03_03==T|
              TrialOne_manhb_04_04==T|
              TrialOne_manhb_05_05==T|
              TrialOne_manhb_06_06==T|
              TrialOne_manhb_07_07==T|
              TrialOne_manhb_08_08==T|
              TrialOne_manhb_09_09==T|
              TrialOne_manhb_10_10==T|
              TrialOne_manhb_11_11==T|
              TrialOne_manhb_12_12==T|
              TrialOne_manhb_13_13==T|
              TrialOne_manhb_14_14==T|
              TrialOne_manhb_15_15==T|
              TrialOne_manhb_16_16==T|
              TrialOne_manhb_17_17==T|
              TrialOne_manhb_18_18==T|
              TrialOne_manhb_19_19==T|
              TrialOne_manhb_20_20==T|
              TrialOne_manhb_21_21==T|
              TrialOne_manhb_22_22==T|
              TrialOne_manhb_23_23==T|
              TrialOne_manhb_29_29==T|
              TrialOne_manhb_30_30==T|
              TrialOne_manhb_31_31==T|
              TrialOne_manhb_32_32==T|
              TrialOne_manhb_33_33==T|
              TrialOne_manhb_34_34==T),HbonTime_5:=TRUE]
#mild/mod anem retest
# Window-6 success: mild/moderate-anaemia retest in the same weeks.
# FIX: the original listed TrialOne_manhb_mildmodhbret_20_20 twice; the
# duplicate disjunct (a no-op) has been removed.
t2reboot[HbonTime_6==F &
           (TrialOne_manhb_mildmodhbret_00_00==T|
              TrialOne_manhb_mildmodhbret_01_01==T|
              TrialOne_manhb_mildmodhbret_02_02==T|
              TrialOne_manhb_mildmodhbret_03_03==T|
              TrialOne_manhb_mildmodhbret_04_04==T|
              TrialOne_manhb_mildmodhbret_05_05==T|
              TrialOne_manhb_mildmodhbret_06_06==T|
              TrialOne_manhb_mildmodhbret_07_07==T|
              TrialOne_manhb_mildmodhbret_08_08==T|
              TrialOne_manhb_mildmodhbret_09_09==T|
              TrialOne_manhb_mildmodhbret_10_10==T|
              TrialOne_manhb_mildmodhbret_11_11==T|
              TrialOne_manhb_mildmodhbret_12_12==T|
              TrialOne_manhb_mildmodhbret_13_13==T|
              TrialOne_manhb_mildmodhbret_14_14==T|
              TrialOne_manhb_mildmodhbret_15_15==T|
              TrialOne_manhb_mildmodhbret_16_16==T|
              TrialOne_manhb_mildmodhbret_17_17==T|
              TrialOne_manhb_mildmodhbret_18_18==T|
              TrialOne_manhb_mildmodhbret_19_19==T|
              TrialOne_manhb_mildmodhbret_20_20==T|
              TrialOne_manhb_mildmodhbret_21_21==T|
              TrialOne_manhb_mildmodhbret_22_22==T|
              TrialOne_manhb_mildmodhbret_23_23==T|
              TrialOne_manhb_mildmodhbret_29_29==T|
              TrialOne_manhb_mildmodhbret_30_30==T|
              TrialOne_manhb_mildmodhbret_31_31==T|
              TrialOne_manhb_mildmodhbret_32_32==T|
              TrialOne_manhb_mildmodhbret_33_33==T|
              TrialOne_manhb_mildmodhbret_34_34==T),
         HbonTime_6:=TRUE]
# Preliminary anaemia (Hb) screening summary: opportunity denominators and
# TRUE/FALSE success counts per window, exported to xlsx.
# FIXES vs original:
#  * duplicated Opportun_2 / Opportun_4 list entries removed (they produced
#    repeated identical columns in the export);
#  * column-name typos corrected (Success_3bFales -> Success_3bFalse,
#    Sucess_3cFalse -> Success_3cFalse);
#  * success_5F / success_6F now use na.rm=T like every other sum — without
#    it a single NA in HbonTime_5/6 (the common case) made the count NA.
prelimHB <- t2reboot[,.(N=.N,
                        Opportun_1=sum(Opportunity_anemia_screening_1, na.rm=T),
                        Success_1a=sum(HbonTime_1a, na.rm=T),
                        Success_1aFalse=sum(HbonTime_1a==FALSE, na.rm=T),
                        Success_1b=sum(HbonTime_1b, na.rm=T),
                        Success_1bFalse=sum(HbonTime_1b==FALSE, na.rm=T),
                        Success_1c=sum(HbonTime_1c, na.rm=T),
                        Success_1cFalse=sum(HbonTime_1c==FALSE, na.rm=T),
                        Opportun_2=sum(Opportunity_anemia_screening_2, na.rm=T),
                        Success_2a=sum(HbonTime_2a, na.rm=T),
                        Success_2b=sum(HbonTime_2b, na.rm=T),
                        Success_2bFalse=sum(HbonTime_2b==F, na.rm=T),
                        Success_2c=sum(HbonTime_2c, na.rm=T),
                        Success_2cFalse=sum(HbonTime_2c==F, na.rm=T),
                        Opportun_3=sum(Opportunity_anemia_screening_3, na.rm=T),
                        Success_3a=sum(HbonTime_3a, na.rm=T),
                        Success_3b=sum(HbonTime_3b, na.rm=T),
                        Success_3bFalse=sum(HbonTime_3b==FALSE, na.rm=T),
                        Success_3c=sum(HbonTime_3c, na.rm=T),
                        Success_3cFalse=sum(HbonTime_3c==F, na.rm=T),
                        Opportun_4=sum(Opportunity_anemia_screening_4, na.rm=T),
                        Success_4a=sum(HbonTime_4a, na.rm=T),
                        Success_4b=sum(HbonTime_4b, na.rm=T),
                        Screening4bF=sum(HbonTime_4b==F, na.rm=T),
                        Success_4c=sum(HbonTime_4c, na.rm=T),
                        Screening4cF=sum(HbonTime_4c==F, na.rm=T),
                        Opportun_5=sum(Opportunity_anemia_screening_5, na.rm=T),
                        Success_5=sum(HbonTime_5, na.rm=T),
                        success_5F=sum(HbonTime_5==F, na.rm=T),
                        Opportun_6=sum(Opportunity_anemia_screening_6, na.rm=T),
                        Success_6=sum(HbonTime_6, na.rm=T),
                        success_6F=sum(HbonTime_6==F, na.rm=T))]
openxlsx::write.xlsx(prelimHB,file.path(FOLDER_DATA_RESULTS,
                                        "T2",
                                        sprintf("%s_T2_recruitment_prelim_Hb.xlsx",
                                                lubridate::today())))
########## Attendance ##########
# refHRhosp: TRUE if the woman was referred to the high-risk clinic OR to
# hospital in any single week 00-14, FALSE otherwise (never NA — the column
# is initialised to FALSE, matching the original).
t2reboot[, refHRhosp := FALSE]
early_ref_cols <- c(sprintf("TrialOne_manRef_HR_%02d_%02d", 0:14, 0:14),
                    sprintf("TrialOne_manRef_Hosp_%02d_%02d", 0:14, 0:14))
# `%in% TRUE` maps NA to FALSE, matching how the original OR-chain filter
# behaved in data.table's `i` (NA rows are simply not updated).
early_ref <- t2reboot[, Reduce(`|`, .SD) %in% TRUE, .SDcols = early_ref_cols]
t2reboot[early_ref, refHRhosp := TRUE]
xtabs(~t2reboot$refHRhosp, addNA = TRUE)
## Define Opportunities
# oppt 16 week visit: NA = not applicable, 1 = opportunity, 0 = opportunity
# withdrawn because the woman was referred (HR/hospital) in weeks 00-14.
t2reboot[,Opp_1:= as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(0,104]"),Opp_1:=1]
t2reboot[bookgestagedays_cats %in% c("(0,104]") &
           refHRhosp==T,Opp_1:=0]
xtabs(~t2reboot$Opp_1, addNA=T)
# oppt 18-22 visit: booked at 15-17 weeks, or carried over from Opp_1.
t2reboot[,Opp_2:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(104,125]")| Opp_1==1, Opp_2:=1]
xtabs(~t2reboot$Opp_2, addNA=T)
#removing opportunities
# FIX: the referral disjuncts are now wrapped in one set of parentheses so
# the Opp_2==1 gate applies to all of them, matching the Opp_3/Opp_4/Opp_5
# adjustments below (R's `&` binds tighter than `|`). Values are unchanged
# because Opp_2 is only ever NA or 1 at this point and NA-1 stays NA.
t2reboot[Opp_2==1 &
           ((TrialOne_manRef_HR_15_15==T|TrialOne_manRef_Hosp_15_15==T)|
              (TrialOne_manRef_HR_16_16==T|TrialOne_manRef_Hosp_16_16==T)|
              (TrialOne_manRef_HR_17_17==T|TrialOne_manRef_Hosp_17_17==T)),
         Opp_2:=Opp_2-1]
xtabs(~t2reboot$Opp_2, addNA=T)
# 24-28 week visit
# Opportunities cascade: booked in the window OR carried over from the
# previous window (note Opp_k==1 excludes rows decremented to 0 above,
# so a referral permanently breaks the cascade).
t2reboot[,Opp_3:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(125,160]",
                                     "(160,167]") | Opp_2==1, Opp_3:=1]
xtabs(~t2reboot$Opp_3, addNA=T)
# removing opportunities: referred (HR or hospital) in weeks 18-23
t2reboot[Opp_3==1 & ((TrialOne_manRef_HR_18_18==T|TrialOne_manRef_Hosp_18_18==T)|
                       (TrialOne_manRef_HR_19_19==T|TrialOne_manRef_Hosp_19_19==T)|
                       (TrialOne_manRef_HR_20_20==T|TrialOne_manRef_Hosp_20_20==T)|
                       (TrialOne_manRef_HR_21_21==T |TrialOne_manRef_Hosp_21_21==T)|
                       (TrialOne_manRef_HR_22_22==T|TrialOne_manRef_Hosp_22_22==T)|
                       (TrialOne_manRef_HR_23_23==T|TrialOne_manRef_Hosp_23_23==T)),
         Opp_3:=Opp_3-1]
xtabs(~t2reboot$Opp_3, addNA=T)
# 31-33 week visit
# NOTE(review): the "(160,167]" booking category appears both here and in
# Opp_3 above — confirm the overlap is intended.
t2reboot[,Opp_4:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(160,167]",
                                     "(167,202]",
                                     "(202,216]")|Opp_3== 1, Opp_4:=1]
xtabs(~t2reboot$Opp_4, addNA=T)
# removing opportunities: referred in weeks 24-30
t2reboot[Opp_4==1 &
           ((TrialOne_manRef_HR_24_24==T|TrialOne_manRef_Hosp_24_24==T)|
              (TrialOne_manRef_HR_25_25==T|TrialOne_manRef_Hosp_25_25==T)|
              (TrialOne_manRef_HR_26_26==T|TrialOne_manRef_Hosp_26_26==T)|
              (TrialOne_manRef_HR_27_27==T|TrialOne_manRef_Hosp_27_27==T)|
              (TrialOne_manRef_HR_28_28==T|TrialOne_manRef_Hosp_28_28==T)|
              (TrialOne_manRef_HR_29_29==T|TrialOne_manRef_Hosp_29_29==T)|
              (TrialOne_manRef_HR_30_30==T|TrialOne_manRef_Hosp_30_30==T)),
         Opp_4:=Opp_4-1]
xtabs(~t2reboot$Opp_4, addNA=T)
# 35-37 week visit
t2reboot[,Opp_5:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(216,237]",
                                     "(237,244]") | Opp_4==1, Opp_5:=1]
xtabs(~t2reboot$Opp_5, addNA=T)
# removing opportunities: referred in weeks 31-34
t2reboot[Opp_5==1 &
           ((TrialOne_manRef_HR_31_31==T|TrialOne_manRef_Hosp_31_31==T)|
              (TrialOne_manRef_HR_32_32==T|TrialOne_manRef_Hosp_32_32==T)|
              (TrialOne_manRef_HR_33_33==T|TrialOne_manRef_Hosp_33_33==T)|
              (TrialOne_manRef_HR_34_34==T|TrialOne_manRef_Hosp_34_34==T)),
         Opp_5:=Opp_5-1]
xtabs(~t2reboot$Opp_5, addNA=T)
################ successes ##########
# Succ_k is NA (no opportunity), FALSE (opportunity, visit missed so far)
# or TRUE (opportunity and the visit was attended), per ANC window.

# 15-17 week visit
t2reboot[, Succ_1 := as.logical(NA)]
t2reboot[Opp_1 == 1, Succ_1 := FALSE]
t2reboot[Succ_1 == FALSE & TrialOne_anvisitnew_15_17 == TRUE, Succ_1 := TRUE]
xtabs(~t2reboot$Succ_1, addNA = TRUE)

# 18-22 week visit
t2reboot[, Succ_2 := as.logical(NA)]
t2reboot[Opp_2 == 1, Succ_2 := FALSE]
t2reboot[Succ_2 == FALSE & TrialOne_anvisitnew_18_22 == TRUE, Succ_2 := TRUE]
xtabs(~t2reboot$Succ_2, addNA = TRUE)

# 24-28 week visit
t2reboot[, Succ_3 := as.logical(NA)]
t2reboot[Opp_3 == 1, Succ_3 := FALSE]
t2reboot[Succ_3 == FALSE & TrialOne_anvisitnew_24_28 == TRUE, Succ_3 := TRUE]
xtabs(~t2reboot$Succ_3, addNA = TRUE)

# 31-33 week visit
t2reboot[, Succ_4 := as.logical(NA)]
t2reboot[Opp_4 == 1, Succ_4 := FALSE]
t2reboot[Succ_4 == FALSE & TrialOne_anvisitnew_31_33 == TRUE, Succ_4 := TRUE]
xtabs(~t2reboot$Succ_4, addNA = TRUE)

# 35-37 week visit
t2reboot[, Succ_5 := as.logical(NA)]
t2reboot[Opp_5 == 1, Succ_5 := FALSE]
t2reboot[Succ_5 == FALSE & TrialOne_anvisitnew_35_37 == TRUE, Succ_5 := TRUE]
xtabs(~t2reboot$Succ_5, addNA = TRUE)
# Preliminary ANC attendance summary among women currently at 280-300 days
# of gestation: booking-window counts, opportunity denominators, and
# attended/missed counts per visit window, exported to xlsx.
# FIX: ANC2428Opps originally counted `!is.na(Opp_3)`, which also counted
# rows whose opportunity had been withdrawn (Opp_3==0 after the referral
# adjustment); `sum(Opp_3, na.rm=T)` matches how every other *Opps column
# is computed.
# NOTE(review): ANC3537 has no matching FALSE column, unlike the other
# windows — confirm whether that omission is intentional.
prelimAtt <- t2reboot[gAtoday_days>=280 & gAtoday_days<=300,.(N=.N,
                                                              bookedb414=sum(bookgestagedays_cats=="(0,104]", na.rm = T),
                                                              ANC15_17Opps=sum(Opp_1,na.rm=T),
                                                              ANC15_17=sum(Succ_1, na.rm=T),
                                                              ANC15_17FALSE=sum(Succ_1==F, na.rm=T),
                                                              booked1515=sum(bookgestagedays_cats=="(104,125]", na.rm = T),
                                                              ANC18_22Opps=sum(Opp_2, na.rm=T),
                                                              ANC18_22=sum(Succ_2, na.rm=T),
                                                              ANC18_22FALSE=sum(Succ_2==F, na.rm=T),
                                                              booked1822=sum(bookgestagedays_cats=="(125,160]", na.rm = T),
                                                              booked2323=sum(bookgestagedays_cats=="(160,167]", na.rm = T),
                                                              ANC2428Opps=sum(Opp_3, na.rm=T),
                                                              ANC24_28TRUE=sum(Succ_3, na.rm=T),
                                                              ANC24_28FALSE=sum(Succ_3==F, na.rm=T),
                                                              booked2428=sum(bookgestagedays_cats=="(167,202]", na.rm = T),
                                                              booked2930=sum(bookgestagedays_cats=="(202,216]", na.rm = T),
                                                              ANC31_33Opps=sum(Opp_4, na.rm=T),
                                                              ANC31_33=sum(Succ_4, na.rm=T),
                                                              ANC31_33FALSE=sum(Succ_4==F, na.rm=T),
                                                              Booked31_33=sum(bookgestagedays_cats=="(216,237]", na.rm = T),
                                                              Booked34_34=sum(bookgestagedays_cats=="(237,244]", na.rm = T),
                                                              ANC3537Opps=sum(Opp_5, na.rm=T),
                                                              ANC3537=sum(Succ_5, na.rm=T),
                                                              Booked35_37=sum(bookgestagedays_cats=="(244,265]",
                                                                              na.rm = T))]
openxlsx::write.xlsx(prelimAtt,file.path(FOLDER_DATA_RESULTS,
                                         "T2",
                                         sprintf("%s_T2_recruit_prelim_Attendance.xlsx",
                                                 lubridate::today())))
########## GDM ##########
### GDM screening opportunity windows: NA = no opportunity, 1 = opportunity.
t2reboot[, Opportunity_GDM_screening_1 := NA_real_]
t2reboot[, Opportunity_GDM_screening_2 := NA_real_]
t2reboot[, Opportunity_GDM_screening_3 := NA_real_]
t2reboot[, Opportunity_GDM_screening_4 := NA_real_]
# t2reboot[,Opportunity_GDM_Screening_5:=as.numeric(NA)]
# Window 1: booked before 24 weeks.
t2reboot[bookgestagedays_cats %in% c("(0,104]", "(104,125]", "(125,160]", "(160,167]"),
         Opportunity_GDM_screening_1 := 1]
# Window 2: booked at 24-28 weeks, or attended an ANC visit in that window.
t2reboot[bookgestagedays_cats %in% c("(167,202]") | TrialOne_anvisitnew_24_28 == TRUE,
         Opportunity_GDM_screening_2 := 1]
# Window 3: booked after 28 weeks.
t2reboot[bookgestagedays_cats %in% c("(202,216]", "(216,237]", "(237,244]", "(244,265]"),
         Opportunity_GDM_screening_3 := 1]
# Window 4: high random blood glucose recorded anywhere OUTSIDE the
# 24-28-week window.
t2reboot[TrialOne_labbloodglu_high_00_14 == TRUE |
           TrialOne_labbloodglu_high_15_17 == TRUE |
           TrialOne_labbloodglu_high_18_22 == TRUE |
           TrialOne_labbloodglu_high_23_23 == TRUE |
           TrialOne_labbloodglu_high_29_30 == TRUE |
           TrialOne_labbloodglu_high_31_33 == TRUE |
           TrialOne_labbloodglu_high_34_34 == TRUE |
           TrialOne_labbloodglu_high_35_37 == TRUE,
         Opportunity_GDM_screening_4 := 1]
xtabs(~t2reboot$Opportunity_GDM_screening_1, addNA = TRUE)
xtabs(~t2reboot$Opportunity_GDM_screening_2, addNA = TRUE)
xtabs(~t2reboot$Opportunity_GDM_screening_3, addNA = TRUE)
xtabs(~t2reboot$Opportunity_GDM_screening_4, addNA = TRUE)
## Remove opportunities for people who were referred to HR or Hosp
#refHRHospmanRBG_1 rename to RefHr
# NOTE(review): this recomputes RefHr with exactly the same definition as in
# the anaemia section above (weeks 00-23), overwriting that column with
# identical values — consider removing one of the two copies.
t2reboot[,RefHr:=as.logical(NA)]
t2reboot[Opportunity_anemia_screening_1==1, RefHr:=FALSE]
t2reboot[(TrialOne_manRef_HR_00_00==T|
            TrialOne_manRef_HR_01_01==T|
            TrialOne_manRef_HR_02_02==T|
            TrialOne_manRef_HR_03_03==T|
            TrialOne_manRef_HR_04_04==T|
            TrialOne_manRef_HR_05_05==T|
            TrialOne_manRef_HR_06_06==T|
            TrialOne_manRef_HR_07_07==T|
            TrialOne_manRef_HR_08_08==T|
            TrialOne_manRef_HR_09_09==T|
            TrialOne_manRef_HR_10_10==T|
            TrialOne_manRef_HR_11_11==T|
            TrialOne_manRef_HR_12_12==T|
            TrialOne_manRef_HR_13_13==T|
            TrialOne_manRef_HR_14_14==T|
            TrialOne_manRef_HR_15_15==T|
            TrialOne_manRef_HR_16_16==T|
            TrialOne_manRef_HR_17_17==T|
            TrialOne_manRef_HR_18_18==T|
            TrialOne_manRef_HR_19_19==T|
            TrialOne_manRef_HR_20_20==T|
            TrialOne_manRef_HR_21_21==T|
            TrialOne_manRef_HR_22_22==T|
            TrialOne_manRef_HR_23_23==T),
         RefHr:=TRUE]
xtabs(~t2reboot$RefHr, addNA=T)
#refHrHosp_2 rename to refHr_2
# refHr_2: any high-risk referral in weeks 29 onward (never NA).
# NOTE(review): the last column, TrialOne_refHR_35_37, breaks the weekly
# _ww_ww pattern of its siblings (expected _37_37?) — confirm the column
# name is intended.
t2reboot[,refHr_2:=(
  TrialOne_refHR_29_29==T|
    TrialOne_refHR_30_30==T|
    TrialOne_refHR_31_31==T|
    TrialOne_refHR_32_32==T|
    TrialOne_refHR_33_33==T|
    TrialOne_refHR_34_34==T|
    TrialOne_refHR_35_35==T|
    TrialOne_refHR_36_36==T|
    TrialOne_refHR_35_37==T)]
# Withdraw the 24-28-week GDM opportunity for women already referred to the
# high-risk clinic before/at the visit week (mirrors the anaemia adjustment).
# FIX: the visit/referral disjunction is now wrapped in one set of
# parentheses so the Opportunity_GDM_screening_2==1 gate applies to every
# disjunct, not just the first (R's `&` binds tighter than `|`). Net values
# are unchanged because the column is only ever NA or 1 and NA-1 stays NA,
# but the gating is now explicit and robust.
t2reboot[Opportunity_GDM_screening_2==1 &
           ((TrialOne_anvisitnew_24_24 &
               (RefHr==T))|
              (TrialOne_anvisitnew_25_25 &
                 (RefHr==T|TrialOne_manRef_HR_24_24==T))|
              (TrialOne_anvisitnew_26_26 &
                 (RefHr==T|TrialOne_manRef_HR_24_24==T|
                    TrialOne_manRef_HR_25_25==T))|
              (TrialOne_anvisitnew_27_27 &
                 (RefHr==T|TrialOne_manRef_HR_24_24==T|
                    TrialOne_manRef_HR_25_25==T|
                    TrialOne_manRef_HR_26_26==T))|
              (TrialOne_anvisitnew_28_28 &
                 (RefHr==T|
                    TrialOne_manRef_HR_24_24==T|
                    TrialOne_manRef_HR_25_25==T|
                    TrialOne_manRef_HR_26_26==T|
                    TrialOne_manRef_HR_27_27==T))),
         Opportunity_GDM_screening_2:=Opportunity_GDM_screening_2-1]
# checks
xtabs(~t2reboot$Opportunity_GDM_screening_2, addNA=T)
#Screening before 24 weeks: Creating one var for 3 possibilities
# screenb424: NA = not booked before 24 weeks; FALSE = booked early but not
# screened; TRUE = booked early, booking RBG not high (or missing), and at
# least one glucose result (urine, random or fasting) recorded at booking.
t2reboot[, screenb424 := as.logical(NA)]
t2reboot[bookgestagedays_cats %in% c("(0,104]", "(104,125]", "(125,160]", "(160,167]"),
         screenb424 := FALSE]
t2reboot[screenb424 == FALSE &
           (booklabbloodglu_high == FALSE | is.na(booklabbloodglu_high)) &
           (!is.na(booklaburglu) | !is.na(booklabbloodglu) | !is.na(booklabfastbloodglu)),
         screenb424 := TRUE]
xtabs(~t2reboot$screenb424, addNA = TRUE)
# Tally by trial arm (A = control, B = intervention).
scrb424 <- t2reboot[, .(A = sum(ident_dhis2_control == TRUE),
                        B = sum(ident_dhis2_control == FALSE)),
                    keyby = .(screenb424)]
##Defining Successes
# GDMscreeningontime_1 summarises screening before 24 weeks (from
# screenb424); 1A/1B/1C break the successes down by pathway.
# FIX: the original initialised GDMscreeningontime_1A/1B/1C to NA twice in
# a row (nothing touched them in between); the redundant first
# initialisation has been removed — each column is created exactly once
# below, with identical results.
t2reboot[screenb424==F,
         GDMscreeningontime_1:=FALSE]
t2reboot[screenb424==T,
         GDMscreeningontime_1:=TRUE]
xtabs(~t2reboot$GDMscreeningontime_1, addNA=T)
# 1A: urine glucose negative at booking.
t2reboot[,GDMscreeningontime_1A:=as.logical(NA)]
t2reboot[Opportunity_GDM_screening_1==1 &
           booklaburglu=="NEG",
         GDMscreeningontime_1A:=TRUE]
# 1B: urine glucose positive AND a blood glucose was taken.
t2reboot[,GDMscreeningontime_1B:=as.logical(NA)]
t2reboot[Opportunity_GDM_screening_1==1 &
           booklaburglu=="POS" &
           !is.na(booklabbloodglu), GDMscreeningontime_1B:=TRUE]
##### Need to add: and referred for 1C!!!! #####
# 1C: high booking blood glucose (the referral requirement flagged in the
# original TODO above is still missing).
t2reboot[,GDMscreeningontime_1C:=as.logical(NA)]
t2reboot[booklabbloodglu_high==T &
           !is.na(booklabbloodglu), GDMscreeningontime_1C:=TRUE]
#24-28 weeks
t2reboot[,GDMscreeningontime_2:=as.logical(NA)]
# FALSE: in the 24-28 opportunity, but no random AND no fasting blood
# glucose recorded in any of weeks 24-28.
t2reboot[Opportunity_GDM_screening_2==1 &
           (TrialOne_labbloodglu_exists_24_24==F &
              TrialOne_labbloodglu_exists_25_25==F &
              TrialOne_labbloodglu_exists_26_26==F &
              TrialOne_labbloodglu_exists_27_27==F &
              TrialOne_labbloodglu_exists_28_28==F) &
           (TrialOne_labfastbloodglu_exists_24_24==F &
              TrialOne_labfastbloodglu_exists_25_25==F &
              TrialOne_labfastbloodglu_exists_26_26==F &
              TrialOne_labfastbloodglu_exists_27_27==F &
              TrialOne_labfastbloodglu_exists_28_28==F), GDMscreeningontime_2:=F]
# TRUE: a random blood glucose exists with at least one non-high week, OR a
# fasting blood glucose exists.
# FIX: in the original the fasting-glucose disjunct was NOT gated by the
# opportunity flag (R's `&` binds tighter than `|`), so women outside the
# 24-28 opportunity (GDMscreeningontime_2 still NA) were flipped to TRUE
# whenever any fasting glucose existed. The whole disjunction is now
# wrapped so Opportunity_GDM_screening_2==1 applies throughout, matching
# the FALSE-setter above.
t2reboot[Opportunity_GDM_screening_2==1 &
           (((TrialOne_labbloodglu_exists_24_24==T|
                TrialOne_labbloodglu_exists_25_25==T|
                TrialOne_labbloodglu_exists_26_26==T|
                TrialOne_labbloodglu_exists_27_27==T|
                TrialOne_labbloodglu_exists_28_28==T) &
               (TrialOne_labbloodglu_high_24_24==F|
                  TrialOne_labbloodglu_high_25_25==F|
                  TrialOne_labbloodglu_high_26_26==F|
                  TrialOne_labbloodglu_high_27_27==F|
                  TrialOne_labbloodglu_high_28_28==F))|
              (TrialOne_labfastbloodglu_exists_24_24==T|
                 TrialOne_labfastbloodglu_exists_25_25==T|
                 TrialOne_labfastbloodglu_exists_26_26==T|
                 TrialOne_labfastbloodglu_exists_27_27==T|
                 TrialOne_labfastbloodglu_exists_28_28==T)),GDMscreeningontime_2:=TRUE]
xtabs(~t2reboot$GDMscreeningontime_2, addNA=T)
#Screening after 28 weeks: Creating one var for 3 possibilities
# screenafter28: among late bookers (bookgestagedays in (202,265]), TRUE when
# a random or fasting blood glucose exists at booking and the random result
# was not already high (or the high flag is missing).
t2reboot[,screenafter28:=as.logical(NA)]
t2reboot[bookgestagedays_cats %in% c("(202,216]","(216,237]","(237,244]","(244,265]"),
         screenafter28:=F]
t2reboot[screenafter28==F &
           (booklabbloodglu_high==F | is.na(booklabbloodglu_high)) &
           (!is.na(booklabbloodglu)|!is.na(booklabfastbloodglu)),
         screenafter28:=T]
xtabs(~t2reboot$screenafter28, addNA=T)
##Defining Success
# GDMscreeningontime_3 simply re-expresses screenafter28 as the success flag.
t2reboot[,GDMscreeningontime_3:=as.logical(NA)]
t2reboot[screenafter28==F,
         GDMscreeningontime_3:=FALSE]
t2reboot[screenafter28==T,GDMscreeningontime_3:=TRUE]
xtabs(~t2reboot$GDMscreeningontime_3, addNA=T)
#management of high RBG outside of time windows
# GDMscreeningontime_4: among women with opportunity 4 (high random blood
# glucose outside the screening windows), TRUE when a high-risk referral was
# recorded.
# NOTE(review): the mixed-case pair RefHr / refHr_2 is inconsistent with
# naming elsewhere in the file -- confirm both columns exist upstream.
t2reboot[, GDMscreeningontime_4:=as.logical(NA)]
t2reboot[Opportunity_GDM_screening_4==1, GDMscreeningontime_4:= FALSE]
t2reboot[GDMscreeningontime_4==F &
           (RefHr==T|refHr_2==T),GDMscreeningontime_4:=TRUE]
# Per-cluster counts of GDM screening opportunities and successes for the
# preliminary process-indicator workbook.
prelimGDM <- t2reboot[,.(N=.N,
                         Opportun_1=sum(Opportunity_GDM_screening_1==T, na.rm=T),
                         Success_1A=sum(GDMscreeningontime_1A==T, na.rm=T),
                         Success_1B=sum(GDMscreeningontime_1B==T, na.rm=T),
                         Success_1C=sum(GDMscreeningontime_1C==T, na.rm=T),
                         Screenb424=sum(screenb424==T, na.rm=T),
                         Screenb424False=sum(screenb424==F, na.rm=T),
                         Opportun_2=sum(Opportunity_GDM_screening_2, na.rm=T),
                         Success_2=sum(GDMscreeningontime_2==T, na.rm=T),
                         Opportun_3=sum(Opportunity_GDM_screening_3==T, na.rm=T),
                         Success_3=sum(GDMscreeningontime_3==T, na.rm=T),
                         screenafter28=sum(screenafter28==T, na.rm=T),
                         screenafter28False=sum(screenafter28==F, na.rm=T),
                         screenbtwn=sum(GDMscreeningontime_4==T, na.rm=T),
                         screenbtwnFalse=sum(GDMscreeningontime_4==F, na.rm=T),
                         Opportun_4=sum(Opportunity_GDM_screening_4==T, na.rm=T),
                         Succ_4=sum(GDMscreeningontime_4, na.rm=T),
                         keyby=.(str_TRIAL_2_Cluster))]
# Export the per-cluster table, date-stamped.
openxlsx::write.xlsx(prelimGDM,file.path(FOLDER_DATA_RESULTS,
                                         "T2",
                                         sprintf("%s_T2_recruitment_prelim_GDM_percluster.xlsx",
                                                 lubridate::today())))
############## HTN ##############
# making vars
# refHRhosp variable made in attendance outcome
## Define Opportunities
# Each bp_k is 1 when the woman should have had a BP measurement at visit k
# (identified by booking window or by an ANC visit in that window); a prior
# high-risk/hospital referral removes the opportunity.
# before 16 weeks
t2reboot[,bp_1a:= as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(0,104]"),
         bp_1a:=1]
xtabs(~t2reboot$bp_1a, addNA=T)
# oppt 16 week visit
t2reboot[,bp_1:= as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(104,125]")| TrialOne_anvisitnew_15_17==T,
         bp_1:=1]
# referral at booking removes the opportunity
t2reboot[bookgestagedays_cats %in% c("(104,125]") &
           refHRhosp==T,bp_1:=0]
xtabs(~t2reboot$bp_1, addNA=T)
# oppt 18-22 visit
t2reboot[,bp_2:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(125,160]")|
           TrialOne_anvisitnew_18_22==T, bp_2:=1]
xtabs(~t2reboot$bp_2, addNA=T)
#removing opportunities
# BUG FIX: the original condition was
#   bp_2==1 & (wk15 ref)|(wk16 ref)|(wk17 ref)
# and since `&` binds tighter than `|`, the week-16 and week-17 referral
# branches ignored bp_2==1.  Wrapped the referral disjunction in parentheses,
# matching the bp_3/bp_4/bp_5 blocks below.
t2reboot[bp_2==1 &
           ((TrialOne_manRef_HR_15_15==T|TrialOne_manRef_Hosp_15_15==T)|
            (TrialOne_manRef_HR_16_16==T|TrialOne_manRef_Hosp_16_16==T)|
            (TrialOne_manRef_HR_17_17==T|TrialOne_manRef_Hosp_17_17==T)),
         bp_2:=bp_2-1]
xtabs(~t2reboot$bp_2, addNA=T)
# 24-28 week visit
# Opportunity exists if the woman booked in weeks 23-28 or attended an ANC
# visit in weeks 24-28; a referral in weeks 18-23 removes it.
t2reboot[,bp_3:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(160,167]",
                                     "(167,202]") |
           TrialOne_anvisitnew_24_28==T, bp_3:=1]
xtabs(~t2reboot$bp_3, addNA=T)
# removing opportunities
t2reboot[bp_3==1 & ((TrialOne_manRef_HR_18_18==T|TrialOne_manRef_Hosp_18_18==T)|
                      (TrialOne_manRef_HR_19_19==T|TrialOne_manRef_Hosp_19_19==T)|
                      (TrialOne_manRef_HR_20_20==T|TrialOne_manRef_Hosp_20_20==T)|
                      (TrialOne_manRef_HR_21_21==T |TrialOne_manRef_Hosp_21_21==T)|
                      (TrialOne_manRef_HR_22_22==T|TrialOne_manRef_Hosp_22_22==T)|
                      (TrialOne_manRef_HR_23_23==T|TrialOne_manRef_Hosp_23_23==T)),
         bp_3:=bp_3-1]
xtabs(~t2reboot$bp_3, addNA=T)
# 31-33 week visit
# Opportunity from booking in weeks 29-33 or an ANC visit in weeks 31-33;
# removed by a referral in weeks 24-30.
t2reboot[,bp_4:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(202,216]",
                                     "(216,237]")|
           TrialOne_anvisitnew_31_33==T, bp_4:=1]
xtabs(~t2reboot$bp_4, addNA=T)
# removing opportunities
t2reboot[bp_4==1 &
           ((TrialOne_manRef_HR_24_24==T|TrialOne_manRef_Hosp_24_24==T)|
              (TrialOne_manRef_HR_25_25==T|TrialOne_manRef_Hosp_25_25==T)|
              (TrialOne_manRef_HR_26_26==T|TrialOne_manRef_Hosp_26_26==T)|
              (TrialOne_manRef_HR_27_27==T|TrialOne_manRef_Hosp_27_27==T)|
              (TrialOne_manRef_HR_28_28==T|TrialOne_manRef_Hosp_28_28==T)|
              (TrialOne_manRef_HR_29_29==T|TrialOne_manRef_Hosp_29_29==T)|
              (TrialOne_manRef_HR_30_30==T|TrialOne_manRef_Hosp_30_30==T)),
         bp_4:=bp_4-1]
xtabs(~t2reboot$bp_4, addNA=T)
# 35-37 week visit
# Opportunity from booking in weeks 34-37 or an ANC visit in weeks 35-37;
# removed by a referral in weeks 31-34.
t2reboot[,bp_5:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(237,244]", "(244,265]") |
           TrialOne_anvisitnew_35_37, bp_5:=1]
xtabs(~t2reboot$bp_5, addNA=T)
t2reboot[bp_5==1 &
           ((TrialOne_manRef_HR_31_31==T|TrialOne_manRef_Hosp_31_31==T)|
              (TrialOne_manRef_HR_32_32==T|TrialOne_manRef_Hosp_32_32==T)|
              (TrialOne_manRef_HR_33_33==T|TrialOne_manRef_Hosp_33_33==T)|
              (TrialOne_manRef_HR_34_34==T|TrialOne_manRef_Hosp_34_34==T)),
         bp_5:=bp_5-1]
xtabs(~t2reboot$bp_5, addNA=T)
################ successes ##########
# Succ_k: among women with opportunity bp_k, TRUE when a systolic BP was
# recorded in the matching gestational-age window (TrialOne_anbpsyst_present_*).
# before 15 weeks
t2reboot[,Succ_1a:=as.logical(NA)]
t2reboot[bp_1a==1, Succ_1a:=FALSE]
t2reboot[Succ_1a==F &
           TrialOne_anbpsyst_present_00_14==T, Succ_1a:=TRUE]
xtabs(~t2reboot$Succ_1a, addNA=T)
xtabs(~t2reboot$bp_1a, addNA=T)
# 15-17 week visit
t2reboot[,Succ_1:=as.logical(NA)]
t2reboot[bp_1==1, Succ_1:=FALSE]
t2reboot[Succ_1==F &
           TrialOne_anbpsyst_present_15_17==T, Succ_1:=TRUE]
xtabs(~t2reboot$bp_1, addNA=T)
xtabs(~t2reboot$Succ_1, addNA=T)
# 18-22 week visit
t2reboot[,Succ_2:=as.logical(NA)]
t2reboot[bp_2==1, Succ_2:=FALSE]
t2reboot[Succ_2==F & TrialOne_anbpsyst_present_18_22==T, Succ_2:=TRUE]
xtabs(~t2reboot$bp_2, addNA=T)
xtabs(~t2reboot$Succ_2, addNA=T)
# 24-28 week visit
t2reboot[,Succ_3:=as.logical(NA)]
t2reboot[bp_3==1, Succ_3:=as.logical(FALSE)]
t2reboot[Succ_3==F & TrialOne_anbpsyst_present_24_28==T, Succ_3:=TRUE]
xtabs(~t2reboot$bp_3, addNA=T)
xtabs(~t2reboot$Succ_3, addNA=T)
# 31-33 week visit
t2reboot[,Succ_4:=as.logical(NA)]
t2reboot[bp_4==1, Succ_4:=FALSE]
t2reboot[Succ_4==F & TrialOne_anbpsyst_present_31_33==T, Succ_4:=TRUE]
xtabs(~t2reboot$bp_4, addNA=T)
xtabs(~t2reboot$Succ_4, addNA=T)
# 35-37 week visit
t2reboot[,Succ_5:=as.logical(NA)]
t2reboot[bp_5==1, Succ_5:=FALSE]
t2reboot[Succ_5==F & TrialOne_anbpsyst_present_35_37==T, Succ_5:=TRUE]
xtabs(~t2reboot$bp_5, addNA=T)
xtabs(~t2reboot$Succ_5, addNA=T)
# Overall counts of BP-measurement opportunities and successes per window,
# exported as the preliminary HTN process-indicator workbook (date-stamped).
prelimHTN <- t2reboot[,.(N=.N,
                         Screenb415=sum(bp_1a==T, na.rm=T),
                         Success_1A=sum(Succ_1a==T, na.rm=T),
                         opport_15_17=sum(bp_1==T, na.rm=T),
                         Success_1=sum(Succ_1==T, na.rm=T),
                         opport_18_22=sum(bp_2==T, na.rm=T),
                         Success_2=sum(Succ_2==T, na.rm=T),
                         opport_24_28=sum(bp_3==T, na.rm=T),
                         Success_3=sum(Succ_3==T, na.rm=T),
                         opport_31_33=sum(bp_4==T, na.rm=T),
                         Success_4=sum(Succ_4==T, na.rm=T),
                         opport_35_37=sum(bp_5==T, na.rm=T),
                         Success_5=sum(Succ_5==T, na.rm=T))]
openxlsx::write.xlsx(prelimHTN,file.path(FOLDER_DATA_RESULTS,
                                         "T2",
                                         sprintf("%s_T2_recruitment_prelim_HTN.xlsx",
                                                 lubridate::today())))
|
/analyses/Trial 2/Recruitment update_Gaza.R
|
no_license
|
raubreywhite/trial_dofiles
|
R
| false
| false
| 95,815
|
r
|
#T2 gaza indicators
##### TO DO: create new function or adjust the IS_Gaza portion of the code to make it
#### create these new vars
# first_1_21_usedd: the first ultrasound EDD taken when the paired scan's
# gestational age was 1-22 weeks.  Each usedd_k column is paired with its
# usgestage_k column; the `is.na(first_1_21_usedd)` guard keeps the first
# qualifying repeat only.
nam <- names(d)[stringr::str_detect(names(d),"^usedd_[0-9]*$")]
num <- stringr::str_replace(nam,"usedd_","")
d[,first_1_21_usedd:=as.Date(NA)]
for(i in num ){
  print(i)
  var_usedd <- sprintf("usedd_%s",i)
  # BUG FIX: was sprintf("usgestage_%s",1), which checked usgestage_1 for
  # every repeat instead of the repeat's own gestational age.
  var_usgestage <- sprintf("usgestage_%s",i)
  d[!is.na(get(var_usedd)) &
      !is.na(get(var_usgestage)) &
      get(var_usgestage) > 0 &
      get(var_usgestage) < 23 &
      is.na(first_1_21_usedd),
    first_1_21_usedd:=as.Date(get(var_usedd),format="%Y-%m-%d")]
}
# define data set
# Trial 2 reboot cohort: bookings after 2019-12-01 within trial-2/3 clinics.
t2reboot <- d[bookdate>"2019-12-01" & ident_TRIAL_2_and_3==T,]
t2reboot[,smsyes:=areyouwillingtoreceivesmstextmessagesandremindersaboutyourvisits]
####### creating variables we need #######
# making bookgestage cats
t2reboot[,bookgestagecat:=cut(bookgestage,
                              breaks=c(0,14,17,22,23,28,30,33,34,37,40),
                              include.lowest=T)]
#### gestage variable today (days) for attendance ####
today <- lubridate::today()
t2reboot[,gAtoday_days:=as.numeric(NA)]
# NOTE(review): the ultrasound branch computes difftime(usedd, today) --
# days *until* the scan-based EDD -- while the LMP branch computes
# difftime(today, booklmp) -- days *since* LMP.  These point in opposite
# directions; confirm the intended definition before relying on
# gAtoday_days thresholds (e.g. >=280 below).
t2reboot[!is.na(first_1_21_usedd),
         gAtoday_days:=as.numeric(
           difftime(first_1_21_usedd,today, units="days"))]
t2reboot[is.na(first_1_21_usedd),
         gAtoday_days:=as.numeric(difftime(lubridate::today(),booklmp,units="days"))]
# timely anc variables
# One logical flag per recommended visit window; filled by the loop below
# from the angestage_k / anevent_k repeat columns.
t2reboot[,ancbefore15:=as.logical(NA)]
t2reboot[,anc15to17:=as.logical(NA)]
t2reboot[,anc18to22:=as.logical(NA)]
t2reboot[,anc23:=as.logical(NA)]
t2reboot[,anc24to28:=as.logical(NA)]
t2reboot[,anc29to30:=as.logical(NA)]
t2reboot[,anc31to33:=as.logical(NA)]
t2reboot[,anc34:=as.logical(NA)]
t2reboot[,anc35to37:=as.logical(NA)]
t2reboot[,anc38to40:=as.logical(NA)]
# Parallel lists of repeat columns: gestational age and visit-event marker.
vars_gestage <- stringr::str_subset(names(t2reboot),"^angestage_[0-9]+")
vars_anevent <- stringr::str_subset(names(t2reboot),"^anevent_[0-9]+")
# Flag each visit window TRUE when any ANC repeat falls inside it.
# BUG FIX: the original body called get(vars_gestage) -- passing the whole
# character vector to get(), which errors -- and tested !is.na(vars_anevent)
# (a character vector in calling scope, hence always TRUE).  Each
# angestage_k column is now read via get(i) and paired with its matching
# anevent_k column.
for(i in vars_gestage){
  var_anevent <- stringr::str_replace(i, "^angestage", "anevent")
  if(!var_anevent %in% names(t2reboot)) next  # no matching visit column
  t2reboot[get(i)>0 & get(i)<=14 &
             !is.na(get(var_anevent)),ancbefore15:=TRUE]
  t2reboot[get(i)>=15 & get(i)<=17 &
             !is.na(get(var_anevent)),anc15to17:=TRUE]
  t2reboot[get(i)>=18 & get(i)<=22 &
             !is.na(get(var_anevent)),anc18to22:=TRUE]
  t2reboot[get(i)==23 & !is.na(get(var_anevent)),anc23:=TRUE]
  t2reboot[get(i)>=24 & get(i)<=28 &
             !is.na(get(var_anevent)),anc24to28:=TRUE]
  t2reboot[get(i)>=29 & get(i)<=30 &
             !is.na(get(var_anevent)),anc29to30:=TRUE]
  t2reboot[get(i)>=31 & get(i)<=33 &
             !is.na(get(var_anevent)),anc31to33:=TRUE]
  t2reboot[get(i)==34 & !is.na(get(var_anevent)),anc34:=TRUE]
  t2reboot[get(i)>=35 & get(i)<=37 &
             !is.na(get(var_anevent)),anc35to37:=TRUE]
  t2reboot[get(i)>=38 & get(i)<=40 &
             !is.na(get(var_anevent)),anc38to40:=TRUE]
}
# Recruitment counts by trial arm (SMS = trial 2, QID = trial 3), exported
# as a date-stamped workbook.
t2nums <- t2reboot[,.(N=.N,
                      Booked=sum(ident_dhis2_booking==T, na.rm=T),
                      ANCvisits=sum(ident_dhis2_an==T, na.rm=T),
                      BookedSMSyes=sum(smsyes==1,na.rm=T),
                      BookedSMSMno=sum(smsyes==0, na.rm=T),
                      BookedSMSmiss=sum(is.na(smsyes)),
                      BookedSMSonly=sum(ident_TRIAL_2==T &
                                          ident_TRIAL_3==F, na.rm=T),
                      BookedinSMSclinic=sum(ident_TRIAL_2==T, na.rm=T),
                      BookedQIDonly=sum(ident_TRIAL_2==F & ident_TRIAL_3==T),
                      BookdQID=sum(ident_TRIAL_3==T, na.rm=T),
                      BookedBoth=sum(ident_TRIAL_3==T & ident_TRIAL_2==T),
                      BookedControl=sum(ident_TRIAL_2_3_Control, na.rm=T)
)]
openxlsx::write.xlsx(t2nums,file.path(FOLDER_DATA_RESULTS_GAZA,
                                      "T2",
                                      sprintf("%s_recruit_update_by_arm.xlsx",
                                              lubridate::today())))
# bookgestage cats
# Count of bookings per gestational-age-at-booking category, exported.
t2bookcats <- t2reboot[,.(N=.N),
                       keyby=.(bookgestagecat)]
openxlsx::write.xlsx(t2bookcats,file.path(FOLDER_DATA_RESULTS_GAZA,
                                          "T2",
                                          sprintf(
                                            "%s_recruit_update_bookgestage.xlsx",
                                            lubridate::today())))
#anc visits timely and not timely
# anevent_x: number of ANC visit records per woman (count of non-missing
# anevent_k repeat columns).
vars <- names(t2reboot)[stringr::str_detect(names(t2reboot),"^anevent_[0-9]+")]
t2reboot[,anevent_x:=0]
print(vars)
for(i in vars){
  t2reboot[!is.na(get(i)), anevent_x:=anevent_x + 1]
}
# console check: total ANC visits in intervention clinics
sum(t2reboot[ident_dhis2_control==F]$anevent_x,na.rm=T)
# visits per clinic
# Three variants of the visit summary (per-clinic, overall, and by booking
# gestational age), each exported as a date-stamped workbook.
t2visits <- t2reboot[,.(N=.N,
                        bookingvisits=sum(!is.na(bookevent), na.rm=T),
                        expectedtohavedelivered=sum(gAtoday_days>=280 &
                                                      gAtoday_days<=300),
                        ancvisits=sum(anevent_x, na.rm=T),
                        ancb415=sum(ancbefore15, na.rm=T),
                        anc15to17=sum(anc15to17, na.rm=T),
                        anc18to22=sum(anc18to22, na.rm=T),
                        anc23=sum(anc23, na.rm=T),
                        anc24to28=sum(anc24to28, na.rm=T),
                        anc29to30=sum(anc29to30, na.rm=T),
                        anc31to33=sum(anc31to33, na.rm=T),
                        anc34=sum(anc34, na.rm=T),
                        anc35to37=sum(anc35to37, na.rm=T)),
                     keyby=.(ident_TRIAL_2_3_Control,str_TRIAL_2_Cluster)]
openxlsx::write.xlsx(t2visits,file.path(FOLDER_DATA_RESULTS_GAZA,
                                        "T2",
                                        sprintf(
                                          "%s_recruit_update_visits_by_clinic.xlsx",
                                          lubridate::today())))
# overall (no grouping)
t2visits <- t2reboot[,.(N=.N,
                        bookingvisits=sum(!is.na(bookevent), na.rm=T),
                        ancvisits=sum(anevent_x, na.rm=T),
                        ancb415=sum(ancbefore15, na.rm=T),
                        anc15to17=sum(anc15to17, na.rm=T),
                        anc18to22=sum(anc18to22, na.rm=T),
                        anc23=sum(anc23, na.rm=T),
                        anc24to28=sum(anc24to28, na.rm=T),
                        anc29to30=sum(anc29to30, na.rm=T),
                        anc31to33=sum(anc31to33, na.rm=T),
                        anc34=sum(anc34, na.rm=T),
                        anc35to37=sum(anc35to37, na.rm=T))]
openxlsx::write.xlsx(t2visits,file.path(FOLDER_DATA_RESULTS_GAZA,
                                        "T2",
                                        sprintf("%s_recruit_update_visits.xlsx",
                                                lubridate::today())))
# by bookgestage
t2visits <- t2reboot[,.(N=.N,
                        bookingvisits=sum(!is.na(bookevent), na.rm=T),
                        expectedtohavedelivered=sum(gAtoday_days>=280 &
                                                      gAtoday_days<=300),
                        ancvisits=sum(anevent_x, na.rm=T),
                        ancb415=sum(ancbefore15, na.rm=T),
                        anc15to17=sum(anc15to17, na.rm=T),
                        anc18to22=sum(anc18to22, na.rm=T),
                        anc23=sum(anc23, na.rm=T),
                        anc24to28=sum(anc24to28, na.rm=T),
                        anc29to30=sum(anc29to30, na.rm=T),
                        anc31to33=sum(anc31to33, na.rm=T),
                        anc34=sum(anc34, na.rm=T),
                        anc35to37=sum(anc35to37, na.rm=T)),
                     keyby=.(bookgestagecat)]
openxlsx::write.xlsx(t2visits,file.path(FOLDER_DATA_RESULTS_GAZA,
                                        "T2",
                                        sprintf(
                                          "%s_recruit_update_visits_by_bookgestage.xlsx",
                                          lubridate::today())))
####################### Process outcomes #######################
# Gestational age at booking in DAYS, cut into the trial's visit windows.
t2reboot[,bookgestagedays_cats:=cut(bookgestagedays,
                                    breaks=c(-500,0,104,
                                             125,160,167,202,
                                             216,237,244,265,293),
                                    include.lowest=T)]
# MAKE BOOK VISIT FOR ANEMIA
# booklabhb: first lab Hb taken within a week of the booking visit.
t2reboot[,booklabhb:=as.numeric(NA)]
t2reboot[abs(labT1gestagedays_1-bookgestagedays)<7,booklabhb:=labhb_1]
# MAKE BOOK VISIT FOR Laburglu
# booklaburglu: urine glucose ("NEG"/"POS") from the first lab repeat when it
# falls within a week of the booking visit.
# CLEANUP: the original code created, dropped, and recreated this column
# three times (with interleaved str()/unique()/xtabs() debugging); only the
# final filtered build is kept -- the resulting column is identical.
t2reboot[,booklaburglu:=as.character(NA)]
t2reboot[abs(labT1gestagedays_1-bookgestagedays)<7 & laburglu_1%in%c("NEG","POS"),
         booklaburglu:=laburglu_1]
xtabs(~t2reboot$booklaburglu)
# MAKE BOOK VISIT FOR LABBLOODGLU
# Random blood glucose at booking (lab repeat 1 within a week of booking).
t2reboot[,booklabbloodglu:=as.integer(NA)]
t2reboot[abs(labT1gestagedays_1-bookgestagedays)<7,booklabbloodglu:=labbloodglu_1]
xtabs(~t2reboot$booklabbloodglu, addNA=T)
# MAKE BOOK VISIT FOR LABBLOODGLU_HIGH
# High random blood glucose: >=140 mg/dL (500 used as a plausibility cap).
t2reboot[,booklabbloodglu_high:=as.logical(NA)]
t2reboot[!is.na(booklabbloodglu),booklabbloodglu_high:=FALSE]
t2reboot[booklabbloodglu>=140 & booklabbloodglu<500,booklabbloodglu_high:=TRUE]
xtabs(~t2reboot$booklabbloodglu_high, addNA=T)
# MAKE BOOK VISIT FOR LABFASTBLOODGLU
# Fasting blood glucose at booking.
t2reboot[,booklabfastbloodglu:=as.numeric(NA)]
t2reboot[abs(labT1gestagedays_1-bookgestagedays)<7,booklabfastbloodglu:=labfastbloodglu_1]
xtabs(~t2reboot$booklabfastbloodglu)
# MAKE BOOK VISIT FOR LABfastBLOODGLU_HIGH
# High fasting blood glucose: >126 mg/dL.
t2reboot[,booklabfastbloodglu_high:=as.logical(NA)]
t2reboot[!is.na(booklabfastbloodglu),booklabfastbloodglu_high:=FALSE]
t2reboot[booklabfastbloodglu>126 ,booklabfastbloodglu_high:=TRUE]
xtabs(~t2reboot$booklabfastbloodglu_high, addNA=T)
# Discrepancy Variable anexamsfh variable
# sfhDiscrep_k: absolute difference between symphysis-fundal height (weeks)
# and gestational age (weeks) at repeat k; booking values are aliased in as
# repeat 0 so they are included in the loop.
t2reboot[,anexamsfh_0:=bookexamsfh]
t2reboot[,angestage_0:=bookgestage]
vars <- stringr::str_subset(names(t2reboot), "^anexamsfh_")
vars <- stringr::str_remove(vars, "anexamsfh_")
#anexamsfh stuff
for(i in vars){
  print(i)
  anexamsfh <-sprintf("anexamsfh_%s",i)
  angestage <- sprintf("angestage_%s",i)
  sfhDiscrep <- sprintf("sfhDiscrep_%s",i)
  # initialise as NA, then fill where both inputs are present
  t2reboot[,(sfhDiscrep):=as.numeric(NA)]
  t2reboot[!is.na(get(angestage)) &
             !is.na(get(anexamsfh)), (sfhDiscrep):=abs(get(anexamsfh)-get(angestage))]
}
# SFH discrepancy with ancongestagesizevisitweek
# sfhDiscrepCon_k: absolute difference between the consultant-recorded
# gestational age (weeks) and angestage at repeat k.
vars <- stringr::str_subset(names(t2reboot), "^anconancgestationaageatvisitweeks_")
vars <- stringr::str_remove(vars, "anconancgestationaageatvisitweeks_")
#anconancgestationaageatvisitweeks var
for(i in vars){
  print(i)
  anconangestageweeks <-sprintf("anconancgestationaageatvisitweeks_%s",i)
  angestage <- sprintf("angestage_%s",i)
  sfhDiscrepCon <- sprintf("sfhDiscrepCon_%s",i)
  t2reboot[,(sfhDiscrepCon):=as.numeric(NA)]
  t2reboot[!is.na(get(angestage)) &
             !is.na(get(anconangestageweeks)),
           (sfhDiscrepCon):=abs(get(anconangestageweeks)-get(angestage))]
}
# NOTE(review): the loop below is a verbatim duplicate of the one above
# (copy-paste leftover).  It recomputes the same columns with the same
# values, so it is harmless but redundant and could be deleted.
# SFH discrepancy with ancongestagesizevisitweek
vars <- stringr::str_subset(names(t2reboot), "^anconancgestationaageatvisitweeks_")
vars <- stringr::str_remove(vars, "anconancgestationaageatvisitweeks_")
#anconancgestationaageatvisitweeks var
for(i in vars){
  print(i)
  anconangestageweeks <-sprintf("anconancgestationaageatvisitweeks_%s",i)
  angestage <- sprintf("angestage_%s",i)
  sfhDiscrepCon <- sprintf("sfhDiscrepCon_%s",i)
  t2reboot[,(sfhDiscrepCon):=as.numeric(NA)]
  t2reboot[!is.na(get(angestage)) &
             !is.na(get(anconangestageweeks)),
           (sfhDiscrepCon):=abs(get(anconangestageweeks)-get(angestage))]
}
# anT1 in weeks to calculate sfhDiscrep via anexamsfh and anT1gestagedays to weeks
# anT1gAweeks_k: completed gestational weeks at each ANC repeat
# (floor(days / 7)); booking is aliased in as repeat 0 and intentionally
# left in place -- the next loop consumes it.
t2reboot[,anT1gestagedays_0:=bookgestagedays]
vars <- stringr::str_subset(names(t2reboot), "^anT1gestagedays_")
vars <- stringr::str_remove(vars, "^anT1gestagedays_")
for (i in vars){
  anT1gestagedays <- sprintf("anT1gestagedays_%s",i)
  anT1gAweeks <- sprintf("anT1gAweeks_%s",i)
  t2reboot[, (anT1gAweeks):=floor(get(anT1gestagedays)/7)]
}
# Discrepancy Variable anexamsfh variable
# sfhDiscrepAnt1gas_k: |SFH (weeks) - gestational age in completed weeks|;
# sfhDiscrepAnt1gasCat_k: TRUE when that discrepancy exceeds 2 weeks.
t2reboot[,anexamsfh_0:=bookexamsfh]
vars <- stringr::str_subset(names(t2reboot), "^anexamsfh_")
vars <- stringr::str_remove(vars, "anexamsfh_")
for(i in vars){
  print(i)
  anexamsfh <-sprintf("anexamsfh_%s",i)
  anT1gAweeks <- sprintf("anT1gAweeks_%s",i)
  sfhDiscrepAnt1gas <- sprintf("sfhDiscrepAnt1gas_%s",i)
  sfhDiscrepAnt1gasCat <- sprintf("sfhDiscrepAnt1gasCat_%s",i)
  # initialise as NA (consistent with the sfhDiscrep_* loops above, which
  # the original version of this loop omitted)
  t2reboot[,(sfhDiscrepAnt1gas):=as.numeric(NA)]
  t2reboot[,(sfhDiscrepAnt1gasCat):=as.logical(NA)]
  # BUG FIX: was !is.na(anT1gAweeks) -- testing the character *name* held in
  # the local variable (always non-NA) instead of the column; read the
  # column via get() like every other condition in this loop.
  t2reboot[!is.na(get(anT1gAweeks)) &
             !is.na(get(anexamsfh)), (sfhDiscrepAnt1gas):=abs(get(anexamsfh)-get(anT1gAweeks))]
  t2reboot[!is.na(get(anT1gAweeks)) &
             !is.na(get(anexamsfh)), (sfhDiscrepAnt1gasCat):=abs(get(anexamsfh)-get(anT1gAweeks))>2]
}
# an exam malpresentation into one variable
# BUG FIX: both loops indexed names(d) with a logical mask computed on
# names(t2reboot).  t2reboot carries extra derived columns, so the mask was
# longer than names(d); it only worked because none of the extra columns
# matched the patterns.  Use names(t2reboot) on both sides.
# NOTE(review): "Trasverse" is presumably the spelling stored in the source
# data -- kept as-is.  str_replace("anexampalp_1","anexampalp","malpresanexam_")
# yields "malpresanexam__1" (double underscore); also kept, since downstream
# code may rely on that spelling.
vars_source <- names(t2reboot)[stringr::str_detect(names(t2reboot),"^anexampalp_")]
vars_outcome <- stringr::str_replace(vars_source, "anexampalp", "malpresanexam_")
for(i in seq_along(vars_source)){
  var_source <- vars_source[i]
  var_outcome <- vars_outcome[i]
  # transverse/breech on palpation counts as malpresentation
  t2reboot[get(var_source) %in% c("Trasverse", "Breech"), (var_outcome):="Yes"]
}
# uspres malpresentation variable
vars_source <- names(t2reboot)[stringr::str_detect(names(t2reboot),"^uspres_")]
vars_outcome <- stringr::str_replace(vars_source, "uspres_", "us_malpres_")
for(i in seq_along(vars_source)){
  var_source <- vars_source[i]
  var_outcome <- vars_outcome[i]
  # transverse/breech on ultrasound counts as malpresentation
  t2reboot[get(var_source) %in% c("Trasverse", "Breech"), (var_outcome):="Yes"]
}
#' Flag, per gestational-age window, whether a visit-level variable of
#' interest was recorded with a qualifying value.
#'
#' For every window in `days`, a logical column
#' `TrialOne_<variableOfInterestName>_<windowName>` is created on `t2reboot`
#' (a data.table, modified by reference and also returned).  The flag is
#' initialised to FALSE for every row with a booking record, then flipped to
#' TRUE when any repeat k has gestage-days inside the window AND the paired
#' variable-of-interest column holds a qualifying value.
#'
#' @param t2reboot data.table holding the repeat columns.
#' @param days named list; each element is the vector of gestage DAYS in that
#'   window, and the element name becomes the new column's suffix.
#' @param variableOfInterestName suffix used in the new column names.
#' @param variableOfInterestPattern stem of the repeat columns to test
#'   (e.g. "anbpsyst" pairs with anbpsyst_1, anbpsyst_2, ...).
#' @param TruevaluesMin,TruevaluesMax inclusive numeric range counted as
#'   qualifying.  Use the range OR TruevaluesDiscrete, never all three.
#' @param TruevaluesDiscrete vector of discrete qualifying values.
#' @param gestagedaysVariable stem of the gestage-days repeat columns that
#'   the pattern columns are paired with (positionally, by suffix).
#' @return t2reboot (same object; columns added by reference).
VisitVariables <- function(t2reboot,days,variableOfInterestName,variableOfInterestPattern,TruevaluesMin=NULL,TruevaluesMax=NULL,TruevaluesDiscrete=NULL,gestagedaysVariable="anT1gestagedays" ){
  # Scalar argument validation (`&&`, not the vectorized `&`).
  if(!is.null(TruevaluesMin) && !is.null(TruevaluesMax) && !is.null(TruevaluesDiscrete)){
    stop ("ALL TRUE VALUES NOT NULL")
  }
  if(is.null(TruevaluesMin) && is.null(TruevaluesMax) && is.null(TruevaluesDiscrete)){
    stop ("ALL TRUE VALUES NULL")
  }
  # Pair every gestage-days repeat column with its variable-of-interest twin
  # by substituting the stem in the column name.
  gestagedaysVariablewithcarrot <- sprintf("^%s",gestagedaysVariable)
  listOfGestAgeVars <- names(t2reboot)[stringr::str_detect(names(t2reboot),gestagedaysVariablewithcarrot)]
  listOfInterestVars <- stringr::str_replace(listOfGestAgeVars, gestagedaysVariable,variableOfInterestPattern)
  for(i in seq_along(days)){
    # name of the new window flag
    var <- sprintf("TrialOne_%s_%s",variableOfInterestName,names(days)[i])
    # initialize all as FALSE if has booking variable
    t2reboot[!is.na(ident_dhis2_booking),(var):=FALSE]
    # scan every repeat pair for a qualifying value inside this window
    for(j in seq_along(listOfGestAgeVars)){
      gestageVar <- listOfGestAgeVars[j]
      interestVar <- listOfInterestVars[j]
      if(!is.null(TruevaluesDiscrete)){
        # discrete qualifying values
        t2reboot[!is.na(get(var)) & get(gestageVar) %in% days[[i]] & !is.na(get(interestVar)) & get(interestVar) %in% TruevaluesDiscrete ,(var):=TRUE]
      }else{
        # inclusive numeric range [TruevaluesMin, TruevaluesMax]
        t2reboot[!is.na(get(var)) & get(gestageVar) %in% days[[i]] & !is.na(get(interestVar)) & get(interestVar)>=TruevaluesMin & get(interestVar)<=TruevaluesMax, (var):=TRUE]
      }
    }
  }
  return(t2reboot)
}
###### identifying outcomes #######
# categories we want
# Named windows of gestational age in DAYS.  The first ten names are the
# clinical visit windows ("15_17" = days 105-125, i.e. weeks 15-17); the
# single-week "WW_WW" entries are used for the management indicators.
# NOTE: "23_23" and "34_34" appear in both groups; the duplicate "34_34"
# holds the identical day range, and VisitVariables() iterates positionally,
# so the second copy just recomputes the same flag.
days <- list(
  "00_14"=c(-500:104),
  "15_17"=c(105:125),
  "18_22"=c(126:160),
  "23_23"=c(161:167),
  "24_28"=c(168:202),
  "29_30"=c(203:216),
  "31_33"=c(217:237),
  "34_34"=c(238:244),
  "35_37"=c(245:265),
  "38_41"=c(266:293),
  #using below vectors for managements instead of using two seperate vectors
  "00_00"=0*7+c(0:6),
  "01_01"=1*7+c(0:6),
  "02_02"=2*7+c(0:6),
  "03_03"=3*7+c(0:6),
  "04_04"=4*7+c(0:6),
  "05_05"=5*7+c(0:6),
  "06_06"=6*7+c(0:6),
  "07_07"=7*7+c(0:6),
  "08_08"=8*7+c(0:6),
  "09_09"=9*7+c(0:6),
  "10_10"=10*7+c(0:6),
  "11_11"=11*7+c(0:6),
  "12_12"=12*7+c(0:6),
  "13_13"=13*7+c(0:6),
  "14_14"=14*7+c(0:6),
  "15_15"=15*7+c(0:6),
  "16_16"=16*7+c(0:6),
  "17_17"=17*7+c(0:6),
  "18_18"=18*7+c(0:6),
  "19_19"=19*7+c(0:6),  # BUG FIX: was 9*7+c(0:6), duplicating week 09
  "20_20"=20*7+c(0:6),
  "21_21"=21*7+c(0:6),
  "22_22"=22*7+c(0:6),
  #"23_23"=23*7+c(0:6),
  "24_24"=24*7+c(0:6),
  "25_25"=25*7+c(0:6),
  "26_26"=26*7+c(0:6),
  "27_27"=27*7+c(0:6),
  "28_28"=28*7+c(0:6),
  "29_29"=29*7+c(0:6),
  "30_30"=30*7+c(0:6),
  "31_31"=31*7+c(0:6),
  "32_32"=32*7+c(0:6),
  "33_33"=33*7+c(0:6),
  "34_34"=34*7+c(0:6),
  "35_35"=35*7+c(0:6),
  "36_36"=36*7+c(0:6),
  "37_37"=37*7+c(0:6),
  "38_38"=38*7+c(0:6),
  "39_39"=39*7+c(0:6),
  "40_40"=40*7+c(0:6),
  "41_41"=41*7+c(0:6),
  "42_42"=42*7+c(0:6)
)
###ANC Visits####
# Pattern for every section below: alias the booking values in as repeat 0,
# run VisitVariables() to build the per-window flags, drop the repeat-0
# aliases, and eyeball one flag with xtabs().
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anvisitnew",
  variableOfInterestPattern="anT1gestagedays",
  TruevaluesMin=-500,
  TruevaluesMax=260,
  gestagedaysVariable="anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
xtabs(~t2reboot$TrialOne_anvisitnew_00_00)
###ANC BP SYT ####
# BP SYST Present (any plausible systolic reading, 60-170 mmHg)
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpsyst_0:=bookbpsyst]
t2reboot<-VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpsyst_present",
  variableOfInterestPattern="anbpsyst",
  TruevaluesMin=60,
  TruevaluesMax=170,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpsyst_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpsyst_present_00_00)
# BP Diast Present (any plausible diastolic reading, 40-170 mmHg)
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpdiast_0:=bookbpdiast]
t2reboot<- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpdiast_present",
  variableOfInterestPattern="anbpdiast",
  TruevaluesMin=40,
  TruevaluesMax=170,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpdiast_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpdiast_present_00_14)
# BP Syst High (>=140 mmHg)
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpsyst_0:=bookbpsyst]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpsyst_high",
  variableOfInterestPattern="anbpsyst",
  TruevaluesMin=140,
  TruevaluesMax=170,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpsyst_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpsyst_high_00_14)
# BP Syst MildHTN (140-149 mmHg)
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpsyst_0:=bookbpsyst]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpsyst_mildHTN",
  variableOfInterestPattern="anbpsyst",
  TruevaluesMin=140,
  TruevaluesMax=149,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpsyst_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpsyst_mildHTN_00_14)
# BP Syst ModSevHTN (150-170 mmHg)
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpsyst_0:=bookbpsyst]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpsyst_modSevHTN",
  variableOfInterestPattern="anbpsyst",
  TruevaluesMin=150,
  TruevaluesMax=170,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpsyst_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpsyst_modSevHTN_00_14)
# BP Diast High (>=90 mmHg)
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpdiast_0:=bookbpdiast]
t2reboot <-VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpdiast_high",
  variableOfInterestPattern="anbpdiast",
  TruevaluesMin=90,
  TruevaluesMax=200,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpdiast_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpdiast_high_00_14)
# BP Diast MildHTN (90-99 mmHg)
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpdiast_0:=bookbpdiast]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpdiast_mildHTN",
  variableOfInterestPattern="anbpdiast",
  TruevaluesMin=90,
  TruevaluesMax=99,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpdiast_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpdiast_mildHTN_00_14)
# BP Diast Mod/SevHTN (>=100 mmHg)
t2reboot[,anT1gestagedays_0:=bookgestagedays]
t2reboot[,anbpdiast_0:=bookbpdiast]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="anbpdiast_modSevHTN",
  variableOfInterestPattern="anbpdiast",
  TruevaluesMin=100,
  TruevaluesMax=200,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "anT1gestagedays")
t2reboot[,anT1gestagedays_0:=NULL]
t2reboot[,anbpdiast_0:=NULL]
xtabs(~t2reboot$TrialOne_anbpdiast_modSevHTN_00_14)
### ANC Anemia ####
# Per-window Hb flags from the lab repeats; booking Hb aliased as repeat 0.
# NOTE(review): the categories (normal 11-20, severe 1-6.9, mild/mod 7-10.9)
# leave continuous Hb values in (6.9,7) and (10.9,11) unclassified because
# VisitVariables() uses inclusive min/max -- confirm whether Hb is recorded
# to one decimal place.
# lab hb exists
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labhb_0:=booklabhb]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labhb_exists",
  variableOfInterestPattern="labhb",
  TruevaluesMin=1,
  TruevaluesMax=20,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
nrow(t2reboot[labhb_1>=4 & labhb_1<=20])
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labhb_0:=NULL]
xtabs(~t2reboot$TrialOne_labhb_exists_15_17)
#normal hb
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labhb_0:=booklabhb]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labhb_normal",
  variableOfInterestPattern="labhb",
  TruevaluesMin=11,
  TruevaluesMax=20,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labhb_0:=NULL]
xtabs(~t2reboot$TrialOne_labhb_normal_15_17, addNA=T)
# sev anemia
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labhb_0:=booklabhb]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labhb_anemia_sev",
  variableOfInterestPattern="labhb",
  TruevaluesMin=1,
  TruevaluesMax=6.9,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
nrow(t2reboot[labhb_1>=1 & labhb_1<7])
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labhb_0:=NULL]
xtabs(~t2reboot$TrialOne_labhb_anemia_sev_15_17)
# mild and moderate anemia
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labhb_0:=booklabhb]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labhb_anemia_mild_mod",
  variableOfInterestPattern="labhb",
  TruevaluesMin=7,
  TruevaluesMax=10.9,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
nrow(t2reboot[labhb_1>=7 & labhb_1<11])
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labhb_0:=NULL]
nrow(t2reboot[labgestage_1<=15 & labgestage_1<=17 & labhb_1>7 & labhb_1<11])
xtabs(~t2reboot$TrialOne_labhb_anemia_mild_mod_15_17, addNA=T)
### Lab RBS Normal ####
# Per-window urine glucose, random blood sugar (RBS) and fasting blood sugar
# (FBS) flags; booking lab values aliased as repeat 0.
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,laburglu_0:=booklaburglu]
# urine glucose recorded (either result)
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="laburglu_exists",
  variableOfInterestPattern="laburglu",
  TruevaluesMin=NULL,
  TruevaluesMax=NULL,
  TruevaluesDiscrete = c("POS", "NEG"),
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,laburglu_0:=NULL]
xtabs(~t2reboot$TrialOne_laburglu_exists_15_17)
# lab urglu pos
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,laburglu_0:=booklaburglu]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="laburglu_pos",
  variableOfInterestPattern="laburglu",
  TruevaluesMin=NULL,
  TruevaluesMax=NULL,
  TruevaluesDiscrete =c("POS"),
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,laburglu_0:=NULL]
nrow(t2reboot[laburglu_1=="POS" & labgestage_1>0 & labgestage_1<=14])
xtabs(~t2reboot$TrialOne_laburglu_pos_00_14)
# labbloodglu exist (plausible RBS, 50-500 mg/dL)
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labbloodglu_0:=booklabbloodglu]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labbloodglu_exists",
  variableOfInterestPattern="labbloodglu",
  TruevaluesMin=50,
  TruevaluesMax=500,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labbloodglu_0:=NULL]
xtabs(~t2reboot$TrialOne_labbloodglu_exists_15_17)
# high blood glucose (RBS >=140 mg/dL)
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labbloodglu_0:=booklabbloodglu]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labbloodglu_high",
  variableOfInterestPattern="labbloodglu",
  TruevaluesMin=140,
  TruevaluesMax=500,
  TruevaluesDiscrete =NULL,
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labbloodglu_0:=NULL]
xtabs(~t2reboot$TrialOne_labbloodglu_high_00_14)
xtabs(~t2reboot$TrialOne_labbloodglu_high_18_22)
# Lab FBS exists
#http://perinatology.com/Reference/Reference%20Ranges/Glucose,%20fasting.htm
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labfastbloodglu_0:=booklabfastbloodglu]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labfastbloodglu_exists",
  variableOfInterestPattern="labfastbloodglu",
  TruevaluesMin=50,
  TruevaluesMax=200,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labfastbloodglu_0:=NULL]
xtabs(~t2reboot$TrialOne_labfastbloodglu_exists_15_17)
# Lab FBS Normal (71-91 mg/dL, per the reference above)
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labfastbloodglu_0:=booklabfastbloodglu]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labfastbloodglu_normal",
  variableOfInterestPattern="labfastbloodglu",
  TruevaluesMin=71,
  TruevaluesMax=91,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labfastbloodglu_0:=NULL]
xtabs(~t2reboot$TrialOne_labfastbloodglu_normal_15_17)
# Lab FBS likely GDM (92-125 mg/dL)
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labfastbloodglu_0:=booklabfastbloodglu]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labfastbloodglu_likelyGDM",
  variableOfInterestPattern="labfastbloodglu",
  TruevaluesMin=92,
  TruevaluesMax=125,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labfastbloodglu_0:=NULL]
xtabs(~t2reboot$TrialOne_labfastbloodglu_likelyGDM_24_28)
# Lab FBS High (>=126 mg/dL)
t2reboot[,labT1gestagedays_0:=bookgestagedays]
t2reboot[,labfastbloodglu_0:=booklabfastbloodglu]
t2reboot <- VisitVariables(
  t2reboot=t2reboot,
  days=days,
  variableOfInterestName="labfastbloodglu_high",
  variableOfInterestPattern="labfastbloodglu",
  TruevaluesMin=126,
  TruevaluesMax=500,
  TruevaluesDiscrete = NULL,
  gestagedaysVariable = "labT1gestagedays")
t2reboot[,labT1gestagedays_0:=NULL]
t2reboot[,labfastbloodglu_0:=NULL]
xtabs(~t2reboot$TrialOne_labfastbloodglu_high_24_28)
#### US visits ####
# Each call below creates per-window TrialOne_<name>_<w1>_<w2> indicator
# columns from ultrasound-visit source columns, keyed on usT1gestagedays.
# Has an US visit (any recorded US gestational age in 10..300 days)
t2reboot <-VisitVariables(
t2reboot=t2reboot,
days=days,
variableOfInterestName="us_exists",
variableOfInterestPattern="usT1gestagedays",
TruevaluesMin=10,
TruevaluesMax=300,
TruevaluesDiscrete = NULL,
gestagedaysVariable ="usT1gestagedays")
xtabs(~t2reboot$TrialOne_us_exists_00_14)
# US suspected IUGR (usiugr coded 1)
t2reboot <-VisitVariables(
t2reboot=t2reboot,
days=days,
variableOfInterestName="us_iugrSuspected",
variableOfInterestPattern="usiugr",
TruevaluesMin=NULL,
TruevaluesMax=NULL,
TruevaluesDiscrete = 1,
gestagedaysVariable ="usT1gestagedays")
xtabs(~t2reboot$TrialOne_us_iugrSuspected_00_14)
# US expected LGA (uslga coded 1)
t2reboot <- VisitVariables(
t2reboot=t2reboot,
days=days,
variableOfInterestName="us_lgaSuspected",
variableOfInterestPattern="uslga",
TruevaluesMin=1,
TruevaluesMax=1,
TruevaluesDiscrete = NULL,
gestagedaysVariable = "usT1gestagedays")
xtabs(~t2reboot$TrialOne_us_lgaSuspected_00_14)
# US malpresentation flag (us_malpres recorded as "Yes")
t2reboot <- VisitVariables(
t2reboot=t2reboot,
days=days,
variableOfInterestName="us_malpres",
variableOfInterestPattern="us_malpres",
TruevaluesMin=NULL,
TruevaluesMax=NULL,
TruevaluesDiscrete="Yes",
gestagedaysVariable = "usT1gestagedays")
xtabs(~t2reboot$TrialOne_us_malpres_00_14)
# US malpresentation from the presentation variable (uspres in
# Transverse/Breech). NOTE(review): "Trasverse" matches the spelling used in
# the source data -- do not "fix" it here without checking the raw values.
t2reboot <- VisitVariables(
t2reboot=t2reboot,
days=days,
variableOfInterestName="us_malpresvar",
variableOfInterestPattern="uspres",
TruevaluesMin=NULL,
TruevaluesMax=NULL,
TruevaluesDiscrete= c("Trasverse","Breech"),
gestagedaysVariable = "usT1gestagedays")
xtabs(~t2reboot$TrialOne_us_malpresvar_00_14)
# Presentation was checked at all (any of the four recorded categories)
t2reboot <- VisitVariables(
t2reboot=t2reboot,
days=days,
variableOfInterestName="us_pres_checked",
variableOfInterestPattern="uspres",
TruevaluesMin=NULL,
TruevaluesMax=NULL,
TruevaluesDiscrete= c("Trasverse","Breech","Cephalic","Unknown"),
gestagedaysVariable = "usT1gestagedays")
xtabs(~t2reboot$TrialOne_us_pres_checked_00_14, addNA=T)
### removed sfh discrepancies and anexampalp code from here
####Referrals####
# Per-window referral indicators built from the management-type columns
# (mantypex), keyed on manT1gestagedays.
# Referred to high-risk clinic
t2reboot <- VisitVariables(
t2reboot=t2reboot,
days=days,
variableOfInterestName="refHR",
variableOfInterestPattern="mantypex",
TruevaluesMin=NULL,
TruevaluesMax=NULL,
TruevaluesDiscrete ="RefHighRisk",
gestagedaysVariable = "manT1gestagedays")
# Spot checks: counts of first-management HR referrals by gestational age
# (days 15-17 / weeks 0-14) and the resulting window flags by trial arm.
nrow(t2reboot[mantypex_1=="RefHighRisk" & manT1gestagedays_1>=15 & manT1gestagedays_1<=17])
nrow(t2reboot[mantypex_1=="RefHighRisk" & mangestage_1>=0 & mangestage_1<=14])
xtabs(~t2reboot[ident_dhis2_control==T]$TrialOne_refHR_00_14)
xtabs(~t2reboot[ident_dhis2_control==F]$TrialOne_refHR_00_14)
xtabs(~t2reboot$TrialOne_refHR_35_37)
# Referred to hospital
t2reboot <- VisitVariables(
t2reboot=t2reboot,
days=days,
variableOfInterestName="refHosp",
variableOfInterestPattern="mantypex",
TruevaluesMin=NULL,
TruevaluesMax=NULL,
TruevaluesDiscrete ="RefHosp",
gestagedaysVariable = "manT1gestagedays")
nrow(t2reboot[mantypex_1=="RefHosp" & mangestage_1>=0 & mangestage_1<=14])
xtabs(~t2reboot[ident_dhis2_control==T]$TrialOne_refHosp_00_14)
xtabs(~t2reboot[ident_dhis2_control==F]$mantypex_1, addNA=T)
# Referred to diabetes clinic
t2reboot <- VisitVariables(
t2reboot=t2reboot,
days=days,
variableOfInterestName="refDiab",
variableOfInterestPattern="mantypex",
TruevaluesMin=NULL,
TruevaluesMax=NULL,
TruevaluesDiscrete ="RefDiabetes",
gestagedaysVariable = "manT1gestagedays")
nrow(t2reboot[mantypex_1=="RefDiabetes" & mangestage_1>=0 & mangestage_1<=14])
xtabs(~t2reboot$TrialOne_refDiab_00_14)
# Management recorded as performed (manperf coded 1)
t2reboot <- VisitVariables(
t2reboot=t2reboot,
days=days,
variableOfInterestName="manperf",
variableOfInterestPattern="manperf",
TruevaluesMin=1,
TruevaluesMax=1,
TruevaluesDiscrete = NULL,
gestagedaysVariable = "manT1gestagedays")
xtabs(~t2reboot$TrialOne_manperf_18_22)
######### Managements ############
# take into account the 4 weeks after 37
# Severe-anemia management, one column per gestational week
# (TrialOne_manhb_<ww>_<ww>):
#   NA    = no severe anemia flagged that week
#   FALSE = severe anemia flagged, no follow-up found
#   TRUE  = severe anemia flagged and a hospital referral recorded in the
#           same or the following week (i..i+1)
# Two scratch trackers are kept; in this loop both are flipped by the same
# refHosp check, so they coincide here -- the two-tracker template matters in
# later loops where the two follow-up checks differ.
for(i in 0:37){
#i=23
# zero-pad the week numbers so they match the _ww_ww column-name suffixes
week_current <- formatC(i, width=2, flag="0")
weeks_later <- formatC(i+c(0:1), width=2, flag="0")
#output variable
var_manhb <- sprintf("TrialOne_manhb_%s_%s", week_current, week_current)
var_temp_manperf <- "temp_manperf"
var_temp_manhb <- "temp_manhb"
# trigger: severe anemia this week
var_badhb <- sprintf("TrialOne_labhb_anemia_sev_%s_%s", week_current, week_current)
# start everyone at NA (not at risk this week)
t2reboot[,(var_temp_manperf):=as.logical(NA)]
t2reboot[,(var_temp_manhb):=as.logical(NA)]
# severe anemia -> provisionally unmanaged
t2reboot[get(var_badhb)==TRUE, (var_temp_manperf):=FALSE]
t2reboot[get(var_badhb)==TRUE, (var_temp_manhb):=FALSE]
for(week_later in weeks_later){
# a hospital referral in the follow-up window counts as the management action
var_secondcheck <- sprintf("TrialOne_refHosp_%s_%s",
week_later,
week_later)
# flip "unmanaged" to "managed" once the referral is seen
t2reboot[get(var_temp_manperf)==FALSE &
get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
# same flip for the control-arm tracker
t2reboot[get(var_temp_manhb)==FALSE & get(var_secondcheck)==TRUE, (var_temp_manhb):=TRUE]
}
# combine the trackers into the per-week output column
t2reboot[,(var_manhb):=as.logical(NA)]
# control arm: the referral alone counts
t2reboot[ident_dhis2_control==T,(var_manhb):=get(var_temp_manhb)]
# intervention arm: both trackers must be TRUE
t2reboot[ident_dhis2_control==F,(var_manhb):=get(var_temp_manhb) & get(var_temp_manperf)]
# drop the scratch columns so the next iteration starts clean
t2reboot[,(var_temp_manperf):=NULL]
t2reboot[,(var_temp_manhb):=NULL]
}
xtabs(~t2reboot$TrialOne_manhb_24_24)
#mild_mod anemia retest after one month
# Mild/moderate-anemia management (TrialOne_manhb_mildmodhbret_<ww>_<ww>):
# success = an Hb retest exists 3-5 weeks after the flagged week.
# Unlike the severe-anemia loop above, the follow-up check here is
# labhb_exists (a repeat Hb measurement), not a referral.
for(i in 0:37){
# zero-pad the week numbers so they match the _ww_ww column-name suffixes
week_current <- formatC(i, width=2, flag="0")
weeks_later <- formatC(i+c(3:5), width=2, flag="0")
#output variable
var_manhb <- sprintf("TrialOne_manhb_mildmodhbret_%s_%s", week_current, week_current)
var_temp_manperf <- "temp_manperf"
var_temp_manhb <- "temp_manhb"
# trigger: mild/moderate anemia this week
var_badhb <- sprintf("TrialOne_labhb_anemia_mild_mod_%s_%s", week_current, week_current)
# start everyone at NA (not at risk this week)
t2reboot[,(var_temp_manperf):=as.logical(NA)]
t2reboot[,(var_temp_manhb):=as.logical(NA)]
# mild/moderate anemia -> provisionally unmanaged
t2reboot[get(var_badhb)==TRUE, (var_temp_manperf):=FALSE]
t2reboot[get(var_badhb)==TRUE, (var_temp_manhb):=FALSE]
for(week_later in weeks_later){
# a repeat Hb measurement 3-5 weeks later counts as the management action
var_secondcheck <- sprintf("TrialOne_labhb_exists_%s_%s",
week_later,
week_later)
# flip "unmanaged" to "managed" once the retest is seen
t2reboot[get(var_temp_manperf)==FALSE &
get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
# same flip for the control-arm tracker (same check in this loop)
t2reboot[get(var_temp_manhb)==FALSE & get(var_secondcheck)==TRUE, (var_temp_manhb):=TRUE]
}
# combine the trackers into the per-week output column
t2reboot[,(var_manhb):=as.logical(NA)]
# control arm: the retest alone counts
t2reboot[ident_dhis2_control==T,(var_manhb):=get(var_temp_manhb)]
# intervention arm: both trackers must be TRUE
t2reboot[ident_dhis2_control==F,(var_manhb):=get(var_temp_manhb) & get(var_temp_manperf)]
# drop the scratch columns so the next iteration starts clean
t2reboot[,(var_temp_manperf):=NULL]
t2reboot[,(var_temp_manhb):=NULL]
}
xtabs(~t2reboot$TrialOne_manhb_mildmodhbret_32_32)
#mild htn
#Urine stick AND LFT AND KFT AND ultrasound within a week
#refer to hospital if proteinuria
#ModsevGHTbpsyst
# Moderate/severe gestational hypertension management
# (TrialOne_manhtn_ModSev_<ww>_<ww>). Here the two trackers differ:
# temp_manperf is flipped by a hospital referral, temp_manght by a repeat
# BP measurement (anbpsyst_present) 3-4 weeks later.
for(i in 0:37){
#i=23
# zero-pad the week numbers so they match the _ww_ww column-name suffixes
week_current <- formatC(i, width=2, flag="0")
weeks_later <- formatC(i+c(3:4), width=2, flag="0")
#output variable
var_manght <- sprintf("TrialOne_manhtn_ModSev_%s_%s", week_current, week_current)
var_temp_manperf <- "temp_manperf"
var_temp_manght <- "temp_manght"
# trigger: moderate/severe hypertension (systolic BP) this week
var_badght <- sprintf("TrialOne_anbpsyst_modSevHTN_%s_%s", week_current, week_current)
# start everyone at NA (not at risk this week)
t2reboot[,(var_temp_manperf):=as.logical(NA)]
t2reboot[,(var_temp_manght):=as.logical(NA)]
# mod/sev hypertension -> provisionally unmanaged
t2reboot[get(var_badght)==TRUE, (var_temp_manperf):=FALSE]
t2reboot[get(var_badght)==TRUE, (var_temp_manght):=FALSE]
for(week_later in weeks_later){
# referral check: hospital referral within 3-4 weeks
var_secondcheck <- sprintf("TrialOne_refHosp_%s_%s",
week_later,
week_later)
# flip "unmanaged" to "managed" once the referral is seen
t2reboot[get(var_temp_manperf)==FALSE &
get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
# re-check: a BP measurement present within 3-4 weeks
var_secondcheck <- sprintf("TrialOne_anbpsyst_present_%s_%s",
week_later,
week_later)
# flip "unmanaged" to "managed" once the BP re-check is seen
t2reboot[get(var_temp_manght)==FALSE & get(var_secondcheck)==TRUE, (var_temp_manght):=TRUE]
}
# combine the trackers into the per-week output column
t2reboot[,(var_manght):=as.logical(NA)]
# control arm: the BP re-check alone counts
t2reboot[ident_dhis2_control==T,(var_manght):=get(var_temp_manght)]
# intervention arm: re-check AND referral
t2reboot[ident_dhis2_control==F,(var_manght):=get(var_temp_manght) & get(var_temp_manperf)]
# drop the scratch columns so the next iteration starts clean
t2reboot[,(var_temp_manperf):=NULL]
t2reboot[,(var_temp_manght):=NULL]
}
xtabs(~t2reboot$TrialOne_manhtn_ModSev_18_18)
# High RBG, RefHosp
# High random blood glucose managed by a hospital referral
# (TrialOne_manRBGHigh_Hosp_<ww>_<ww>): NA = no high RBG that week,
# FALSE = high RBG without follow-up, TRUE = high RBG followed by a
# hospital referral in weeks i..i+1. Both trackers use the same check here.
for(i in 0:37){
# zero-pad the week numbers so they match the _ww_ww column-name suffixes
week_current <- formatC(i, width=2, flag="0")
weeks_later <- formatC(i+c(0:1), width=2, flag="0")
#output variable
var_mangdm <- sprintf("TrialOne_manRBGHigh_Hosp_%s_%s", week_current, week_current)
var_temp_manperf <- "temp_manperf"
var_temp_mangdm <- "temp_mangdm"
# trigger: high random blood glucose this week
var_badgdm <- sprintf("TrialOne_labbloodglu_high_%s_%s", week_current, week_current)
# start everyone at NA (not at risk this week)
t2reboot[,(var_temp_manperf):=as.logical(NA)]
t2reboot[,(var_temp_mangdm):=as.logical(NA)]
# high RBG -> provisionally unmanaged
t2reboot[get(var_badgdm)==TRUE, (var_temp_manperf):=FALSE]
t2reboot[get(var_badgdm)==TRUE, (var_temp_mangdm):=FALSE]
for(week_later in weeks_later){
# a hospital referral in the follow-up window counts as the management action
var_secondcheck <- sprintf("TrialOne_refHosp_%s_%s",
week_later,
week_later)
# flip "unmanaged" to "managed" once the referral is seen
t2reboot[get(var_temp_manperf)==FALSE &
get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
# same flip for the control-arm tracker (same check in this loop)
t2reboot[get(var_temp_mangdm)==FALSE & get(var_secondcheck)==TRUE, (var_temp_mangdm):=TRUE]
}
# combine the trackers into the per-week output column
t2reboot[,(var_mangdm):=as.logical(NA)]
# control arm: the referral alone counts
t2reboot[ident_dhis2_control==T,(var_mangdm):=get(var_temp_mangdm)]
# intervention arm: both trackers must be TRUE
t2reboot[ident_dhis2_control==F,(var_mangdm):=get(var_temp_mangdm) & get(var_temp_manperf)]
# drop the scratch columns so the next iteration starts clean
t2reboot[,(var_temp_manperf):=NULL]
t2reboot[,(var_temp_mangdm):=NULL]
}
xtabs(~t2reboot$TrialOne_manRBGHigh_Hosp_24_24)
# High RBG, RefHR
# High random blood glucose managed by a high-risk-clinic referral
# (TrialOne_manRBGHigh_HR_<ww>_<ww>). Same template as the RefHosp loop
# above, with TrialOne_refHR_* as the follow-up check.
for(i in 0:37){
# zero-pad the week numbers so they match the _ww_ww column-name suffixes
week_current <- formatC(i, width=2, flag="0")
weeks_later <- formatC(i+c(0:1), width=2, flag="0")
#output variable
var_mangdm <- sprintf("TrialOne_manRBGHigh_HR_%s_%s", week_current, week_current)
var_temp_manperf <- "temp_manperf"
var_temp_mangdm <- "temp_mangdm"
# trigger: high random blood glucose this week
var_badgdm <- sprintf("TrialOne_labbloodglu_high_%s_%s", week_current, week_current)
# start everyone at NA (not at risk this week)
t2reboot[,(var_temp_manperf):=as.logical(NA)]
t2reboot[,(var_temp_mangdm):=as.logical(NA)]
# high RBG -> provisionally unmanaged
t2reboot[get(var_badgdm)==TRUE, (var_temp_manperf):=FALSE]
t2reboot[get(var_badgdm)==TRUE, (var_temp_mangdm):=FALSE]
for(week_later in weeks_later){
# a high-risk referral in the follow-up window counts as the management action
var_secondcheck <- sprintf("TrialOne_refHR_%s_%s",
week_later,
week_later)
# flip "unmanaged" to "managed" once the referral is seen
t2reboot[get(var_temp_manperf)==FALSE &
get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
# same flip for the control-arm tracker (same check in this loop)
t2reboot[get(var_temp_mangdm)==FALSE & get(var_secondcheck)==TRUE, (var_temp_mangdm):=TRUE]
}
# combine the trackers into the per-week output column
t2reboot[,(var_mangdm):=as.logical(NA)]
# control arm: the referral alone counts
t2reboot[ident_dhis2_control==T,(var_mangdm):=get(var_temp_mangdm)]
# intervention arm: both trackers must be TRUE
t2reboot[ident_dhis2_control==F,(var_mangdm):=get(var_temp_mangdm) & get(var_temp_manperf)]
# drop the scratch columns so the next iteration starts clean
t2reboot[,(var_temp_manperf):=NULL]
t2reboot[,(var_temp_mangdm):=NULL]
}
xtabs(~t2reboot$TrialOne_manRBGHigh_HR_24_24)
# High RBG, RefDiabetes
# High random blood glucose managed by a diabetes-clinic referral
# (TrialOne_manRBGHigh_Diab_<ww>_<ww>): NA = no high RBG that week,
# FALSE = high RBG without follow-up, TRUE = high RBG followed by a
# RefDiabetes referral in weeks i..i+1.
for(i in 0:37){
  # zero-pad the week numbers so they match the _ww_ww column-name suffixes
  week_current <- formatC(i, width=2, flag="0")
  weeks_later <- formatC(i+c(0:1), width=2, flag="0")
  # output variable
  var_mangdm <- sprintf("TrialOne_manRBGHigh_Diab_%s_%s", week_current, week_current)
  var_temp_manperf <- "temp_manperf"
  var_temp_mangdm <- "temp_mangdm"
  # trigger: high random blood glucose this week
  var_badgdm <- sprintf("TrialOne_labbloodglu_high_%s_%s", week_current, week_current)
  # start everyone at NA (not at risk this week)
  t2reboot[,(var_temp_manperf):=as.logical(NA)]
  t2reboot[,(var_temp_mangdm):=as.logical(NA)]
  # high RBG -> provisionally unmanaged
  t2reboot[get(var_badgdm)==TRUE, (var_temp_manperf):=FALSE]
  t2reboot[get(var_badgdm)==TRUE, (var_temp_mangdm):=FALSE]
  for(week_later in weeks_later){
    # a diabetes-clinic referral in the follow-up window counts as management
    var_secondcheck <- sprintf("TrialOne_refDiab_%s_%s",
                               week_later,
                               week_later)
    # flip "unmanaged" to "managed" once the referral is seen
    t2reboot[get(var_temp_manperf)==FALSE &
               get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
    # same flip for the control-arm tracker (same check in this loop)
    t2reboot[get(var_temp_mangdm)==FALSE & get(var_secondcheck)==TRUE, (var_temp_mangdm):=TRUE]
  }
  # combine the trackers into the per-week output column
  t2reboot[,(var_mangdm):=as.logical(NA)]
  # control arm: the referral alone counts
  t2reboot[ident_dhis2_control==T,(var_mangdm):=get(var_temp_mangdm)]
  # intervention arm: both trackers must be TRUE
  t2reboot[ident_dhis2_control==F,(var_mangdm):=get(var_temp_mangdm) & get(var_temp_manperf)]
  # drop the scratch columns so the next iteration starts clean
  t2reboot[,(var_temp_manperf):=NULL]
  t2reboot[,(var_temp_mangdm):=NULL]
}
# BUG FIX: this sanity check previously tabulated TrialOne_manRBGHigh_HR_24_24
# (copy-paste from the preceding loop); tabulate the Diab column that this
# loop actually creates.
xtabs(~t2reboot$TrialOne_manRBGHigh_Diab_24_24)
# malpresentation: us_malpres
# Ultrasound-detected malpresentation managed by a hospital referral
# (TrialOne_manmalpres_us_<ww>_<ww>), triggered by us_malpresvar
# (Transverse/Breech on US).
for(i in 0:37){
# zero-pad the week numbers so they match the _ww_ww column-name suffixes
week_current <- formatC(i, width=2, flag="0")
weeks_later <- formatC(i+c(0:1), width=2, flag="0")
#output variable
var_manpres <- sprintf("TrialOne_manmalpres_us_%s_%s", week_current, week_current)
var_temp_manperf <- "temp_manperf"
var_temp_manpres <- "temp_manpres"
# trigger: malpresentation on ultrasound this week
var_badpres <- sprintf("TrialOne_us_malpresvar_%s_%s", week_current, week_current)
# start everyone at NA (not at risk this week)
t2reboot[,(var_temp_manperf):=as.logical(NA)]
t2reboot[,(var_temp_manpres):=as.logical(NA)]
# malpresentation -> provisionally unmanaged
t2reboot[get(var_badpres)==TRUE, (var_temp_manperf):=FALSE]
t2reboot[get(var_badpres)==TRUE, (var_temp_manpres):=FALSE]
for(week_later in weeks_later){
# a hospital referral in the follow-up window counts as the management action
var_secondcheck <- sprintf("TrialOne_refHosp_%s_%s",
week_later,
week_later)
# flip "unmanaged" to "managed" once the referral is seen
t2reboot[get(var_temp_manperf)==FALSE &
get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
# same flip for the control-arm tracker (same check in this loop)
t2reboot[get(var_temp_manpres)==FALSE & get(var_secondcheck)==TRUE, (var_temp_manpres):=TRUE]
}
# combine the trackers into the per-week output column
t2reboot[,(var_manpres):=as.logical(NA)]
# control arm: the referral alone counts
t2reboot[ident_dhis2_control==T,(var_manpres):=get(var_temp_manpres)]
# intervention arm: both trackers must be TRUE
t2reboot[ident_dhis2_control==F,(var_manpres):=get(var_temp_manpres) & get(var_temp_manperf)]
# drop the scratch columns so the next iteration starts clean
t2reboot[,(var_temp_manperf):=NULL]
t2reboot[,(var_temp_manpres):=NULL]
}
xtabs(~t2reboot$TrialOne_manmalpres_us_36_36)
# malpresentation: anexampalpmal
# Palpation-detected malpresentation managed by a hospital referral
# (TrialOne_manmalpres_anexam_<ww>_<ww>), triggered by anexampalpmal.
for(i in 0:37){
# zero-pad the week numbers so they match the _ww_ww column-name suffixes
week_current <- formatC(i, width=2, flag="0")
weeks_later <- formatC(i+c(0:1), width=2, flag="0")
#output variable
var_manpres <- sprintf("TrialOne_manmalpres_anexam_%s_%s", week_current, week_current)
var_temp_manperf <- "temp_manperf"
var_temp_manpres <- "temp_manpres"
# trigger: malpresentation on palpation this week
var_badpres <- sprintf("TrialOne_anexampalpmal_%s_%s", week_current, week_current)
# start everyone at NA (not at risk this week)
t2reboot[,(var_temp_manperf):=as.logical(NA)]
t2reboot[,(var_temp_manpres):=as.logical(NA)]
# malpresentation -> provisionally unmanaged
t2reboot[get(var_badpres)==TRUE, (var_temp_manperf):=FALSE]
t2reboot[get(var_badpres)==TRUE, (var_temp_manpres):=FALSE]
for(week_later in weeks_later){
# a hospital referral in the follow-up window counts as the management action
var_secondcheck <- sprintf("TrialOne_refHosp_%s_%s",
week_later,
week_later)
# flip "unmanaged" to "managed" once the referral is seen
t2reboot[get(var_temp_manperf)==FALSE &
get(var_secondcheck)==TRUE, (var_temp_manperf):=TRUE]
# same flip for the control-arm tracker (same check in this loop)
t2reboot[get(var_temp_manpres)==FALSE & get(var_secondcheck)==TRUE, (var_temp_manpres):=TRUE]
}
# combine the trackers into the per-week output column
t2reboot[,(var_manpres):=as.logical(NA)]
# control arm: the referral alone counts
t2reboot[ident_dhis2_control==T,(var_manpres):=get(var_temp_manpres)]
# intervention arm: both trackers must be TRUE
t2reboot[ident_dhis2_control==F,(var_manpres):=get(var_temp_manpres) & get(var_temp_manperf)]
# drop the scratch columns so the next iteration starts clean
t2reboot[,(var_temp_manperf):=NULL]
t2reboot[,(var_temp_manpres):=NULL]
}
xtabs(~t2reboot$TrialOne_manmalpres_anexam_35_35)
### iugr and lga stuff for managements was remoed from here
########################## Referred for any management ##########################
############ Ref Hosp ####################
# TrialOne_manRef_Hosp_<ww>_<ww>: a hospital referral in week i, counted as
# "managed" in the intervention arm only when the same-week management was
# also recorded as performed (manperf); in the control arm the referral
# alone counts. NOTE: weeks_later is i+0 only, i.e. the same week.
for(i in 0:37){
# zero-pad the week numbers so they match the _ww_ww column-name suffixes
week_current <- formatC(i, width=2, flag="0")
weeks_later <- formatC(i+c(0:0), width=2, flag="0")
#output variable
var_refHosp <- sprintf("TrialOne_manRef_Hosp_%s_%s", week_current, week_current)
var_temp_manperf <- "temp_manperf"
var_temp_refHosp <- "temp_refHosp"
# trigger: hospital referral this week
var_refHospsource <- sprintf("TrialOne_refHosp_%s_%s", week_current, week_current)
# start everyone at NA (no referral this week)
t2reboot[,(var_temp_manperf):=as.logical(NA)]
t2reboot[,(var_temp_refHosp):=as.logical(NA)]
# referral present -> manperf tracker starts FALSE (intervention arm
# requirement), referral tracker is simply TRUE
t2reboot[get(var_refHospsource)==TRUE, (var_temp_manperf):=FALSE]
#t2reboot[!is.na(get(var_refHospsource)), (var_temp_refHosp):=FALSE]
t2reboot[get(var_refHospsource)==TRUE, (var_temp_refHosp):=TRUE]
for(week_later in weeks_later){
# management-performed check for the same week
var_manperf <- sprintf("TrialOne_manperf_%s_%s",
week_later,
week_later)
# flip the manperf tracker once the management is recorded as performed
t2reboot[get(var_temp_manperf)==FALSE &
get(var_manperf)==TRUE, (var_temp_manperf):=TRUE]
}
# combine the trackers into the per-week output column
t2reboot[,(var_refHosp):=as.logical(NA)]
# control arm: the referral alone counts
t2reboot[ident_dhis2_control==T,(var_refHosp):=get(var_temp_refHosp)]
# intervention arm: referral AND management-performed
t2reboot[ident_dhis2_control==F,(var_refHosp):=get(var_temp_manperf) &
get(var_temp_refHosp)]
# drop the scratch columns so the next iteration starts clean
t2reboot[,(var_temp_manperf):=NULL]
t2reboot[,(var_temp_refHosp):=NULL]
}
# Sanity checks by trial arm at weeks 35 and 32, plus a row-level
# cross-check table for the intervention arm at week 32.
xtabs(~t2reboot[ident_dhis2_control==T]$TrialOne_manRef_Hosp_35_35)
xtabs(~t2reboot[ident_dhis2_control==F]$TrialOne_manRef_Hosp_35_35)
xtabs(~t2reboot[ident_dhis2_control==T]$TrialOne_manRef_Hosp_32_32)
xtabs(~t2reboot[ident_dhis2_control==F]$TrialOne_manRef_Hosp_32_32)
checkHosp <- t2reboot[!is.na(TrialOne_manRef_Hosp_32_32) &
ident_dhis2_control==F, c("TrialOne_manperf_32_32",
"TrialOne_refHosp_32_32",
"TrialOne_manRef_Hosp_32_32")]
########## Ref HR for any reason at any time point #########
# TrialOne_manRef_HR_<ww>_<ww>: same template as the Ref Hosp loop above,
# for high-risk-clinic referrals (TrialOne_refHR_*).
for(i in 0:37){
# zero-pad the week numbers so they match the _ww_ww column-name suffixes
week_current <- formatC(i, width=2, flag="0")
weeks_later <- formatC(i+c(0:0), width=2, flag="0")
#output variable
var_refHR <- sprintf("TrialOne_manRef_HR_%s_%s", week_current, week_current)
var_temp_manperf <- "temp_manperf"
var_temp_refHR <- "temp_refHR"
# trigger: high-risk referral this week
var_refHRsource <- sprintf("TrialOne_refHR_%s_%s", week_current, week_current)
# start everyone at NA (no referral this week)
t2reboot[,(var_temp_manperf):=as.logical(NA)]
t2reboot[,(var_temp_refHR):=as.logical(NA)]
# referral present -> manperf tracker starts FALSE, referral tracker TRUE
t2reboot[get(var_refHRsource)==TRUE, (var_temp_manperf):=FALSE]
t2reboot[get(var_refHRsource)==TRUE, (var_temp_refHR):=TRUE]
for(week_later in weeks_later){
# management-performed check for the same week
var_manperf <- sprintf("TrialOne_manperf_%s_%s",
week_later,
week_later)
# flip the manperf tracker once the management is recorded as performed
t2reboot[get(var_temp_manperf)==FALSE &
get(var_manperf)==TRUE, (var_temp_manperf):=TRUE]
}
# combine the trackers into the per-week output column
t2reboot[,(var_refHR):=as.logical(NA)]
# control arm: the referral alone counts
t2reboot[ident_dhis2_control==T,(var_refHR):=get(var_temp_refHR)]
# intervention arm: referral AND management-performed
t2reboot[ident_dhis2_control==F,(var_refHR):=get(var_temp_manperf) &
get(var_temp_refHR)]
# drop the scratch columns so the next iteration starts clean
t2reboot[,(var_temp_manperf):=NULL]
t2reboot[,(var_temp_refHR):=NULL]
}
# Sanity checks by trial arm at weeks 35 and 20, plus a row-level
# cross-check table for the intervention arm at week 20.
xtabs(~t2reboot[ident_dhis2_control==T]$TrialOne_manRef_HR_35_35)
xtabs(~t2reboot[ident_dhis2_control==F]$TrialOne_manRef_HR_35_35)
xtabs(~t2reboot[ident_dhis2_control==T]$TrialOne_manRef_HR_20_20)
xtabs(~t2reboot[ident_dhis2_control==F]$TrialOne_manRef_HR_20_20)
checkHR <- t2reboot[!is.na(TrialOne_manRef_HR_20_20) &
ident_dhis2_control==F, c("TrialOne_manperf_20_20",
"TrialOne_refHR_20_20",
"TrialOne_manRef_HR_20_20")]
##################### Process Outcomes #################
########## Anemia ##########
# Opportunity flags for anemia screening: 1 = the woman qualifies for that
# screening window, NA = she does not. The string categories are the
# booking-gestational-age bins (in days) from bookgestagedays_cats.
## Opportunity 1: booked before 24 weeks (booking day <= 167)
t2reboot[,Opportunity_anemia_screening_1:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(0,104]",
"(104,125]",
"(125,160]",
"(160,167]"),
Opportunity_anemia_screening_1:=1]
xtabs(~t2reboot$Opportunity_anemia_screening_1, addNA=T)
## Opportunity 2: booked in days (167,202] OR had a visit at 24-28 weeks
t2reboot[,Opportunity_anemia_screening_2:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(167,202]")|
TrialOne_anvisitnew_24_28==T,
Opportunity_anemia_screening_2:=1]
xtabs(~t2reboot$Opportunity_anemia_screening_2, addNA=T)
## Opportunity 3: booked at 29-34 weeks (days 202-244)
t2reboot[,Opportunity_anemia_screening_3:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(202,216]",
"(216,237]",
"(237,244]"),
Opportunity_anemia_screening_3:=1]
xtabs(~t2reboot$Opportunity_anemia_screening_3, addNA=T)
## Opportunity 4: booked in days (244,265] OR had a visit at 35-37 weeks
t2reboot[,Opportunity_anemia_screening_4:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(244,265]") |
TrialOne_anvisitnew_35_37==T,
Opportunity_anemia_screening_4:=1]
xtabs(~t2reboot$Opportunity_anemia_screening_4, addNA=T)
## Opportunity 5: severe anemia found at booking or at any visit up to wk 23
t2reboot[,Opportunity_anemia_screening_5:=as.numeric(NA)]
t2reboot[TrialOne_labhb_anemia_sev_00_14==T|
TrialOne_labhb_anemia_sev_15_17==T|
TrialOne_labhb_anemia_sev_18_22==T|
TrialOne_labhb_anemia_sev_23_23==T,Opportunity_anemia_screening_5:=1]
xtabs(~t2reboot$Opportunity_anemia_screening_5, addNA=T)
## Opportunity 6: mild/moderate anemia found in the same windows
t2reboot[,Opportunity_anemia_screening_6:=as.numeric(NA)]
t2reboot[TrialOne_labhb_anemia_mild_mod_00_14==T|
TrialOne_labhb_anemia_mild_mod_15_17==T|
TrialOne_labhb_anemia_mild_mod_18_22==T|
TrialOne_labhb_anemia_mild_mod_23_23==T,
Opportunity_anemia_screening_6:=1]
xtabs(~t2reboot$Opportunity_anemia_screening_6, addNA=T)
# ADJUSTING OPPORTUNITIES FOR THOSE WHO HAVE BEEN REFERRED
## Before 24 weeks
# manhbsev: severe-anemia management recorded in any single week 00..23.
# Element-wise OR across the per-week TrialOne_manhb_<ww>_<ww> columns;
# NA propagation is identical to a chained `|` over the same columns.
manhb_cols_pre24 <- sprintf("TrialOne_manhb_%02d_%02d", 0:23, 0:23)
t2reboot[, manhbsev := Reduce(`|`, mget(manhb_cols_pre24))]
xtabs(~t2reboot$manhbsev, addNA = TRUE)
# RefHr: referred to the high-risk clinic in any week 00..23.
#   NA    = outside the before-24-weeks opportunity group and never referred
#   FALSE = in the opportunity group, never referred
#   TRUE  = a manRef_HR flag in some week 00..23 (set for any row with a
#           referral flag, regardless of the opportunity group -- this
#           mirrors the original logic).
t2reboot[, RefHr := as.logical(NA)]
t2reboot[Opportunity_anemia_screening_1 == 1, RefHr := FALSE]
refhr_cols_pre24 <- sprintf("TrialOne_manRef_HR_%02d_%02d", 0:23, 0:23)
refhr_any_pre24 <- t2reboot[, Reduce(`|`, mget(refhr_cols_pre24))]
t2reboot[refhr_any_pre24 %in% TRUE, RefHr := TRUE]
xtabs(~t2reboot$RefHr, addNA = TRUE)
## At 24-28 weeks
# Remove the 24-28-week screening opportunity for women who were already
# referred to high risk by the week of their qualifying visit.
# BUG FIX: outer parentheses added around the whole visit/referral
# disjunction. Previously `&` bound tighter than `|`, so the
# Opportunity_anemia_screening_2==1 guard applied only to the first
# disjunct. Observable results happen to be unchanged (the column is only
# ever 1 or NA, and NA - 1 stays NA), but the guard now covers every
# disjunct as intended and the statement is no longer order-fragile.
t2reboot[Opportunity_anemia_screening_2==1 &
((TrialOne_anvisitnew_24_24 &
(RefHr==T))|
(TrialOne_anvisitnew_25_25 &
(RefHr==T|TrialOne_manRef_HR_24_24==T))|
(TrialOne_anvisitnew_26_26 &
(RefHr==T|TrialOne_manRef_HR_24_24==T|
TrialOne_manRef_HR_25_25==T))|
(TrialOne_anvisitnew_27_27 &
(RefHr==T|TrialOne_manRef_HR_24_24==T|
TrialOne_manRef_HR_25_25==T|
TrialOne_manRef_HR_26_26==T))|
(TrialOne_anvisitnew_28_28 &
(RefHr==T|
TrialOne_manRef_HR_24_24==T|
TrialOne_manRef_HR_25_25==T|
TrialOne_manRef_HR_26_26==T|
TrialOne_manRef_HR_27_27==T))),
Opportunity_anemia_screening_2:=Opportunity_anemia_screening_2-1]
xtabs(~t2reboot$Opportunity_anemia_screening_2, addNA=T)
# 35-37 weeks
# Same adjustment for the 35-37-week opportunity: drop it for women already
# referred to high risk by the week of their qualifying 29-34-week visit.
# BUG FIX: outer parentheses added around the disjunction (same `&`/`|`
# precedence issue as the 24-28-week block above; results unchanged because
# the column is only 1 or NA).
t2reboot[Opportunity_anemia_screening_4==1 &
((TrialOne_anvisitnew_29_30==T &
(RefHr==T|
TrialOne_manRef_HR_24_24==T|
TrialOne_manRef_HR_25_25==T|
TrialOne_manRef_HR_26_26==T|
TrialOne_manRef_HR_27_27==T|
TrialOne_manRef_HR_28_28==T))|
(TrialOne_anvisitnew_31_33==T &
(RefHr==T|
TrialOne_manRef_HR_24_24==T|
TrialOne_manRef_HR_25_25==T|
TrialOne_manRef_HR_26_26==T|
TrialOne_manRef_HR_27_27==T|
TrialOne_manRef_HR_28_28==T|
TrialOne_manRef_HR_29_29==T|
TrialOne_manRef_HR_30_30==T))|
(TrialOne_anvisitnew_34_34==T &
(RefHr==T|
TrialOne_manRef_HR_24_24==T|
TrialOne_manRef_HR_25_25==T|
TrialOne_manRef_HR_26_26==T|
TrialOne_manRef_HR_27_27==T|
TrialOne_manRef_HR_28_28==T|
TrialOne_manRef_HR_29_29==T|
TrialOne_manRef_HR_30_30==T|
TrialOne_manRef_HR_31_31==T|
TrialOne_manRef_HR_32_32==T|
TrialOne_manRef_HR_33_33==T))),
Opportunity_anemia_screening_4:=Opportunity_anemia_screening_4-1]
xtabs(~t2reboot$Opportunity_anemia_screening_4, addNA=T)
# Initialise the HbonTime success flags. Scheme for each flag:
#   NA    = not in the relevant opportunity group
#   FALSE = in the group; success not (yet) established
# The flags are flipped to TRUE further below once the matching screening/
# management evidence is found.
# Hb bands used here: <7 = severe anemia, [7,11) = mild/moderate,
# [11,18] = treated as normal (see the flip blocks below).
# Group 1: booked before 24 weeks
t2reboot[, HbonTime_1a:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_1==1, HbonTime_1a:=FALSE]
t2reboot[, HbonTime_1b:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_1==1 &
booklabhb<7 & booklabhb>=2,HbonTime_1b:=FALSE]
t2reboot[, HbonTime_1c:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_1==1 &
booklabhb>=7 & booklabhb<11,HbonTime_1c:=FALSE ]
# Group 2: 24-28 week window
t2reboot[,HbonTime_2a:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_2==1, HbonTime_2a:=FALSE]
t2reboot[, HbonTime_2b:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_2==1 &
TrialOne_labhb_anemia_sev_24_28==T, HbonTime_2b:=FALSE]
t2reboot[,HbonTime_2c:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_2==1 &
TrialOne_labhb_anemia_mild_mod_24_28==T, HbonTime_2c:=FALSE]
# Group 3: booked at 29-34 weeks. NOTE(review): 3a additionally requires a
# non-missing booking Hb, unlike 1a -- confirm this asymmetry is intended.
t2reboot[, HbonTime_3a:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_3==1 &
(!is.na(booklabhb)), HbonTime_3a:=FALSE]
t2reboot[, HbonTime_3b:= as.logical(NA)]
# BUG FIX: lower bound changed from >2 to >=2 so a booking Hb of exactly 2
# falls in the severe band here, matching HbonTime_1b above.
t2reboot[Opportunity_anemia_screening_3==1 &
(booklabhb<7 & booklabhb>=2), HbonTime_3b:=FALSE]
t2reboot[,HbonTime_3c:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_3==1 &
(booklabhb<11 & booklabhb>=7), HbonTime_3c:=FALSE]
# Group 4: 35-37 week window
t2reboot[,HbonTime_4a:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_4==1, HbonTime_4a:=FALSE]
t2reboot[,HbonTime_4b:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_4==1 &
TrialOne_labhb_anemia_sev_35_37==T,HbonTime_4b:=FALSE]
t2reboot[,HbonTime_4c:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_4==1 &
TrialOne_labhb_anemia_mild_mod_35_37==T, HbonTime_4c:=FALSE]
# Groups 5/6: severe and mild/moderate anemia outside the main windows
t2reboot[,HbonTime_5:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_5==1, HbonTime_5:=FALSE]
t2reboot[,HbonTime_6:= as.logical(NA)]
t2reboot[Opportunity_anemia_screening_6==1, HbonTime_6:=FALSE]
# Success flags for the booked-before-24-weeks group.
# 1a: booking Hb in the normal range [11, 18].
t2reboot[HbonTime_1a == FALSE & booklabhb %between% c(11, 18), HbonTime_1a := TRUE]
xtabs(~t2reboot$HbonTime_1a, addNA = TRUE)
# 1b: severe anemia at booking that was managed in some week before 24
# (manhbsev, built above).
t2reboot[HbonTime_1b == FALSE & manhbsev == TRUE, HbonTime_1b := TRUE]
xtabs(~t2reboot$HbonTime_1b, addNA = TRUE)
# 1c: mild/moderate anemia at booking with a documented retest management
# in any single week 00..23 (element-wise OR across the per-week columns;
# equivalent to the explicit chained `|` it replaces).
retest_cols_pre24 <- sprintf("TrialOne_manhb_mildmodhbret_%02d_%02d", 0:23, 0:23)
retested_pre24 <- t2reboot[, Reduce(`|`, mget(retest_cols_pre24))]
t2reboot[HbonTime_1c == FALSE & retested_pre24 %in% TRUE, HbonTime_1c := TRUE]
xtabs(~t2reboot$HbonTime_1c, addNA = TRUE)
#24-28 screenings: flip the success flags for the 24-28-week group.
# 2a: a normal Hb result in the 24-28 window.
t2reboot[HbonTime_2a==F &
TrialOne_labhb_normal_24_28==T, HbonTime_2a:=TRUE]
# 2b: severe anemia in 24-28 managed in any of weeks 24..28.
# BUG FIX: parentheses added around the OR chain. `&` binds tighter than
# `|` in R, so the unparenthesised version could set HbonTime_2b:=TRUE for
# rows where HbonTime_2b was NA (no 24-28 opportunity) but one of the later
# weekly management flags happened to be TRUE. Sibling blocks (1c, 3b)
# already use the parenthesised form.
t2reboot[HbonTime_2b==F &
(TrialOne_manhb_24_24==T|
TrialOne_manhb_25_25==T|
TrialOne_manhb_26_26==T|
TrialOne_manhb_27_27==T|
TrialOne_manhb_28_28==T), HbonTime_2b:=TRUE]
# 2c: mild/moderate anemia in 24-28 with a retest management in weeks 24..28.
# BUG FIX: same precedence fix as 2b.
t2reboot[HbonTime_2c==F &
(TrialOne_manhb_mildmodhbret_24_24==T|
TrialOne_manhb_mildmodhbret_25_25==T|
TrialOne_manhb_mildmodhbret_26_26==T|
TrialOne_manhb_mildmodhbret_27_27==T|
TrialOne_manhb_mildmodhbret_28_28==T), HbonTime_2c:=TRUE]
#booked 29-30, 31-33, 34: flip the success flags for the 29-34-week group.
# 3a: booking Hb in the normal range.
# BUG FIX: lower bound changed from >11 to >=11 so that Hb exactly 11
# counts as normal here, as it does for the before-24-weeks group (1a).
t2reboot[HbonTime_3a==F & Opportunity_anemia_screening_3==1 &
(booklabhb<=18 & booklabhb>=11), HbonTime_3a:=TRUE]
# 3c: mild/moderate anemia managed with a retest in weeks 29..34.
# BUG FIX: the guard was `HbonTime_3c==1`; at this point the column only
# holds FALSE or NA, so the condition never matched and 3c could never be
# flipped to TRUE. Use ==F like the sibling 3b block below.
t2reboot[HbonTime_3c==F &
(TrialOne_manhb_mildmodhbret_29_29==T|
TrialOne_manhb_mildmodhbret_30_30==T|
TrialOne_manhb_mildmodhbret_31_31==T|
TrialOne_manhb_mildmodhbret_32_32==T|
TrialOne_manhb_mildmodhbret_33_33==T|
TrialOne_manhb_mildmodhbret_34_34==T),
HbonTime_3c:=TRUE]
# 3b: severe anemia managed in weeks 29..34.
t2reboot[HbonTime_3b==F &
(TrialOne_manhb_29_29==T|
TrialOne_manhb_30_30==T|
TrialOne_manhb_31_31==T|
TrialOne_manhb_32_32==T|
TrialOne_manhb_33_33==T|
TrialOne_manhb_34_34==T),
HbonTime_3b:=TRUE]
# 35-37 screenings: flip the success flags for the 35-37-week group.
# 4a: a normal Hb result in the 35-37 window.
t2reboot[HbonTime_4a==F &
TrialOne_labhb_normal_35_37==T, HbonTime_4a:=TRUE]
# 4b: severe anemia in 35-37 managed in any of weeks 35..37.
# BUG FIX: parentheses added around the OR chain (same `&`/`|` precedence
# issue as the 24-28 block above -- without them, NA rows could be flipped
# to TRUE by a stray weekly flag).
t2reboot[HbonTime_4b==F &
(TrialOne_manhb_35_35==T|
TrialOne_manhb_36_36==T|
TrialOne_manhb_37_37==T), HbonTime_4b:=TRUE]
# 4c: mild/moderate anemia in 35-37 with a retest management in weeks 35..37.
# BUG FIX: same precedence fix as 4b.
t2reboot[HbonTime_4c==F &
(TrialOne_manhb_mildmodhbret_35_35==T|
TrialOne_manhb_mildmodhbret_36_36==T|
TrialOne_manhb_mildmodhbret_37_37==T), HbonTime_4c:=TRUE]
# severe anemia outside of time windows (HbonTime_5): success if a
# severe-anemia management is recorded in any week 00..23 or 29..34
# (the 24-28 and 35-37 windows are handled by flags 2b and 4b).
sev_weeks <- c(0:23, 29:34)
sev_man_cols <- sprintf("TrialOne_manhb_%02d_%02d", sev_weeks, sev_weeks)
sev_managed <- t2reboot[, Reduce(`|`, mget(sev_man_cols))]
t2reboot[HbonTime_5 == FALSE & sev_managed %in% TRUE, HbonTime_5 := TRUE]
#mild/mod anem retest
# Success 6: mild/moderate-anemia retest recorded outside the scheduled
# windows (weeks 0-23 or 29-34; 24-28 and 35-37 handled by their own rules).
# FIX: the original listed TrialOne_manhb_mildmodhbret_20_20 twice; the
# duplicate term is removed (behaviour unchanged -- OR is idempotent).
t2reboot[HbonTime_6==F &
           (TrialOne_manhb_mildmodhbret_00_00==T|
              TrialOne_manhb_mildmodhbret_01_01==T|
              TrialOne_manhb_mildmodhbret_02_02==T|
              TrialOne_manhb_mildmodhbret_03_03==T|
              TrialOne_manhb_mildmodhbret_04_04==T|
              TrialOne_manhb_mildmodhbret_05_05==T|
              TrialOne_manhb_mildmodhbret_06_06==T|
              TrialOne_manhb_mildmodhbret_07_07==T|
              TrialOne_manhb_mildmodhbret_08_08==T|
              TrialOne_manhb_mildmodhbret_09_09==T|
              TrialOne_manhb_mildmodhbret_10_10==T|
              TrialOne_manhb_mildmodhbret_11_11==T|
              TrialOne_manhb_mildmodhbret_12_12==T|
              TrialOne_manhb_mildmodhbret_13_13==T|
              TrialOne_manhb_mildmodhbret_14_14==T|
              TrialOne_manhb_mildmodhbret_15_15==T|
              TrialOne_manhb_mildmodhbret_16_16==T|
              TrialOne_manhb_mildmodhbret_17_17==T|
              TrialOne_manhb_mildmodhbret_18_18==T|
              TrialOne_manhb_mildmodhbret_19_19==T|
              TrialOne_manhb_mildmodhbret_20_20==T|
              TrialOne_manhb_mildmodhbret_21_21==T|
              TrialOne_manhb_mildmodhbret_22_22==T|
              TrialOne_manhb_mildmodhbret_23_23==T|
              TrialOne_manhb_mildmodhbret_29_29==T|
              TrialOne_manhb_mildmodhbret_30_30==T|
              TrialOne_manhb_mildmodhbret_31_31==T|
              TrialOne_manhb_mildmodhbret_32_32==T|
              TrialOne_manhb_mildmodhbret_33_33==T|
              TrialOne_manhb_mildmodhbret_34_34==T),
         HbonTime_6:=TRUE]
# Overall Hb-screening summary table, written to Excel.
# FIXES vs original:
#  - `Opportun_2` appeared three times and `Opportun_4` twice in the j-list,
#    producing duplicate columns in the output; deduplicated.
#  - Column-name typos corrected: Success_3bFales -> Success_3bFalse,
#    Sucess_3cFalse -> Success_3cFalse.
#  - `na.rm=T` added to the success_5F / success_6F sums for consistency
#    (an NA in the flag column would otherwise make the whole sum NA).
prelimHB <- t2reboot[,.(N=.N,
                        Opportun_1=sum(Opportunity_anemia_screening_1, na.rm=T),
                        Success_1a=sum(HbonTime_1a, na.rm=T),
                        Success_1aFalse=sum(HbonTime_1a==FALSE, na.rm=T),
                        Success_1b=sum(HbonTime_1b, na.rm=T),
                        Success_1bFalse=sum(HbonTime_1b==FALSE, na.rm=T),
                        Success_1c=sum(HbonTime_1c, na.rm=T),
                        Success_1cFalse=sum(HbonTime_1c==FALSE, na.rm=T),
                        Opportun_2=sum(Opportunity_anemia_screening_2, na.rm=T),
                        Success_2a=sum(HbonTime_2a, na.rm=T),
                        Success_2b=sum(HbonTime_2b, na.rm=T),
                        Success_2bFalse=sum(HbonTime_2b==F, na.rm=T),
                        Success_2c=sum(HbonTime_2c, na.rm=T),
                        Success_2cFalse=sum(HbonTime_2c==F, na.rm=T),
                        Opportun_3=sum(Opportunity_anemia_screening_3, na.rm=T),
                        Success_3a=sum(HbonTime_3a, na.rm=T),
                        Success_3b=sum(HbonTime_3b, na.rm=T),
                        Success_3bFalse=sum(HbonTime_3b==FALSE, na.rm=T),
                        Success_3c=sum(HbonTime_3c, na.rm=T),
                        Success_3cFalse=sum(HbonTime_3c==F, na.rm=T),
                        Opportun_4=sum(Opportunity_anemia_screening_4, na.rm=T),
                        Success_4a=sum(HbonTime_4a, na.rm=T),
                        Success_4b=sum(HbonTime_4b, na.rm=T),
                        Screening4bF=sum(HbonTime_4b==F, na.rm=T),
                        Success_4c=sum(HbonTime_4c, na.rm=T),
                        Screening4cF=sum(HbonTime_4c==F, na.rm=T),
                        Opportun_5=sum(Opportunity_anemia_screening_5, na.rm=T),
                        Success_5=sum(HbonTime_5, na.rm=T),
                        success_5F=sum(HbonTime_5==F, na.rm=T),
                        Opportun_6=sum(Opportunity_anemia_screening_6, na.rm=T),
                        Success_6=sum(HbonTime_6, na.rm=T),
                        success_6F=sum(HbonTime_6==F, na.rm=T))]
openxlsx::write.xlsx(prelimHB,file.path(FOLDER_DATA_RESULTS,
                                        "T2",
                                        sprintf("%s_T2_recruitment_prelim_Hb.xlsx",
                                                lubridate::today())))
########## Attendance ##########
# making vars
# refHRhosp: TRUE when a manual referral to high-risk care (HR) or hospital
# was recorded at any week from booking through week 14; used below to void
# early attendance opportunities.
t2reboot[,refHRhosp:= FALSE]
t2reboot[(TrialOne_manRef_HR_00_00==T|
            TrialOne_manRef_HR_01_01==T|
            TrialOne_manRef_HR_02_02==T|
            TrialOne_manRef_HR_03_03==T|
            TrialOne_manRef_HR_04_04==T|
            TrialOne_manRef_HR_05_05==T|
            TrialOne_manRef_HR_06_06==T|
            TrialOne_manRef_HR_07_07==T|
            TrialOne_manRef_HR_08_08==T|
            TrialOne_manRef_HR_09_09==T|
            TrialOne_manRef_HR_10_10==T|
            TrialOne_manRef_HR_11_11==T|
            TrialOne_manRef_HR_12_12==T|
            TrialOne_manRef_HR_13_13==T|
            TrialOne_manRef_HR_14_14==T)|
           (TrialOne_manRef_Hosp_00_00==T|
              TrialOne_manRef_Hosp_01_01==T|
              TrialOne_manRef_Hosp_02_02==T|
              TrialOne_manRef_Hosp_03_03==T|
              TrialOne_manRef_Hosp_04_04==T|
              TrialOne_manRef_Hosp_05_05==T|
              TrialOne_manRef_Hosp_06_06==T|
              TrialOne_manRef_Hosp_07_07==T|
              TrialOne_manRef_Hosp_08_08==T|
              TrialOne_manRef_Hosp_09_09==T|
              TrialOne_manRef_Hosp_10_10==T|
              TrialOne_manRef_Hosp_11_11==T|
              TrialOne_manRef_Hosp_12_12==T|
              TrialOne_manRef_Hosp_13_13==T|
              TrialOne_manRef_Hosp_14_14==T),refHRhosp:=TRUE]
xtabs(~t2reboot$refHRhosp, addNA=T)
## Define Opportunities
# oppt 16 week visit
# Opp_1: 1 when booked at <= 104 gestational days (~15 weeks); voided (set
# to 0) when already referred to HR/hospital before week 15 (refHRhosp).
t2reboot[,Opp_1:= as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(0,104]"),Opp_1:=1]
t2reboot[bookgestagedays_cats %in% c("(0,104]") &
           refHRhosp==T,Opp_1:=0]
xtabs(~t2reboot$Opp_1, addNA=T)
# oppt 18-22 visit
# Opp_2: booked 104-125 gestational days, or carried forward from Opp_1.
t2reboot[,Opp_2:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(104,125]")| Opp_1==1, Opp_2:=1]
xtabs(~t2reboot$Opp_2, addNA=T)
#removing opportunities
# Decrement when referred to HR/hospital in weeks 15-17.
# FIX: the disjunction was unparenthesised, so only the week-15 term was
# gated by `Opp_2==1` (`&` binds tighter than `|`); a week-16/17 referral
# decremented rows whose Opp_2 was NA. The correctly-parenthesised Opp_3
# rule below shows the intended form.
t2reboot[Opp_2==1 &
           ((TrialOne_manRef_HR_15_15==T|TrialOne_manRef_Hosp_15_15==T)|
              (TrialOne_manRef_HR_16_16==T|TrialOne_manRef_Hosp_16_16==T)|
              (TrialOne_manRef_HR_17_17==T|TrialOne_manRef_Hosp_17_17==T)),
         Opp_2:=Opp_2-1]
xtabs(~t2reboot$Opp_2, addNA=T)
# 24-28 week visit
# Opp_3: booked 125-167 days, or carried forward from Opp_2; decremented
# when referred out during weeks 18-23 (parenthesisation here is correct).
t2reboot[,Opp_3:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(125,160]",
                                     "(160,167]") | Opp_2==1, Opp_3:=1]
xtabs(~t2reboot$Opp_3, addNA=T)
# removing opportunities
t2reboot[Opp_3==1 & ((TrialOne_manRef_HR_18_18==T|TrialOne_manRef_Hosp_18_18==T)|
                       (TrialOne_manRef_HR_19_19==T|TrialOne_manRef_Hosp_19_19==T)|
                       (TrialOne_manRef_HR_20_20==T|TrialOne_manRef_Hosp_20_20==T)|
                       (TrialOne_manRef_HR_21_21==T |TrialOne_manRef_Hosp_21_21==T)|
                       (TrialOne_manRef_HR_22_22==T|TrialOne_manRef_Hosp_22_22==T)|
                       (TrialOne_manRef_HR_23_23==T|TrialOne_manRef_Hosp_23_23==T)),
         Opp_3:=Opp_3-1]
xtabs(~t2reboot$Opp_3, addNA=T)
# 31-33 week visit
# Opp_4: booked 160-216 days (NOTE(review): "(160,167]" also feeds Opp_3
# above -- confirm the overlap is intended), or carried forward from Opp_3;
# decremented when referred out during weeks 24-30.
t2reboot[,Opp_4:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(160,167]",
                                     "(167,202]",
                                     "(202,216]")|Opp_3== 1, Opp_4:=1]
xtabs(~t2reboot$Opp_4, addNA=T)
# removing opportunities
t2reboot[Opp_4==1 &
           ((TrialOne_manRef_HR_24_24==T|TrialOne_manRef_Hosp_24_24==T)|
              (TrialOne_manRef_HR_25_25==T|TrialOne_manRef_Hosp_25_25==T)|
              (TrialOne_manRef_HR_26_26==T|TrialOne_manRef_Hosp_26_26==T)|
              (TrialOne_manRef_HR_27_27==T|TrialOne_manRef_Hosp_27_27==T)|
              (TrialOne_manRef_HR_28_28==T|TrialOne_manRef_Hosp_28_28==T)|
              (TrialOne_manRef_HR_29_29==T|TrialOne_manRef_Hosp_29_29==T)|
              (TrialOne_manRef_HR_30_30==T|TrialOne_manRef_Hosp_30_30==T)),
         Opp_4:=Opp_4-1]
xtabs(~t2reboot$Opp_4, addNA=T)
# 35-37 week visit
# Opp_5: booked 216-244 days or carried forward from Opp_4; decremented
# when referred out during weeks 31-34.
t2reboot[,Opp_5:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(216,237]",
                                     "(237,244]") | Opp_4==1, Opp_5:=1]
xtabs(~t2reboot$Opp_5, addNA=T)
t2reboot[Opp_5==1 &
           ((TrialOne_manRef_HR_31_31==T|TrialOne_manRef_Hosp_31_31==T)|
              (TrialOne_manRef_HR_32_32==T|TrialOne_manRef_Hosp_32_32==T)|
              (TrialOne_manRef_HR_33_33==T|TrialOne_manRef_Hosp_33_33==T)|
              (TrialOne_manRef_HR_34_34==T|TrialOne_manRef_Hosp_34_34==T)),
         Opp_5:=Opp_5-1]
xtabs(~t2reboot$Opp_5, addNA=T)
################ successes ##########
# Attendance successes: Succ_k is initialised FALSE only where the matching
# opportunity exists (NA elsewhere) and flipped TRUE when an ANC visit was
# recorded in the window.
# NOTE(review): Succ_1..Succ_5 are reused (overwritten) later by the HTN
# section; prelimAtt below must run before that section, as it does here.
# 15-17 week visit
t2reboot[,Succ_1:=as.logical(NA)]
t2reboot[Opp_1==1, Succ_1:=FALSE]
t2reboot[Succ_1==F &
           TrialOne_anvisitnew_15_17==T, Succ_1:=TRUE]
xtabs(~t2reboot$Succ_1, addNA=T)
# 18-22 week visit
t2reboot[,Succ_2:=as.logical(NA)]
t2reboot[Opp_2==1, Succ_2:=FALSE]
t2reboot[Succ_2==F & TrialOne_anvisitnew_18_22==T, Succ_2:=TRUE]
xtabs(~t2reboot$Succ_2, addNA=T)
# 24-28 week visit
t2reboot[,Succ_3:=as.logical(NA)]
t2reboot[Opp_3==1, Succ_3:=as.logical(FALSE)]
t2reboot[Succ_3==F & TrialOne_anvisitnew_24_28==T, Succ_3:=TRUE]
xtabs(~t2reboot$Succ_3, addNA=T)
# 31-33 week visit
t2reboot[,Succ_4:=as.logical(NA)]
t2reboot[Opp_4==1, Succ_4:=FALSE]
t2reboot[Succ_4==F & TrialOne_anvisitnew_31_33==T, Succ_4:=TRUE]
xtabs(~t2reboot$Succ_4, addNA=T)
# 35-37
t2reboot[,Succ_5:=as.logical(NA)]
t2reboot[Opp_5==1, Succ_5:=FALSE]
t2reboot[Succ_5==F & TrialOne_anvisitnew_35_37==T, Succ_5:=TRUE]
xtabs(~t2reboot$Succ_5, addNA=T)
# Attendance summary, restricted to pregnancies at 280-300 gestational days
# today (i.e. at/near term, so all windows have elapsed), written to Excel.
# NOTE(review): ANC2428Opps uses sum(!is.na(Opp_3)) while every other
# opportunity column uses sum(Opp_k) -- this also counts voided (0)
# opportunities; confirm which is intended. Succ_5==F has no FALSE column
# unlike the other windows.
prelimAtt <- t2reboot[gAtoday_days>=280 & gAtoday_days<=300,.(N=.N,
                                                              bookedb414=sum(bookgestagedays_cats=="(0,104]", na.rm = T),
                                                              ANC15_17Opps=sum(Opp_1,na.rm=T),
                                                              ANC15_17=sum(Succ_1, na.rm=T),
                                                              ANC15_17FALSE=sum(Succ_1==F, na.rm=T),
                                                              booked1515=sum(bookgestagedays_cats=="(104,125]", na.rm = T),
                                                              ANC18_22Opps=sum(Opp_2, na.rm=T),
                                                              ANC18_22=sum(Succ_2, na.rm=T),
                                                              ANC18_22FALSE=sum(Succ_2==F, na.rm=T),
                                                              booked1822=sum(bookgestagedays_cats=="(125,160]", na.rm = T),
                                                              booked2323=sum(bookgestagedays_cats=="(160,167]", na.rm = T),
                                                              ANC2428Opps=sum(!is.na(Opp_3), na.rm=T),
                                                              ANC24_28TRUE=sum(Succ_3, na.rm=T),
                                                              ANC24_28FALSE=sum(Succ_3==F, na.rm=T),
                                                              booked2428=sum(bookgestagedays_cats=="(167,202]", na.rm = T),
                                                              booked2930=sum(bookgestagedays_cats=="(202,216]", na.rm = T),
                                                              ANC31_33Opps=sum(Opp_4, na.rm=T),
                                                              ANC31_33=sum(Succ_4, na.rm=T),
                                                              ANC31_33FALSE=sum(Succ_4==F, na.rm=T),
                                                              Booked31_33=sum(bookgestagedays_cats=="(216,237]", na.rm = T),
                                                              Booked34_34=sum(bookgestagedays_cats=="(237,244]", na.rm = T),
                                                              ANC3537Opps=sum(Opp_5, na.rm=T),
                                                              ANC3537=sum(Succ_5, na.rm=T),
                                                              Booked35_37=sum(bookgestagedays_cats=="(244,265]",
                                                                              na.rm = T))]
openxlsx::write.xlsx(prelimAtt,file.path(FOLDER_DATA_RESULTS,
                                         "T2",
                                         sprintf("%s_T2_recruit_prelim_Attendance.xlsx",
                                                 lubridate::today())))
########## GDM ##########
###Redefining opportinites
# Four GDM-screening opportunity windows: 1 = before 24 weeks (by booking
# gestational-age category), 2 = 24-28 weeks, 3 = after 28 weeks,
# 4 = a high random blood glucose anywhere outside the 24-28 window.
t2reboot[,Opportunity_GDM_screening_1:=as.numeric(NA)]
t2reboot[,Opportunity_GDM_screening_2:=as.numeric(NA)]
t2reboot[,Opportunity_GDM_screening_3:=as.numeric(NA)]
t2reboot[,Opportunity_GDM_screening_4:=as.numeric(NA)]
#t2reboot[,Opportunity_GDM_Screening_5:=as.numeric(NA)]
# before 24
t2reboot[bookgestagedays_cats %in% c("(0,104]",
                                     "(104,125]",
                                     "(125,160]",
                                     "(160,167]"),Opportunity_GDM_screening_1:=1]
#24-28
t2reboot[bookgestagedays_cats %in% c("(167,202]")|
           TrialOne_anvisitnew_24_28==T,Opportunity_GDM_screening_2:=1]
# after 28
t2reboot[bookgestagedays_cats %in% c("(202,216]",
                                     "(216,237]",
                                     "(237,244]",
                                     "(244,265]"), Opportunity_GDM_screening_3:=1]
# high rbs anywhere outside of the 24-28
t2reboot[(TrialOne_labbloodglu_high_00_14==T|
            TrialOne_labbloodglu_high_15_17==T|
            TrialOne_labbloodglu_high_18_22==T|
            TrialOne_labbloodglu_high_23_23==T|
            TrialOne_labbloodglu_high_29_30==T|
            TrialOne_labbloodglu_high_31_33==T|
            TrialOne_labbloodglu_high_34_34==T|
            TrialOne_labbloodglu_high_35_37==T), Opportunity_GDM_screening_4:=1]
xtabs(~t2reboot$Opportunity_GDM_screening_1, addNA=T)
xtabs(~t2reboot$Opportunity_GDM_screening_2, addNA=T)
xtabs(~t2reboot$Opportunity_GDM_screening_3, addNA=T)
xtabs(~t2reboot$Opportunity_GDM_screening_4, addNA=T)
## Remove opportunities for people who were referred to HR or Hosp
#refHRHospmanRBG_1 rename to RefHr
# RefHr: manual HR referral at any week 0-23.
# NOTE(review): the FALSE baseline is seeded from
# Opportunity_anemia_screening_1 (an anemia variable) inside the GDM
# section -- confirm this is the intended denominator.
t2reboot[,RefHr:=as.logical(NA)]
t2reboot[Opportunity_anemia_screening_1==1, RefHr:=FALSE]
t2reboot[(TrialOne_manRef_HR_00_00==T|
            TrialOne_manRef_HR_01_01==T|
            TrialOne_manRef_HR_02_02==T|
            TrialOne_manRef_HR_03_03==T|
            TrialOne_manRef_HR_04_04==T|
            TrialOne_manRef_HR_05_05==T|
            TrialOne_manRef_HR_06_06==T|
            TrialOne_manRef_HR_07_07==T|
            TrialOne_manRef_HR_08_08==T|
            TrialOne_manRef_HR_09_09==T|
            TrialOne_manRef_HR_10_10==T|
            TrialOne_manRef_HR_11_11==T|
            TrialOne_manRef_HR_12_12==T|
            TrialOne_manRef_HR_13_13==T|
            TrialOne_manRef_HR_14_14==T|
            TrialOne_manRef_HR_15_15==T|
            TrialOne_manRef_HR_16_16==T|
            TrialOne_manRef_HR_17_17==T|
            TrialOne_manRef_HR_18_18==T|
            TrialOne_manRef_HR_19_19==T|
            TrialOne_manRef_HR_20_20==T|
            TrialOne_manRef_HR_21_21==T|
            TrialOne_manRef_HR_22_22==T|
            TrialOne_manRef_HR_23_23==T),
         RefHr:=TRUE]
xtabs(~t2reboot$RefHr, addNA=T)
#refHrHosp_2 rename to refHr_2
# refHr_2: HR referral in weeks 29-37.
# NOTE(review): the last term is TrialOne_refHR_35_37 (a range flag) amid
# single-week flags -- confirm whether TrialOne_refHR_37_37 was intended.
t2reboot[,refHr_2:=(
  TrialOne_refHR_29_29==T|
    TrialOne_refHR_30_30==T|
    TrialOne_refHR_31_31==T|
    TrialOne_refHR_32_32==T|
    TrialOne_refHR_33_33==T|
    TrialOne_refHR_34_34==T|
    TrialOne_refHR_35_35==T|
    TrialOne_refHR_36_36==T|
    TrialOne_refHR_35_37==T)]
# Void the 24-28-week GDM opportunity when the first in-window visit came
# after a high-risk referral (before week 24, or earlier within the window).
# FIX: the disjunction was unparenthesised; because `&` binds tighter than
# `|`, only the week-24 term was gated by Opportunity_GDM_screening_2==1,
# so the week-25..28 terms decremented the counter on rows whose
# opportunity was NA.
t2reboot[Opportunity_GDM_screening_2==1 &
           ((TrialOne_anvisitnew_24_24 &
               (RefHr==T))|
              (TrialOne_anvisitnew_25_25 &
                 (RefHr==T|TrialOne_manRef_HR_24_24==T))|
              (TrialOne_anvisitnew_26_26 &
                 (RefHr==T|TrialOne_manRef_HR_24_24==T|
                    TrialOne_manRef_HR_25_25==T))|
              (TrialOne_anvisitnew_27_27 &
                 (RefHr==T|TrialOne_manRef_HR_24_24==T|
                    TrialOne_manRef_HR_25_25==T|
                    TrialOne_manRef_HR_26_26==T))|
              (TrialOne_anvisitnew_28_28 &
                 (RefHr==T|
                    TrialOne_manRef_HR_24_24==T|
                    TrialOne_manRef_HR_25_25==T|
                    TrialOne_manRef_HR_26_26==T|
                    TrialOne_manRef_HR_27_27==T))),
         Opportunity_GDM_screening_2:=Opportunity_GDM_screening_2-1]
# checks
xtabs(~t2reboot$Opportunity_GDM_screening_2, addNA=T)
#Screening before 24 weeks: Creating one var for 3 possibilities
# screenb424: FALSE baseline for all women booked before 24 weeks; TRUE if
# any glucose test (urine, random or fasting) was recorded at booking and
# the booking random glucose was not high (or was missing).
t2reboot[,screenb424:=as.logical(NA)]
t2reboot[bookgestagedays_cats %in% c("(0,104]","(104,125]","(125,160]","(160,167]"),
         screenb424:=F]
t2reboot[screenb424==F &
           (booklabbloodglu_high==F | is.na(booklabbloodglu_high)) &
           (!is.na(booklaburglu) | !is.na(booklabbloodglu)|!is.na(booklabfastbloodglu)),
         screenb424:=T]
xtabs(~t2reboot$screenb424, addNA=T)
scrb424 <- t2reboot[,.(A=sum(ident_dhis2_control==T),
                       B=sum(ident_dhis2_control==F)),
                    keyby=.(screenb424)]
##Defining Successes
# Window 1 (before 24 weeks): overall flag plus three sub-paths
#  1A: urine glucose negative at booking,
#  1B: urine glucose positive AND a random glucose was taken,
#  1C: booking random glucose high (original TODO: should also require a
#      referral -- see comment below).
# FIX: the original re-initialised GDMscreeningontime_1A/1B/1C to NA a
# second time immediately before their TRUE rules; nothing touched them in
# between, so the duplicated initialisations were dead code and have been
# removed (behaviour unchanged).
t2reboot[,GDMscreeningontime_1A:=as.logical(NA)]
t2reboot[,GDMscreeningontime_1B:=as.logical(NA)]
t2reboot[,GDMscreeningontime_1C:=as.logical(NA)]
t2reboot[screenb424==F,
         GDMscreeningontime_1:=FALSE]
t2reboot[screenb424==T,
         GDMscreeningontime_1:=TRUE]
xtabs(~t2reboot$GDMscreeningontime_1, addNA=T)
t2reboot[Opportunity_GDM_screening_1==1 &
           booklaburglu=="NEG",
         GDMscreeningontime_1A:=TRUE]
t2reboot[Opportunity_GDM_screening_1==1 &
           booklaburglu=="POS" &
           !is.na(booklabbloodglu), GDMscreeningontime_1B:=TRUE]
##### Need to add: and referred for 1C!!!! #####
t2reboot[booklabbloodglu_high==T &
           !is.na(booklabbloodglu), GDMscreeningontime_1C:=TRUE]
#24-28 weeks
# Window 2 success: FALSE when no random or fasting glucose exists anywhere
# in weeks 24-28; TRUE when a (non-high) random glucose or any fasting
# glucose exists in the window.
t2reboot[,GDMscreeningontime_2:=as.logical(NA)]
t2reboot[Opportunity_GDM_screening_2==1 &
           (TrialOne_labbloodglu_exists_24_24==F &
              TrialOne_labbloodglu_exists_25_25==F &
              TrialOne_labbloodglu_exists_26_26==F &
              TrialOne_labbloodglu_exists_27_27==F &
              TrialOne_labbloodglu_exists_28_28==F) &
           (TrialOne_labfastbloodglu_exists_24_24==F &
              TrialOne_labfastbloodglu_exists_25_25==F &
              TrialOne_labfastbloodglu_exists_26_26==F &
              TrialOne_labfastbloodglu_exists_27_27==F &
              TrialOne_labfastbloodglu_exists_28_28==F), GDMscreeningontime_2:=F]
# NOTE(review): in the TRUE rule below, `A & B | C` parses as `(A & B) | C`,
# so a fasting-glucose record alone sets TRUE without the
# Opportunity_GDM_screening_2==1 gate; and the `high_..==F` terms are OR-ed,
# which is satisfied whenever any single week lacks a high flag. Confirm
# both are intended.
t2reboot[Opportunity_GDM_screening_2==1 &
           (TrialOne_labbloodglu_exists_24_24==T|
              TrialOne_labbloodglu_exists_25_25==T|
              TrialOne_labbloodglu_exists_26_26==T|
              TrialOne_labbloodglu_exists_27_27==T|
              TrialOne_labbloodglu_exists_28_28==T) &
           (TrialOne_labbloodglu_high_24_24==F|
              TrialOne_labbloodglu_high_25_25==F|
              TrialOne_labbloodglu_high_26_26==F|
              TrialOne_labbloodglu_high_27_27==F|
              TrialOne_labbloodglu_high_28_28==F)|
           (TrialOne_labfastbloodglu_exists_24_24==T|
              TrialOne_labfastbloodglu_exists_25_25==T|
              TrialOne_labfastbloodglu_exists_26_26==T|
              TrialOne_labfastbloodglu_exists_27_27==T|
              TrialOne_labfastbloodglu_exists_28_28==T),GDMscreeningontime_2:=TRUE]
xtabs(~t2reboot$GDMscreeningontime_2, addNA=T)
#Screening after 28 weeks: Creating one var for 3 possibilities
# screenafter28: analogous to screenb424 but for late bookers (after 28
# weeks); urine glucose is not accepted here, only blood tests.
t2reboot[,screenafter28:=as.logical(NA)]
t2reboot[bookgestagedays_cats %in% c("(202,216]","(216,237]","(237,244]","(244,265]"),
         screenafter28:=F]
t2reboot[screenafter28==F &
           (booklabbloodglu_high==F | is.na(booklabbloodglu_high)) &
           (!is.na(booklabbloodglu)|!is.na(booklabfastbloodglu)),
         screenafter28:=T]
xtabs(~t2reboot$screenafter28, addNA=T)
##Defining Success
# Window 3 success mirrors screenafter28 directly.
t2reboot[,GDMscreeningontime_3:=as.logical(NA)]
t2reboot[screenafter28==F,
         GDMscreeningontime_3:=FALSE]
t2reboot[screenafter28==T,GDMscreeningontime_3:=TRUE]
xtabs(~t2reboot$GDMscreeningontime_3, addNA=T)
#management fo high RBG outside of time windows
# Window 4 success: a high out-of-window random glucose was followed by a
# high-risk referral (either the 0-23 or the 29-37 referral flag).
t2reboot[, GDMscreeningontime_4:=as.logical(NA)]
t2reboot[Opportunity_GDM_screening_4==1, GDMscreeningontime_4:= FALSE]
t2reboot[GDMscreeningontime_4==F &
           (RefHr==T|refHr_2==T),GDMscreeningontime_4:=TRUE]
# Per-cluster GDM summary, written to Excel.
# FIX: `keyby` was placed INSIDE the `.()` j-list, so data.table evaluated
# it as a list column rather than as the grouping argument -- the summary
# was not grouped by cluster at all. Moved to its proper position.
prelimGDM <- t2reboot[,.(N=.N,
                         Opportun_1=sum(Opportunity_GDM_screening_1==T, na.rm=T),
                         Success_1A=sum(GDMscreeningontime_1A==T, na.rm=T),
                         Success_1B=sum(GDMscreeningontime_1B==T, na.rm=T),
                         Success_1C=sum(GDMscreeningontime_1C==T, na.rm=T),
                         Screenb424=sum(screenb424==T, na.rm=T),
                         Screenb424False=sum(screenb424==F, na.rm=T),
                         Opportun_2=sum(Opportunity_GDM_screening_2, na.rm=T),
                         Success_2=sum(GDMscreeningontime_2==T, na.rm=T),
                         Opportun_3=sum(Opportunity_GDM_screening_3==T, na.rm=T),
                         Success_3=sum(GDMscreeningontime_3==T, na.rm=T),
                         screenafter28=sum(screenafter28==T, na.rm=T),
                         screenafter28False=sum(screenafter28==F, na.rm=T),
                         screenbtwn=sum(GDMscreeningontime_4==T, na.rm=T),
                         screenbtwnFalse=sum(GDMscreeningontime_4==F, na.rm=T),
                         Opportun_4=sum(Opportunity_GDM_screening_4==T, na.rm=T),
                         Succ_4=sum(GDMscreeningontime_4, na.rm=T)),
                      keyby=.(str_TRIAL_2_Cluster)]
openxlsx::write.xlsx(prelimGDM,file.path(FOLDER_DATA_RESULTS,
                                         "T2",
                                         sprintf("%s_T2_recruitment_prelim_GDM_percluster.xlsx",
                                                 lubridate::today())))
############## HTN ##############
# making vars
# refHRhosp variable made in attendance outcome
## Define Opportunities
# before 16 weeks
# bp_1a: blood-pressure opportunity at booking for early bookers.
t2reboot[,bp_1a:= as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(0,104]"),
         bp_1a:=1]
xtabs(~t2reboot$bp_1a, addNA=T)
# oppt 16 week visit
# NOTE(review): the zeroing below applies only to the booking-category path,
# not to rows that qualified via TrialOne_anvisitnew_15_17 -- confirm.
t2reboot[,bp_1:= as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(104,125]")| TrialOne_anvisitnew_15_17==T,
         bp_1:=1]
t2reboot[bookgestagedays_cats %in% c("(104,125]") &
           refHRhosp==T,bp_1:=0]
xtabs(~t2reboot$bp_1, addNA=T)
# oppt 18-22 visit
t2reboot[,bp_2:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(125,160]")|
           TrialOne_anvisitnew_18_22==T, bp_2:=1]
xtabs(~t2reboot$bp_2, addNA=T)
#removing opportunities
# FIX: the disjunction was unparenthesised; `&` binds tighter than `|`, so
# only the week-15 term was gated by `bp_2==1` and week-16/17 referrals
# decremented rows with no opportunity (NA). The bp_3 rule below shows the
# intended, correctly-parenthesised form.
t2reboot[bp_2==1 &
           ((TrialOne_manRef_HR_15_15==T|TrialOne_manRef_Hosp_15_15==T)|
              (TrialOne_manRef_HR_16_16==T|TrialOne_manRef_Hosp_16_16==T)|
              (TrialOne_manRef_HR_17_17==T|TrialOne_manRef_Hosp_17_17==T)),
         bp_2:=bp_2-1]
xtabs(~t2reboot$bp_2, addNA=T)
# 24-28 week visit
# bp_3..bp_5: blood-pressure opportunities for the later windows; each is
# voided (decremented) when a HR/hospital referral preceded the window.
t2reboot[,bp_3:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(160,167]",
                                     "(167,202]") |
           TrialOne_anvisitnew_24_28==T, bp_3:=1]
xtabs(~t2reboot$bp_3, addNA=T)
# removing opportunities
t2reboot[bp_3==1 & ((TrialOne_manRef_HR_18_18==T|TrialOne_manRef_Hosp_18_18==T)|
                      (TrialOne_manRef_HR_19_19==T|TrialOne_manRef_Hosp_19_19==T)|
                      (TrialOne_manRef_HR_20_20==T|TrialOne_manRef_Hosp_20_20==T)|
                      (TrialOne_manRef_HR_21_21==T |TrialOne_manRef_Hosp_21_21==T)|
                      (TrialOne_manRef_HR_22_22==T|TrialOne_manRef_Hosp_22_22==T)|
                      (TrialOne_manRef_HR_23_23==T|TrialOne_manRef_Hosp_23_23==T)),
         bp_3:=bp_3-1]
xtabs(~t2reboot$bp_3, addNA=T)
# 31-33 week visit
t2reboot[,bp_4:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(202,216]",
                                     "(216,237]")|
           TrialOne_anvisitnew_31_33==T, bp_4:=1]
xtabs(~t2reboot$bp_4, addNA=T)
# removing opportunities
t2reboot[bp_4==1 &
           ((TrialOne_manRef_HR_24_24==T|TrialOne_manRef_Hosp_24_24==T)|
              (TrialOne_manRef_HR_25_25==T|TrialOne_manRef_Hosp_25_25==T)|
              (TrialOne_manRef_HR_26_26==T|TrialOne_manRef_Hosp_26_26==T)|
              (TrialOne_manRef_HR_27_27==T|TrialOne_manRef_Hosp_27_27==T)|
              (TrialOne_manRef_HR_28_28==T|TrialOne_manRef_Hosp_28_28==T)|
              (TrialOne_manRef_HR_29_29==T|TrialOne_manRef_Hosp_29_29==T)|
              (TrialOne_manRef_HR_30_30==T|TrialOne_manRef_Hosp_30_30==T)),
         bp_4:=bp_4-1]
xtabs(~t2reboot$bp_4, addNA=T)
# 35-37 week visit
t2reboot[,bp_5:=as.numeric(NA)]
t2reboot[bookgestagedays_cats %in% c("(237,244]", "(244,265]") |
           TrialOne_anvisitnew_35_37, bp_5:=1]
xtabs(~t2reboot$bp_5, addNA=T)
t2reboot[bp_5==1 &
           ((TrialOne_manRef_HR_31_31==T|TrialOne_manRef_Hosp_31_31==T)|
              (TrialOne_manRef_HR_32_32==T|TrialOne_manRef_Hosp_32_32==T)|
              (TrialOne_manRef_HR_33_33==T|TrialOne_manRef_Hosp_33_33==T)|
              (TrialOne_manRef_HR_34_34==T|TrialOne_manRef_Hosp_34_34==T)),
         bp_5:=bp_5-1]
xtabs(~t2reboot$bp_5, addNA=T)
################ successes ##########
# before 15 weeks
# HTN successes: a systolic BP recorded in the matching window.
# NOTE(review): Succ_1..Succ_5 overwrite the attendance successes of the
# same names defined earlier; prelimAtt has already been computed by this
# point, so this is safe only if section order is preserved.
# 15-17 week visit
t2reboot[,Succ_1a:=as.logical(NA)]
t2reboot[bp_1a==1, Succ_1a:=FALSE]
t2reboot[Succ_1a==F &
           TrialOne_anbpsyst_present_00_14==T, Succ_1a:=TRUE]
xtabs(~t2reboot$Succ_1a, addNA=T)
xtabs(~t2reboot$bp_1a, addNA=T)
# 15-17 week visit
t2reboot[,Succ_1:=as.logical(NA)]
t2reboot[bp_1==1, Succ_1:=FALSE]
t2reboot[Succ_1==F &
           TrialOne_anbpsyst_present_15_17==T, Succ_1:=TRUE]
xtabs(~t2reboot$bp_1, addNA=T)
xtabs(~t2reboot$Succ_1, addNA=T)
# 18-22 week visit
t2reboot[,Succ_2:=as.logical(NA)]
t2reboot[bp_2==1, Succ_2:=FALSE]
t2reboot[Succ_2==F & TrialOne_anbpsyst_present_18_22==T, Succ_2:=TRUE]
xtabs(~t2reboot$bp_2, addNA=T)
xtabs(~t2reboot$Succ_2, addNA=T)
# 24-28 week visit
t2reboot[,Succ_3:=as.logical(NA)]
t2reboot[bp_3==1, Succ_3:=as.logical(FALSE)]
t2reboot[Succ_3==F & TrialOne_anbpsyst_present_24_28==T, Succ_3:=TRUE]
xtabs(~t2reboot$bp_3, addNA=T)
xtabs(~t2reboot$Succ_3, addNA=T)
# 31-33 week visit
t2reboot[,Succ_4:=as.logical(NA)]
t2reboot[bp_4==1, Succ_4:=FALSE]
t2reboot[Succ_4==F & TrialOne_anbpsyst_present_31_33==T, Succ_4:=TRUE]
xtabs(~t2reboot$bp_4, addNA=T)
xtabs(~t2reboot$Succ_4, addNA=T)
# 35-37
t2reboot[,Succ_5:=as.logical(NA)]
t2reboot[bp_5==1, Succ_5:=FALSE]
t2reboot[Succ_5==F & TrialOne_anbpsyst_present_35_37==T, Succ_5:=TRUE]
xtabs(~t2reboot$bp_5, addNA=T)
xtabs(~t2reboot$Succ_5, addNA=T)
# Overall HTN summary table, written to Excel.
prelimHTN <- t2reboot[,.(N=.N,
                         Screenb415=sum(bp_1a==T, na.rm=T),
                         Success_1A=sum(Succ_1a==T, na.rm=T),
                         opport_15_17=sum(bp_1==T, na.rm=T),
                         Success_1=sum(Succ_1==T, na.rm=T),
                         opport_18_22=sum(bp_2==T, na.rm=T),
                         Success_2=sum(Succ_2==T, na.rm=T),
                         opport_24_28=sum(bp_3==T, na.rm=T),
                         Success_3=sum(Succ_3==T, na.rm=T),
                         opport_31_33=sum(bp_4==T, na.rm=T),
                         Success_4=sum(Succ_4==T, na.rm=T),
                         opport_35_37=sum(bp_5==T, na.rm=T),
                         Success_5=sum(Succ_5==T, na.rm=T))]
openxlsx::write.xlsx(prelimHTN,file.path(FOLDER_DATA_RESULTS,
                                         "T2",
                                         sprintf("%s_T2_recruitment_prelim_HTN.xlsx",
                                                 lubridate::today())))
|
#### Logistic Regression #####
#install.packages("cowplot", lib="/Library/Frameworks/R.framework/Versions/3.5/Resources/library")
library(cowplot)
library(dplyr)
library(ggplot2)
# Load the Egyptian HCV dataset (hard-coded local path).
data <- read.csv("C:/Users/saiprasad/Desktop/Fall 2019/Multi analysis/MVA/Project/Dataset/HCV-EGY-Data.csv")
# Outcome: RNA at end of treatment >= 400,000 => 'NC' (no clearance),
# otherwise 'C'.
# FIX: removed attach(data) (name-masking footgun; every later reference
# already uses data$ or a data= argument), a cbind() whose result was
# silently discarded, and the stray global created by the chained
# assignment `Survivorship = data$Survivorship <- ...`.
data$Survivorship <- if_else(data$RNA.EOT >= 400000, 'NC', 'C')
data$Survivorship <- as.factor(data$Survivorship)
#####################################
##
## Reformat the data so that it is
## 1) Easy to use (add nice column names)
## 2) Interpreted correctly by glm()..
##
#####################################
head(data) # you see data, but no column names
str(data)
# Convert "?" placeholders to NA across the whole frame.
# (The original comments about "ca"/"thal" columns were copied from a
# heart-disease tutorial and do not describe this dataset.)
data[data == "?"] <- NA
# Map numeric codes to labelled factors.
# FIX: the original pattern `data[data$x == 1, ]$x <- "M"` raises
# "missing values are not allowed in subscripted assignments" as soon as a
# column contains NA (possible after the "?" -> NA step above). which()
# drops NA positions; otherwise the resulting factors are identical
# (as.factor() sorts character levels alphabetically in both versions).
recode_levels <- function(x, map) {
  # map: named character vector; names are the numeric codes as strings.
  out <- as.character(x)
  for (code in names(map)) {
    out[which(out == code)] <- map[[code]]
  }
  as.factor(out)
}
data$Gender <- recode_levels(data$Gender, c("1" = "M", "2" = "F"))
data$Fever <- recode_levels(data$Fever, c("1" = "No", "2" = "Yes"))
data$Nausea.Vomting <- recode_levels(data$Nausea.Vomting, c("1" = "No", "2" = "Yes"))
data$Headache <- recode_levels(data$Headache, c("1" = "No", "2" = "Yes"))
data$Diarrhea <- recode_levels(data$Diarrhea, c("1" = "No", "2" = "Yes"))
data$Fatigue...generalized.bone.ache <- recode_levels(data$Fatigue...generalized.bone.ache,
                                                      c("1" = "No", "2" = "Yes"))
data$Jaundice <- recode_levels(data$Jaundice, c("1" = "No", "2" = "Yes"))
data$Epigastric.pain <- recode_levels(data$Epigastric.pain, c("1" = "No", "2" = "Yes"))
# NOTE: "Many Septa " keeps its original trailing space so downstream
# output labels are byte-identical.
data$Baselinehistological.staging <- recode_levels(data$Baselinehistological.staging,
                                                   c("1" = "Portal Fibrosis",
                                                     "2" = "Few Septa",
                                                     "3" = "Many Septa ",
                                                     "4" = "Cirrhosis"))
data$Baseline.histological.Grading <- as.factor(data$Baseline.histological.Grading)
str(data)
###################################
# Cross-tabulate each candidate predictor against the outcome to check
# cell counts before modelling.
xtabs(~ Survivorship + Gender, data=data)
xtabs(~ Survivorship + Fever, data=data)
xtabs(~ Survivorship + Nausea.Vomting, data=data)
xtabs(~ Survivorship + Headache, data=data)
xtabs(~ Survivorship + Diarrhea, data=data)
xtabs(~ Survivorship + Fatigue...generalized.bone.ache, data=data)
xtabs(~ Survivorship + Jaundice, data=data)
xtabs(~ Survivorship + Epigastric.pain, data=data)
xtabs(~ Survivorship + Baselinehistological.staging, data=data)
## Now we are ready for some logistic regression. First we'll create a very
## simple model that uses sex to predict heart disease
##
# NOTE(review): the prose comments in this section (heart disease,
# "healthy/unhealthy") were copied from a tutorial on a different dataset;
# the model actually predicts HCV RNA non-clearance vs clearance.
xtabs(~ Survivorship + Gender, data=data)
## Most of the females are healthy and most of the males are unhealthy.
## Being female is likely to decrease the odds in being unhealthy.
## In other words, if a sample is female, the odds are against it that it
## will be unhealthy
## Being male is likely to increase the odds in being unhealthy...
## In other words, if a sample is male, the odds are for it being unhealthy
logistic_simple <- glm(Survivorship ~ Gender, data=data, family="binomial")
summary(logistic_simple)
## The intercept is the log(odds) a female will be unhealthy. This is because
## female is the first factor in "sex" (the factors are ordered,
## alphabetically by default,"female", "male")
## Now let's look at the second coefficient...
## sexM 1.2737 0.2725 4.674 2.95e-06 ***
##
## sexM is the log(odds ratio) that tells us that if a sample has sex=M, the
## odds of being unhealthy are, on a log scale, 1.27 times greater than if
## a sample has sex=F.
# NOTE(review): the counts below (253/425, 229/478) are hard-coded --
# verify them against the xtabs output above before quoting the results.
female.log.odds <- log(253 /425)
female.log.odds
# Now you know how these are calculated
male.log.odds.ratio <- log((229 / 478) / (253/425))
male.log.odds.ratio
## Now calculate the overall "Pseudo R-squared" and its p-value
## NOTE: Since we are doing logistic regression...
## Null devaiance = 2*(0 - LogLikelihood(null model))
##                = -2*LogLikihood(null model)
## Residual deviance = 2*(0 - LogLikelihood(proposed model))
##                   = -2*LogLikelihood(proposed model)
ll.null <- logistic_simple$null.deviance/-2
ll.proposed <- logistic_simple$deviance/-2
ll.null
ll.proposed
## McFadden's Pseudo R^2 = [ LL(Null) - LL(Proposed) ] / LL(Null)
(ll.null - ll.proposed) / ll.null
## chi-square value = 2*(LL(Proposed) - LL(Null))
## p-value = 1 - pchisq(chi-square value, df = 2-1)
1 - pchisq(2*(ll.proposed - ll.null), df=1)
1 - pchisq((logistic_simple$null.deviance - logistic_simple$deviance), df=1)
## Lastly, let's see what this logistic regression predicts, given
## that a patient is either female or male (and no other data about them).
predicted.data <- data.frame(probability.of.Survivorship=logistic_simple$fitted.values,Gender=data$Gender)
predicted.data
## We can plot the data...
ggplot(data=predicted.data, aes(x=Gender, y=probability.of.Survivorship)) +
  geom_point(aes(color=Gender), size=5) +
  xlab("Gender") +
  ylab("Predicted probability of getting HCV Disease")
## Since there are only two probabilities (one for females and one for males),
## we can use a table to summarize the predicted probabilities.
xtabs(~ probability.of.Survivorship + Gender, data=predicted.data)
#####################################
##
## Now we will use all of the data available to predict heart disease. This is not the best way to do this
##
#####################################
# Full model: every remaining column as a predictor (includes RNA.EOT,
# from which the outcome was derived -- NOTE(review): that leaks the
# outcome into the predictors; confirm whether it should be excluded).
logistic <- glm(Survivorship ~ ., data=data, family="binomial")
summary(logistic)
## Now calculate the overall "Pseudo R-squared" and its p-value
ll.null <- logistic$null.deviance/-2
ll.proposed <- logistic$deviance/-2
## McFadden's Pseudo R^2 = [ LL(Null) - LL(Proposed) ] / LL(Null)
(ll.null - ll.proposed) / ll.null
## The p-value for the R^2
1 - pchisq(2*(ll.proposed - ll.null), df=(length(logistic$coefficients)-1))
## now we can plot the data
predicted.data <- data.frame(probability.of.Survivorship=logistic$fitted.values,Survivorship=data$Survivorship)
predicted.data <- predicted.data[order(predicted.data$probability.of.Survivorship, decreasing=FALSE),]
predicted.data$rank <- 1:nrow(predicted.data)
## Lastly, we can plot the predicted probabilities for each sample having
## heart disease and color by whether or not they actually had heart disease
ggplot(data=predicted.data, aes(x=rank, y=probability.of.Survivorship)) +
  geom_point(aes(color=Survivorship), alpha=1, shape=4, stroke=2) +
  xlab("Index") +
  ylab("Predicted probability of getting HCV disease")
# Few packages for confusion matrix. Lets look at them one by one
#install.packages("regclass", lib="/Library/Frameworks/R.framework/Versions/3.5/Resources/library")
library(regclass)
confusion_matrix(logistic)
#install.packages("caret", lib="/Library/Frameworks/R.framework/Versions/3.5/Resources/library")
library(caret)
pdata <- predict(logistic,newdata=data,type="response" )
pdata
data$Survivorship
#pdataF <- as.factor(ifelse(test=as.numeric(pdata>0.5) == 0, yes="Healthy", no="Unhealthy"))
#install.packages("e1071", lib="/Library/Frameworks/R.framework/Versions/3.5/Resources/library")
library(e1071)
#confusionMatrix(pdataF, data$Survivorship)
#install.packages("pROC", lib="/Library/Frameworks/R.framework/Versions/3.5/Resources/library")
library(pROC)
# ROC analysis. NOTE(review): several roc() calls below are exact
# duplicates (tutorial-style incremental re-plots); only the last variant
# of each is needed for the final figures.
roc(data$Survivorship,logistic$fitted.values,plot=TRUE)
par(pty = "s")
roc(data$Survivorship,logistic$fitted.values,plot=TRUE)
## NOTE: By default, roc() uses specificity on the x-axis and the values range
## from 1 to 0. This makes the graph look like what we would expect, but the
## x-axis itself might induce a headache. To use 1-specificity (i.e. the
## False Positive Rate) on the x-axis, set "legacy.axes" to TRUE.
roc(data$Survivorship,logistic$fitted.values,plot=TRUE, legacy.axes=TRUE)
roc(data$Survivorship,logistic$fitted.values,plot=TRUE, legacy.axes=TRUE, xlab="False Positive Percentage", ylab="True Postive Percentage")
roc(data$Survivorship,logistic$fitted.values,plot=TRUE, legacy.axes=TRUE, xlab="False Positive Percentage", ylab="True Postive Percentage", col="#377eb8", lwd=4)
roc(data$Survivorship,logistic$fitted.values,plot=TRUE, legacy.axes=TRUE, xlab="False Positive Percentage", ylab="True Postive Percentage", col="#377eb8", lwd=4)
## If we want to find out the optimal threshold we can store the
## data used to make the ROC graph in a variable...
roc.info <- roc(data$Survivorship, logistic$fitted.values, legacy.axes=TRUE)
str(roc.info)
roc.df <- data.frame(tpp=roc.info$sensitivities*100, ## tpp = true positive percentage
                     fpp=(1 - roc.info$specificities)*100, ## fpp = false positive precentage
                     thresholds=roc.info$thresholds)
roc.df
head(roc.df) ## head() will show us the values for the upper right-hand corner of the ROC graph, when the threshold is so low
## (negative infinity) that every single sample is called "obese".
## Thus TPP = 100% and FPP = 100%
tail(roc.df) ## tail() will show us the values for the lower left-hand corner
## of the ROC graph, when the threshold is so high (infinity)
## that every single sample is called "not obese".
## Thus, TPP = 0% and FPP = 0%
## now let's look at the thresholds between TPP 60% and 80%
roc.df[roc.df$tpp > 60 & roc.df$tpp < 80,]
roc(data$Survivorship,logistic$fitted.values,plot=TRUE, legacy.axes=TRUE, xlab="False Positive Percentage", ylab="True Postive Percentage", col="#377eb8", lwd=4, percent=TRUE)
roc(data$Survivorship,logistic$fitted.values,plot=TRUE, legacy.axes=TRUE, xlab="False Positive Percentage", ylab="True Postive Percentage", col="#377eb8", lwd=4, percent=TRUE, print.auc=TRUE)
roc(data$Survivorship,logistic$fitted.values,plot=TRUE, legacy.axes=TRUE, xlab="False Positive Percentage", ylab="True Postive Percentage", col="#377eb8", lwd=4, percent=TRUE, print.auc=TRUE, partial.auc=c(100, 90), auc.polygon = TRUE, auc.polygon.col = "#377eb822", print.auc.x=45)
# Lets do two roc plots to understand which model is better
roc(data$Survivorship, logistic_simple$fitted.values, plot=TRUE, legacy.axes=TRUE, percent=TRUE, xlab="False Positive Percentage", ylab="True Postive Percentage", col="#377eb8", lwd=4, print.auc=TRUE)
# Lets add the other graph
plot.roc(data$Survivorship, logistic$fitted.values, percent=TRUE, col="#4daf4a", lwd=4, print.auc=TRUE, add=TRUE, print.auc.y=40)
legend("bottomright", legend=c("Simple", "Non Simple"), col=c("#377eb8", "#4daf4a"), lwd=4) # Make it user friendly
|
/log_reg.R
|
no_license
|
kaustubhchalke/Data-set-for-Hepatitis-C-Virus-HCV-for-Egyptian-patients
|
R
| false
| false
| 11,440
|
r
|
#### Logistic Regression #####
# Predict treatment outcome ("Survivorship") for HCV patients with logistic
# regression. Requires: cowplot, dplyr, ggplot2.
#install.packages("cowplot", lib="/Library/Frameworks/R.framework/Versions/3.5/Resources/library")
library(cowplot)
library(dplyr)
library(ggplot2)

data <- read.csv("C:/Users/saiprasad/Desktop/Fall 2019/Multi analysis/MVA/Project/Dataset/HCV-EGY-Data.csv")

# Derive the response: patients whose end-of-treatment viral load (RNA.EOT)
# is >= 400000 are labelled "NC", otherwise "C".
# Fixes over the previous version:
#  - no attach(data): every later statement references columns via data$...,
#    so attaching only risked masking workspace variables;
#  - the chained `Survivorship = data$Survivorship <- ...` double assignment
#    and the unassigned (dead) cbind(...) line are removed.
data$Survivorship <- if_else(data$RNA.EOT >= 400000, 'NC', 'C')
data$Survivorship <- as.factor(data$Survivorship)

#####################################
##
## Reformat the data so that it is
## 1) Easy to use (add nice column names)
## 2) Interpreted correctly by glm()..
##
#####################################
head(data) # you see data, but no column names
str(data)
# str() shows which columns still need to be converted to factors, and that
# missing values are coded as "?". Convert "?"s to NAs so R treats them as
# missing...
data[data == "?"] <- NA
## Now add factors for variables that are factors and clean up the factors
## that had missing data...

# Recode a 1/2-coded symptom column as a No/Yes factor (1 = "No", 2 = "Yes").
# Vectorised and NA-safe: the original row-subset assignment
# (data[data$X == 1,]$X <- "No") is repeated eight times and misbehaves when
# the logical index contains NA. Resulting factor levels ("No","Yes") match
# the alphabetical ordering as.factor() produced before.
recode_no_yes <- function(x) {
  factor(ifelse(x == 1, "No", "Yes"), levels = c("No", "Yes"))
}

# Gender: 1 = male, 2 = female; explicit levels reproduce the original
# alphabetical ("F","M") ordering.
data$Gender <- factor(ifelse(data$Gender == 1, "M", "F"), levels = c("F", "M"))

yes_no_cols <- c("Fever", "Nausea.Vomting", "Headache", "Diarrhea",
                 "Fatigue...generalized.bone.ache", "Jaundice",
                 "Epigastric.pain")
for (v in yes_no_cols) {
  data[[v]] <- recode_no_yes(data[[v]])
}

# Baseline histological staging: map numeric codes 1-4 to stage names.
# (The trailing space in "Many Septa " reproduces the original label.)
staging_labels <- c(`1` = "Portal Fibrosis", `2` = "Few Septa",
                    `3` = "Many Septa ", `4` = "Cirrhosis")
data$Baselinehistological.staging <-
  unname(staging_labels[as.character(data$Baselinehistological.staging)])

data$Baseline.histological.Grading <- as.factor(data$Baseline.histological.Grading)
data$Baselinehistological.staging <- as.factor(data$Baselinehistological.staging)
str(data)
###################################
# Cross-tabulate the response against each categorical predictor, to check
# that every factor level has observations in both outcome classes (a level
# with zero counts in one class makes glm() coefficients unstable).
# Replaces nine copy-pasted xtabs() calls; print() is required so the tables
# are still displayed when the script is run non-interactively.
xtab_vars <- c("Gender", "Fever", "Nausea.Vomting", "Headache", "Diarrhea",
               "Fatigue...generalized.bone.ache", "Jaundice",
               "Epigastric.pain", "Baselinehistological.staging")
for (v in xtab_vars) {
  print(xtabs(as.formula(paste("~ Survivorship +", v)), data = data))
}
## Now we are ready for some logistic regression. First we'll create a very
## simple model that uses Gender alone to predict the treatment outcome.
tab <- xtabs(~ Survivorship + Gender, data = data)
tab

logistic_simple <- glm(Survivorship ~ Gender, data = data, family = "binomial")
summary(logistic_simple)

## The intercept is the log(odds) for the reference level of Gender ("F",
## the first factor level), and the GenderM coefficient is the log(odds
## ratio) for males relative to females.
## Verify both directly from the contingency table. The previous version
## hard-coded counts (253/425, 229/478) apparently copied from a different
## (heart-disease) dataset; deriving them from `tab` makes the check hold
## for this data.
## NOTE(review): glm() models the probability of the second level of the
## response factor ("NC" here) -- confirm against summary(logistic_simple).
female.log.odds <- log(tab["NC", "F"] / tab["C", "F"])
female.log.odds
male.log.odds.ratio <- log((tab["NC", "M"] / tab["C", "M"]) /
                             (tab["NC", "F"] / tab["C", "F"]))
male.log.odds.ratio

## Now calculate the overall "Pseudo R-squared" and its p-value.
## NOTE: for logistic regression...
##   Null deviance     = -2*LogLikelihood(null model)
##   Residual deviance = -2*LogLikelihood(proposed model)
ll.null <- logistic_simple$null.deviance / -2
ll.proposed <- logistic_simple$deviance / -2
ll.null
ll.proposed
## McFadden's Pseudo R^2 = [ LL(Null) - LL(Proposed) ] / LL(Null)
(ll.null - ll.proposed) / ll.null
## chi-square value = 2*(LL(Proposed) - LL(Null));
## p-value = 1 - pchisq(chi-square value, df = 2 - 1)
1 - pchisq(2 * (ll.proposed - ll.null), df = 1)
1 - pchisq((logistic_simple$null.deviance - logistic_simple$deviance), df = 1)

## Lastly, the predicted probability for each patient given only Gender.
predicted.data <- data.frame(
  probability.of.Survivorship = logistic_simple$fitted.values,
  Gender = data$Gender
)
predicted.data
## We can plot the data...
ggplot(data = predicted.data, aes(x = Gender, y = probability.of.Survivorship)) +
  geom_point(aes(color = Gender), size = 5) +
  xlab("Gender") +
  ylab("Predicted probability of getting HCV Disease")
## Since there are only two probabilities (one per gender), a table
## summarises the predicted probabilities.
xtabs(~ probability.of.Survivorship + Gender, data = predicted.data)
#####################################
##
## Now use all of the available predictors. (No train/test split -- not
## best practice, but it mirrors the simple model above.)
##
#####################################
logistic <- glm(Survivorship ~ ., data = data, family = "binomial")
summary(logistic)

## McFadden's Pseudo R-squared and its p-value for the full model.
ll.null <- logistic$null.deviance / -2
ll.proposed <- logistic$deviance / -2
(ll.null - ll.proposed) / ll.null
1 - pchisq(2 * (ll.proposed - ll.null), df = (length(logistic$coefficients) - 1))

## Plot each sample's predicted probability, ordered low to high, coloured
## by the observed outcome.
predicted.data <- data.frame(
  probability.of.Survivorship = logistic$fitted.values,
  Survivorship = data$Survivorship
)
predicted.data <- predicted.data[order(predicted.data$probability.of.Survivorship,
                                       decreasing = FALSE), ]
predicted.data$rank <- 1:nrow(predicted.data)
ggplot(data = predicted.data, aes(x = rank, y = probability.of.Survivorship)) +
  geom_point(aes(color = Survivorship), alpha = 1, shape = 4, stroke = 2) +
  xlab("Index") +
  ylab("Predicted probability of getting HCV disease")

# Confusion matrix -- regclass builds it straight from the fitted model.
#install.packages("regclass", lib="/Library/Frameworks/R.framework/Versions/3.5/Resources/library")
library(regclass)
confusion_matrix(logistic)

#install.packages("caret", lib="/Library/Frameworks/R.framework/Versions/3.5/Resources/library")
library(caret)
pdata <- predict(logistic, newdata = data, type = "response")
pdata
data$Survivorship
#pdataF <- as.factor(ifelse(test=as.numeric(pdata>0.5) == 0, yes="Healthy", no="Unhealthy"))
#install.packages("e1071", lib="/Library/Frameworks/R.framework/Versions/3.5/Resources/library")
library(e1071)
#confusionMatrix(pdataF, data$Survivorship)

## ROC analysis -----------------------------------------------------------
## The previous version drew seven progressively-decorated ROC plots,
## including two byte-identical duplicate calls; only the distinct ones are
## kept here.
#install.packages("pROC", lib="/Library/Frameworks/R.framework/Versions/3.5/Resources/library")
library(pROC)
par(pty = "s")  # square plotting region so the ROC curve is undistorted
## legacy.axes=TRUE puts 1 - specificity (the false positive rate) on the
## x-axis instead of specificity running from 1 down to 0.
roc(data$Survivorship, logistic$fitted.values, plot = TRUE, legacy.axes = TRUE,
    xlab = "False Positive Percentage", ylab = "True Postive Percentage",
    col = "#377eb8", lwd = 4)

## Store the ROC data to inspect candidate thresholds.
roc.info <- roc(data$Survivorship, logistic$fitted.values, legacy.axes = TRUE)
str(roc.info)
roc.df <- data.frame(tpp = roc.info$sensitivities * 100,        ## true positive %
                     fpp = (1 - roc.info$specificities) * 100,  ## false positive %
                     thresholds = roc.info$thresholds)
roc.df
head(roc.df) ## threshold -Inf: every sample called positive -> TPP = FPP = 100%
tail(roc.df) ## threshold +Inf: every sample called negative -> TPP = FPP = 0%
## thresholds giving a true-positive percentage between 60% and 80%
roc.df[roc.df$tpp > 60 & roc.df$tpp < 80, ]

## AUC, then the partial AUC over the 100-90% specificity range.
roc(data$Survivorship, logistic$fitted.values, plot = TRUE, legacy.axes = TRUE,
    xlab = "False Positive Percentage", ylab = "True Postive Percentage",
    col = "#377eb8", lwd = 4, percent = TRUE, print.auc = TRUE)
roc(data$Survivorship, logistic$fitted.values, plot = TRUE, legacy.axes = TRUE,
    xlab = "False Positive Percentage", ylab = "True Postive Percentage",
    col = "#377eb8", lwd = 4, percent = TRUE, print.auc = TRUE,
    partial.auc = c(100, 90), auc.polygon = TRUE,
    auc.polygon.col = "#377eb822", print.auc.x = 45)

## Overlay both models' ROC curves to compare them.
roc(data$Survivorship, logistic_simple$fitted.values, plot = TRUE,
    legacy.axes = TRUE, percent = TRUE, xlab = "False Positive Percentage",
    ylab = "True Postive Percentage", col = "#377eb8", lwd = 4, print.auc = TRUE)
plot.roc(data$Survivorship, logistic$fitted.values, percent = TRUE,
         col = "#4daf4a", lwd = 4, print.auc = TRUE, add = TRUE,
         print.auc.y = 40)
legend("bottomright", legend = c("Simple", "Non Simple"),
       col = c("#377eb8", "#4daf4a"), lwd = 4) # Make it user friendly
|
## R code for Exploratory Data Analysis Class Project #1
## Plot #4: a 2x2 panel of Global Active Power, Voltage, Energy sub metering
## (the outputs of Plot 2 and Plot 3) and Global Reactive Power for
## 2007-02-01 and 2007-02-02, from the UCI "Individual household electric
## power consumption" dataset
## (https://archive.ics.uci.edu/ml/datasets/Individual+household+electric+power+consumption),
## retrieved 2015-05-09.

## Read a few rows first to learn the column classes, then reuse them for
## the full read -- read.table() is much faster with colClasses preset.
## (http://www.biostat.jhsph.edu/~rpeng/docs/R-large-tables.html)
learnData <- read.table("household_power_consumption.txt", header = TRUE,
                        sep = ";", na.strings = "?", nrows = 5)
classes <- sapply(learnData, class)
allData <- read.table("household_power_consumption.txt", header = TRUE,
                      sep = ";", na.strings = "?", colClasses = classes)

## Convert Date from character to Date, then keep only the two target days.
allData$Date <- as.Date(allData$Date, format = "%d/%m/%Y")
dataToPlot <- subset(allData, Date == "2007-02-01" | Date == "2007-02-02")

## Combine Date and Time into one datetime column.
## Fix over the previous version: use as.POSIXct, not strptime -- strptime
## returns POSIXlt, a list-like class that misbehaves inside data frames.
dataToPlot$Date <- as.POSIXct(paste(dataToPlot$Date, dataToPlot$Time, sep = " "),
                              format = "%Y-%m-%d %H:%M:%S")
## remove the now-redundant Time column
dataToPlot$Time <- NULL

## create the PNG file with a 2x2 plot area and reasonable margins
png(file = "Plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2, 2), mar = c(4, 4, 2, 1))

## panel 1: Global Active Power v. datetime
plot(x = dataToPlot$Date, y = dataToPlot$Global_active_power, xlab = " ",
     ylab = 'Global Active Power (kilowatts)', col = "Black", type = "l")
## panel 2: Voltage v. datetime
plot(x = dataToPlot$Date, y = dataToPlot$Voltage, xlab = "dateTime",
     ylab = 'Voltage', col = "Black", type = "l")
## panel 3: the three sub-metering series on a single chart
plot(x = dataToPlot$Date, y = dataToPlot$Sub_metering_1, xlab = " ",
     ylab = 'Energy sub metering', col = "Black", type = "l")
lines(x = dataToPlot$Date, y = dataToPlot$Sub_metering_2, col = "Red",
      type = "l", lwd = 2)
lines(x = dataToPlot$Date, y = dataToPlot$Sub_metering_3, col = "Blue",
      type = "l", lwd = 2)
legend("topright", c("Sub Metering 1", "Sub Metering 2", "Sub Metering 3"),
       border = NULL, lty = c(1, 1, 1), lwd = c(2.5, 2.5, 2.5),
       col = c("Black", "Red", "Blue"))
## panel 4: Global Reactive Power v. datetime
plot(x = dataToPlot$Date, y = dataToPlot$Global_reactive_power,
     xlab = "dateTime", ylab = 'Global_reactive_power', col = "Black",
     type = "l")
## complete the file
dev.off()
|
/Plot4.R
|
no_license
|
mindymon/ExData_Plotting1
|
R
| false
| false
| 3,087
|
r
|
## R code for Exploratory Data Analysis Class Project #1
## Plot #4: a 2x2 panel of Global Active Power, Voltage, Energy sub metering
## (the outputs of Plot 2 and Plot 3) and Global Reactive Power for
## 2007-02-01 and 2007-02-02, from the UCI "Individual household electric
## power consumption" dataset
## (https://archive.ics.uci.edu/ml/datasets/Individual+household+electric+power+consumption),
## retrieved 2015-05-09.

## Read a few rows first to learn the column classes, then reuse them for
## the full read -- read.table() is much faster with colClasses preset.
## (http://www.biostat.jhsph.edu/~rpeng/docs/R-large-tables.html)
learnData <- read.table("household_power_consumption.txt", header = TRUE,
                        sep = ";", na.strings = "?", nrows = 5)
classes <- sapply(learnData, class)
allData <- read.table("household_power_consumption.txt", header = TRUE,
                      sep = ";", na.strings = "?", colClasses = classes)

## Convert Date from character to Date, then keep only the two target days.
allData$Date <- as.Date(allData$Date, format = "%d/%m/%Y")
dataToPlot <- subset(allData, Date == "2007-02-01" | Date == "2007-02-02")

## Combine Date and Time into one datetime column.
## Fix over the previous version: use as.POSIXct, not strptime -- strptime
## returns POSIXlt, a list-like class that misbehaves inside data frames.
dataToPlot$Date <- as.POSIXct(paste(dataToPlot$Date, dataToPlot$Time, sep = " "),
                              format = "%Y-%m-%d %H:%M:%S")
## remove the now-redundant Time column
dataToPlot$Time <- NULL

## create the PNG file with a 2x2 plot area and reasonable margins
png(file = "Plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2, 2), mar = c(4, 4, 2, 1))

## panel 1: Global Active Power v. datetime
plot(x = dataToPlot$Date, y = dataToPlot$Global_active_power, xlab = " ",
     ylab = 'Global Active Power (kilowatts)', col = "Black", type = "l")
## panel 2: Voltage v. datetime
plot(x = dataToPlot$Date, y = dataToPlot$Voltage, xlab = "dateTime",
     ylab = 'Voltage', col = "Black", type = "l")
## panel 3: the three sub-metering series on a single chart
plot(x = dataToPlot$Date, y = dataToPlot$Sub_metering_1, xlab = " ",
     ylab = 'Energy sub metering', col = "Black", type = "l")
lines(x = dataToPlot$Date, y = dataToPlot$Sub_metering_2, col = "Red",
      type = "l", lwd = 2)
lines(x = dataToPlot$Date, y = dataToPlot$Sub_metering_3, col = "Blue",
      type = "l", lwd = 2)
legend("topright", c("Sub Metering 1", "Sub Metering 2", "Sub Metering 3"),
       border = NULL, lty = c(1, 1, 1), lwd = c(2.5, 2.5, 2.5),
       col = c("Black", "Red", "Blue"))
## panel 4: Global Reactive Power v. datetime
plot(x = dataToPlot$Date, y = dataToPlot$Global_reactive_power,
     xlab = "dateTime", ylab = 'Global_reactive_power', col = "Black",
     type = "l")
## complete the file
dev.off()
|
#############################################################################
# Copyright (c) 2012 Christophe Dutang
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
#
#############################################################################
### utility functions for constrained equations in GNE
###
### R functions
###
#functions of the Constrained Equation Reformulation of the GNEP
#z = (x, lam, mu, w)
#with sizes (n, m, dimmu, m + dimmu)
# Evaluate the CER system at z. The returned vector stacks:
#  - the first n stationarity components of the semismooth reformulation
#    (funSSR with the Fischer-Burmeister complementarity function phiFB),
#  - each player's constraint values shifted by their slacks, g(z) + w1,
#  - the joint constraint shifted by its slacks, h(x) + w2,
#  - the complementarity products lam*w1 and mu*w2.
# Argument validation/normalisation is delegated to testargfunCER, which
# returns the checked arguments in `arg`.
funCER <- function(z, dimx, dimlam,
	grobj, arggrobj,
	constr, argconstr,
	grconstr, arggrconstr,
	dimmu, joint, argjoint,
	grjoint, arggrjoint,
	echo=FALSE)
{
	# validate and normalise all arguments
	arg <- testargfunCER(z, dimx, dimlam, grobj, arggrobj, constr, argconstr, grconstr, arggrconstr,
		dimmu, joint, argjoint, grjoint, arggrjoint, echo)
	dimx <- arg$dimx
	dimlam <- arg$dimlam
	dimmu <- arg$dimmu
	# n = total number of primal variables, m = total number of multipliers
	n <- sum(arg$dimx)
	m <- sum(arg$dimlam)
	# split z into primal variables, multipliers and slacks
	x <- z[1:n]
	lam <- z[(n+1):(n+m)]
	mu <- z[(n+m+1):(n+m+dimmu)]
	w <- z[-(1:(n+m+dimmu))]   # slack variables, one per constraint
	# stationarity residuals: first n rows of the semismooth reformulation
	part1 <- funSSR(z[1:(n+m+dimmu)], dimx, dimlam, grobj, arggrobj, constr, argconstr, grconstr, arggrconstr,
		compl=phiFB, argcompl=NULL, dimmu, joint, argjoint, grjoint, arggrjoint)[1:n]
	# g_i(z): constraint values of player i
	Constri <- function(i) arg$constr(z, i, arg$argconstr)
	if(!is.null(arg$grconstr))
	{
		# player feasibility g(z) + w1 and complementarity lam * w1
		part2a <- unlist(sapply(1:arg$nplayer, Constri)) + w[1:m]
		part3a <- lam * w[1:m]
	}else
		part2a <- part3a <- NULL
	if(!is.null(arg$joint))
	{
		# joint feasibility h(x) + w2 and complementarity mu * w2
		part2b <- arg$joint(x, arg$argjoint) + w[m+1:dimmu]
		part3b <- mu * w[m+1:dimmu]
	}else
		part2b <- part3b <- NULL
	c( part1, part2a, part2b, part3a, part3b )
}
#z = (x, lam, mu, w1, w2)
#with sizes (n, m, p, m, p) where p = dimmu
# Jacobian of the CER system funCER, assembled block-wise as
#   [ D2L     gG        gH        0         0        ]
#   [ jacG    0         0         I_m       0        ]
#   [ jacH    0         0         0         I_p      ]
#   [ 0       diag(w1)  0         diag(lam) 0        ]
#   [ 0       0         diag(w2)  0         diag(mu) ]
# where D2L (Hessian of the Lagrangian), gG and gH come from the semismooth
# Jacobian jacSSR, and jacG/jacH are the Jacobians of the player and joint
# constraints. Rows/columns of absent constraint types are dropped.
# Fixes over the previous version: removed the unused locals `x`, `index4x`
# and the never-called helper `GrjConstri`.
jacCER <- function(z, dimx, dimlam,
	heobj, argheobj,
	constr, argconstr,
	grconstr, arggrconstr,
	heconstr, argheconstr,
	dimmu, joint, argjoint,
	grjoint, arggrjoint,
	hejoint, arghejoint,
	echo=FALSE)
{
	# validate and normalise all arguments
	arg <- testargjacCER(z, dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr,
		heconstr, argheconstr, dimmu, joint, argjoint, grjoint, arggrjoint,
		hejoint, arghejoint, echo)
	dimx <- arg$dimx
	dimlam <- arg$dimlam
	dimmu <- arg$dimmu
	n <- sum(arg$dimx)    # number of primal variables
	m <- sum(arg$dimlam)  # number of player-constraint multipliers
	p <- dimmu            # number of joint-constraint multipliers
	lam <- z[(n+1):(n+m)]
	mu <- z[(n+m+1):(n+m+p)]
	w1 <- z[(n+m+p+1):(n+m+p+m)]      # slacks of the player constraints
	w2 <- z[(n+2*m+p+1):(n+2*m+2*p)]  # slacks of the joint constraint
	nplayer <- arg$nplayer
	#1st row is the begin index, 2nd row the end index of each player's
	#multiplier block inside lam
	index4lam <- rbind( cumsum(dimlam) - dimlam + 1, cumsum(dimlam) )
	#d g_i / d x_j: gradient component of player i's constraints
	jacconstrij <- function(i, j)
	{
		arg$grconstr(z, i, j, arg$arggrconstr)
	}
	#d h / d x_j: gradient component of the joint constraint
	jacjointj <- function(j)
	{
		arg$grjoint(z, j, arg$arggrjoint)
	}
	# Jacobian of the semismooth reformulation; its upper-left blocks carry
	# the Hessian of the Lagrangian and the constraint gradients.
	partSSR <- jacSSR(z[1:(n+m+p)], dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr,
		heconstr, argheconstr, gcompla=GrAphiFB, gcomplb=GrBphiFB, argcompl=NULL,
		dimmu, joint, argjoint, grjoint, arggrjoint, hejoint, arghejoint)
	#Hessian matrix of the Lagrangian
	ggL <- partSSR[1:n, 1:n]
	#gradient of the constraint function
	if(!is.null(arg$heconstr))
		gG <- partSSR[1:n, n+ 1:m]
	#gradient of the joint function
	if(!is.null(arg$hejoint))
		gH <- partSSR[1:n, n+m+ 1:p]
	#Jacobian of the constraint function
	jacG <- matrix(0, m, n)
	if(!is.null(arg$heconstr))
	for(i in 1:nplayer)
		for(j in 1:sum(dimx))
			jacG[index4lam[1,i]:index4lam[2,i] , j] <- jacconstrij(i,j)
	#Jacobian of the joint function
	jacH <- matrix(0, p, n)
	if(!is.null(arg$hejoint))
	for(j in 1:sum(dimx))
		jacH[, j] <- jacjointj(j)
	# zero padding blocks for the assembled Jacobian
	m0mm <- matrix(0, m, m)
	m0pp <- matrix(0, p, p)
	m0mp <- matrix(0, m, p)
	m0mn <- matrix(0, m, n)
	m0pn <- matrix(0, p, n)
	# assemble, dropping rows/columns of absent constraint types
	if(!is.null(arg$hejoint) && !is.null(arg$heconstr))
		res <- rbind(cbind(ggL, gG, gH, t(m0mn), t(m0pn)),
			 cbind(jacG, m0mm, m0mp, diag(m), m0mp),
			 cbind(jacH, t(m0mp), m0pp, t(m0mp), diag(p)),
			 cbind(m0mn, diag(w1), m0mp, diag(lam), m0mp),
			 cbind(m0pn, t(m0mp), diag(w2), t(m0mp), diag(mu))
			 )
	else if(is.null(arg$hejoint) && !is.null(arg$heconstr))
		res <- rbind(cbind(ggL, gG, t(m0mn)),
			 cbind(jacG, m0mm, diag(m)),
			 cbind(m0mn, diag(w1), diag(lam))
			 )
	else if(!is.null(arg$hejoint) && is.null(arg$heconstr))
		res <- rbind(cbind(ggL, gH, t(m0pn)),
			 cbind(jacH, m0pp, diag(p)),
			 cbind(m0pn, diag(w2), diag(mu))
			 )
	else
		res <- ggL
	return(res)
}
|
/R/util-CER.R
|
no_license
|
cran/GNE
|
R
| false
| false
| 6,758
|
r
|
#############################################################################
# Copyright (c) 2012 Christophe Dutang
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
#
#############################################################################
### utility functions for constrained equations in GNE
###
### R functions
###
#functions of the Constrained Equation Reformulation of the GNEP
#z = (x, lam, mu, w)
#with sizes (n, m, dimmu, m + dimmu)
# Evaluate the CER system at z. The returned vector stacks:
#  - the first n stationarity components of the semismooth reformulation
#    (funSSR with the Fischer-Burmeister complementarity function phiFB),
#  - each player's constraint values shifted by their slacks, g(z) + w1,
#  - the joint constraint shifted by its slacks, h(x) + w2,
#  - the complementarity products lam*w1 and mu*w2.
# Argument validation/normalisation is delegated to testargfunCER, which
# returns the checked arguments in `arg`.
funCER <- function(z, dimx, dimlam,
	grobj, arggrobj,
	constr, argconstr,
	grconstr, arggrconstr,
	dimmu, joint, argjoint,
	grjoint, arggrjoint,
	echo=FALSE)
{
	# validate and normalise all arguments
	arg <- testargfunCER(z, dimx, dimlam, grobj, arggrobj, constr, argconstr, grconstr, arggrconstr,
		dimmu, joint, argjoint, grjoint, arggrjoint, echo)
	dimx <- arg$dimx
	dimlam <- arg$dimlam
	dimmu <- arg$dimmu
	# n = total number of primal variables, m = total number of multipliers
	n <- sum(arg$dimx)
	m <- sum(arg$dimlam)
	# split z into primal variables, multipliers and slacks
	x <- z[1:n]
	lam <- z[(n+1):(n+m)]
	mu <- z[(n+m+1):(n+m+dimmu)]
	w <- z[-(1:(n+m+dimmu))]   # slack variables, one per constraint
	# stationarity residuals: first n rows of the semismooth reformulation
	part1 <- funSSR(z[1:(n+m+dimmu)], dimx, dimlam, grobj, arggrobj, constr, argconstr, grconstr, arggrconstr,
		compl=phiFB, argcompl=NULL, dimmu, joint, argjoint, grjoint, arggrjoint)[1:n]
	# g_i(z): constraint values of player i
	Constri <- function(i) arg$constr(z, i, arg$argconstr)
	if(!is.null(arg$grconstr))
	{
		# player feasibility g(z) + w1 and complementarity lam * w1
		part2a <- unlist(sapply(1:arg$nplayer, Constri)) + w[1:m]
		part3a <- lam * w[1:m]
	}else
		part2a <- part3a <- NULL
	if(!is.null(arg$joint))
	{
		# joint feasibility h(x) + w2 and complementarity mu * w2
		part2b <- arg$joint(x, arg$argjoint) + w[m+1:dimmu]
		part3b <- mu * w[m+1:dimmu]
	}else
		part2b <- part3b <- NULL
	c( part1, part2a, part2b, part3a, part3b )
}
#z = (x, lam, mu, w1, w2)
#with sizes (n, m, p, m, p) where p = dimmu
# Jacobian of the CER system funCER, assembled block-wise as
#   [ D2L     gG        gH        0         0        ]
#   [ jacG    0         0         I_m       0        ]
#   [ jacH    0         0         0         I_p      ]
#   [ 0       diag(w1)  0         diag(lam) 0        ]
#   [ 0       0         diag(w2)  0         diag(mu) ]
# where D2L (Hessian of the Lagrangian), gG and gH come from the semismooth
# Jacobian jacSSR, and jacG/jacH are the Jacobians of the player and joint
# constraints. Rows/columns of absent constraint types are dropped.
# Fixes over the previous version: removed the unused locals `x`, `index4x`
# and the never-called helper `GrjConstri`.
jacCER <- function(z, dimx, dimlam,
	heobj, argheobj,
	constr, argconstr,
	grconstr, arggrconstr,
	heconstr, argheconstr,
	dimmu, joint, argjoint,
	grjoint, arggrjoint,
	hejoint, arghejoint,
	echo=FALSE)
{
	# validate and normalise all arguments
	arg <- testargjacCER(z, dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr,
		heconstr, argheconstr, dimmu, joint, argjoint, grjoint, arggrjoint,
		hejoint, arghejoint, echo)
	dimx <- arg$dimx
	dimlam <- arg$dimlam
	dimmu <- arg$dimmu
	n <- sum(arg$dimx)    # number of primal variables
	m <- sum(arg$dimlam)  # number of player-constraint multipliers
	p <- dimmu            # number of joint-constraint multipliers
	lam <- z[(n+1):(n+m)]
	mu <- z[(n+m+1):(n+m+p)]
	w1 <- z[(n+m+p+1):(n+m+p+m)]      # slacks of the player constraints
	w2 <- z[(n+2*m+p+1):(n+2*m+2*p)]  # slacks of the joint constraint
	nplayer <- arg$nplayer
	#1st row is the begin index, 2nd row the end index of each player's
	#multiplier block inside lam
	index4lam <- rbind( cumsum(dimlam) - dimlam + 1, cumsum(dimlam) )
	#d g_i / d x_j: gradient component of player i's constraints
	jacconstrij <- function(i, j)
	{
		arg$grconstr(z, i, j, arg$arggrconstr)
	}
	#d h / d x_j: gradient component of the joint constraint
	jacjointj <- function(j)
	{
		arg$grjoint(z, j, arg$arggrjoint)
	}
	# Jacobian of the semismooth reformulation; its upper-left blocks carry
	# the Hessian of the Lagrangian and the constraint gradients.
	partSSR <- jacSSR(z[1:(n+m+p)], dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr,
		heconstr, argheconstr, gcompla=GrAphiFB, gcomplb=GrBphiFB, argcompl=NULL,
		dimmu, joint, argjoint, grjoint, arggrjoint, hejoint, arghejoint)
	#Hessian matrix of the Lagrangian
	ggL <- partSSR[1:n, 1:n]
	#gradient of the constraint function
	if(!is.null(arg$heconstr))
		gG <- partSSR[1:n, n+ 1:m]
	#gradient of the joint function
	if(!is.null(arg$hejoint))
		gH <- partSSR[1:n, n+m+ 1:p]
	#Jacobian of the constraint function
	jacG <- matrix(0, m, n)
	if(!is.null(arg$heconstr))
	for(i in 1:nplayer)
		for(j in 1:sum(dimx))
			jacG[index4lam[1,i]:index4lam[2,i] , j] <- jacconstrij(i,j)
	#Jacobian of the joint function
	jacH <- matrix(0, p, n)
	if(!is.null(arg$hejoint))
	for(j in 1:sum(dimx))
		jacH[, j] <- jacjointj(j)
	# zero padding blocks for the assembled Jacobian
	m0mm <- matrix(0, m, m)
	m0pp <- matrix(0, p, p)
	m0mp <- matrix(0, m, p)
	m0mn <- matrix(0, m, n)
	m0pn <- matrix(0, p, n)
	# assemble, dropping rows/columns of absent constraint types
	if(!is.null(arg$hejoint) && !is.null(arg$heconstr))
		res <- rbind(cbind(ggL, gG, gH, t(m0mn), t(m0pn)),
			 cbind(jacG, m0mm, m0mp, diag(m), m0mp),
			 cbind(jacH, t(m0mp), m0pp, t(m0mp), diag(p)),
			 cbind(m0mn, diag(w1), m0mp, diag(lam), m0mp),
			 cbind(m0pn, t(m0mp), diag(w2), t(m0mp), diag(mu))
			 )
	else if(is.null(arg$hejoint) && !is.null(arg$heconstr))
		res <- rbind(cbind(ggL, gG, t(m0mn)),
			 cbind(jacG, m0mm, diag(m)),
			 cbind(m0mn, diag(w1), diag(lam))
			 )
	else if(!is.null(arg$hejoint) && is.null(arg$heconstr))
		res <- rbind(cbind(ggL, gH, t(m0pn)),
			 cbind(jacH, m0pp, diag(p)),
			 cbind(m0pn, diag(w2), diag(mu))
			 )
	else
		res <- ggL
	return(res)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hic2mat.R
\name{hic2mat}
\alias{hic2mat}
\title{extract HiC data from .hic file and convert it to squared HiC matrix}
\usage{
hic2mat(file = "path/to/file", chromosome1, chromosome2, resol,
method = "NONE")
}
\arguments{
\item{file}{the path to the .hic file.}
\item{chromosome1}{character number which specify chromosome to extract.}
\item{chromosome2}{for intra-chromosome data, set it same as chromosome 1;
for inter-chromosome, set to another chromosome.}
\item{resol}{resolution, i.e., bin size.}
\item{method}{specifies what data to extract, raw or normalized.
Must be one of "NONE", "VC", "VC_SQRT", "KR". "NONE" will give raw counts.
VC is vanilla coverage, VC_SQRT is square root of vanilla coverage, and KR
is Knight-Ruiz or Balanced normalization.}
}
\value{
a squared HiC matrix that can be recognized by get.scc.
}
\description{
extract HiC data from .hic file and convert it to squared HiC matrix
}
\references{
HiCRep: assessing the reproducibility of Hi-C data using a
stratum-adjusted correlation coefficient. Tao Yang, Feipeng Zhang, Galip
Gurkan Yardimci, Fan Song, Ross C Hardison, William Stafford Noble,
Feng Yue, Qunhua Li. Genome Research 2017. doi: 10.1101/gr.220640.117
}
|
/man/hic2mat.Rd
|
no_license
|
TaoYang-dev/hicrep
|
R
| false
| true
| 1,290
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hic2mat.R
\name{hic2mat}
\alias{hic2mat}
\title{extract HiC data from .hic file and convert it to squared HiC matrix}
\usage{
hic2mat(file = "path/to/file", chromosome1, chromosome2, resol,
method = "NONE")
}
\arguments{
\item{file}{the path to the .hic file.}
\item{chromosome1}{character number which specify chromosome to extract.}
\item{chromosome2}{for intra-chromosome data, set it same as chromosome 1;
for inter-chromosome, set to another chromosome.}
\item{resol}{resolution, i.e., bin size.}
\item{method}{specifies what data to extract, raw or normalized.
Must be one of "NONE", "VC", "VC_SQRT", "KR". "NONE" will give raw counts.
VC is vanilla coverage, VC_SQRT is square root of vanilla coverage, and KR
is Knight-Ruiz or Balanced normalization.}
}
\value{
a squared HiC matrix that can be recognized by get.scc.
}
\description{
extract HiC data from .hic file and convert it to squared HiC matrix
}
\references{
HiCRep: assessing the reproducibility of Hi-C data using a
stratum-adjusted correlation coefficient. Tao Yang, Feipeng Zhang, Galip
Gurkan Yardimci, Fan Song, Ross C Hardison, William Stafford Noble,
Feng Yue, Qunhua Li. Genome Research 2017. doi: 10.1101/gr.220640.117
}
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
library("lubridate")
library("dplyr")

# Read the raw readings ("?" marks missing values), build a POSIX datetime
# from the Date and Time columns, then keep only the two target days.
raw <- read.csv("household_power_consumption.txt", sep = ";", na.strings = c("?"))
stamped <- mutate(raw, ndate = dmy_hms(paste(Date, " ", Time)))
data <- filter(stamped,
               ndate >= ymd_hms("2007-2-1 0:0:0") &
                 ndate <= ymd_hms("2007-2-2 23:59:59"))

# Render the histogram to a PNG device.
png("plot1.png")
hist(data$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", col = "red")
dev.off()
|
/plot1.R
|
no_license
|
jumarti/ExData_Plotting1
|
R
| false
| false
| 414
|
r
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
library("lubridate")
library("dplyr")

# Read the raw readings ("?" marks missing values), build a POSIX datetime
# from the Date and Time columns, then keep only the two target days.
raw <- read.csv("household_power_consumption.txt", sep = ";", na.strings = c("?"))
stamped <- mutate(raw, ndate = dmy_hms(paste(Date, " ", Time)))
data <- filter(stamped,
               ndate >= ymd_hms("2007-2-1 0:0:0") &
                 ndate <= ymd_hms("2007-2-2 23:59:59"))

# Render the histogram to a PNG device.
png("plot1.png")
hist(data$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", col = "red")
dev.off()
|
#' @include glmnetUtils.r
NULL

# S3 generic: dispatches cv.glmnet on the class of `x`, so that a formula
# method can coexist with glmnet's native matrix interface.
#' @name cv.glmnet
#' @export
cv.glmnet <- function(x, ...)
UseMethod("cv.glmnet")

# Default method: matrix input is passed straight through to
# glmnet::cv.glmnet, preserving the original package behaviour.
#' @rdname cv.glmnet
#' @method cv.glmnet default
#' @export
cv.glmnet.default <- function(x, ...)
glmnet::cv.glmnet(x, ...)
#' Formula interface for elastic net cross-validation with cv.glmnet
#'
#' @param x For the default method, a matrix of predictor variables.
#' @param formula A model formula; interaction terms are allowed and will be expanded per the usual rules for linear models.
#' @param data A data frame or matrix containing the variables in the formula.
#' @param weights An optional vector of case weights to be used in the fitting process. If missing, defaults to an unweighted fit.
#' @param offset An optional vector of offsets, an \emph{a priori} known component to be included in the linear predictor.
#' @param subset An optional vector specifying the subset of observations to be used to fit the model.
#' @param na.action A function which indicates what should happen when the data contains missing values. For the \code{predict} method, \code{na.action = na.pass} will predict missing values with \code{NA}; \code{na.omit} or \code{na.exclude} will drop them.
#' @param drop.unused.levels Should factors have unused levels dropped? Defaults to \code{FALSE}.
#' @param xlev A named list of character vectors giving the full set of levels to be assumed for each factor.
#' @param alpha The elastic net mixing parameter. See \code{\link[glmnet:glmnet]{glmnet::glmnet}} for more details.
#' @param nfolds The number of crossvalidation folds to use. See \code{\link[glmnet:cv.glmnet]{glmnet::cv.glmnet}} for more details.
#' @param sparse Should the model matrix be in sparse format? This can save memory when dealing with many factor variables, each with many levels (but see the warning below).
#' @param use.model.frame Should the base \code{\link{model.frame}} function be used when constructing the model matrix? This is the standard method that most R modelling functions use, but has some disadvantages. The default is to avoid \code{model.frame} and construct the model matrix term-by-term; see \link[=glmnet.model.matrix]{discussion}.
#' @param ... For \code{cv.glmnet.formula} and \code{cv.glmnet.default}, other arguments to be passed to \code{\link[glmnet:cv.glmnet]{glmnet::cv.glmnet}}; for the \code{predict} and \code{coef} methods, arguments to be passed to their counterparts in package \code{glmnet}.
#'
#' @details
#' The \code{cv.glmnet} function in this package is an S3 generic with a formula and a default method. The former calls the latter, and the latter is simply a direct call to the \code{cv.glmnet} function in package \code{glmnet}. All the arguments to \code{glmnet::cv.glmnet} are (or should be) supported.
#'
#' The code works in a similar manner to \code{lm}, \code{glm} and other modelling functions. The arguments are used to generate a \emph{model frame}, which is a data frame augmented with information about the roles the columns play in fitting the model. This is then turned into a \emph{model matrix} and a response vector, which are passed to \code{glmnet::glmnet} along with any arguments in \code{...}. If \code{sparse} is TRUE, then \code{Matrix::sparse.model.matrix} is used instead of \code{stats::model.matrix} to create the model matrix.
#'
#' The \code{predict} and \code{coef} methods are wrappers for the corresponding methods in the \code{glmnet} package. The former constructs a predictor model matrix from its \code{newdata} argument and passes that as the \code{newx} argument to \code{glmnet:::predict.cv.glmnet}.
#'
#' @return
#' For \code{cv.glmnet.formula}, an object of class \code{cv.glmnet.formula}. This is basically the same object created by \code{glmnet::cv.glmnet}, but with extra components to allow formula usage.
#'
#' @seealso
#' \code{\link[glmnet:cv.glmnet]{glmnet::cv.glmnet}}, \code{\link[glmnet:predict.cv.glmnet]{glmnet:::predict.cv.glmnet}}, \code{\link[glmnet:coef.cv.glmnet]{glmnet:::coef.cv.glmnet}}, \code{\link{model.frame}}, \code{\link{model.matrix}}
#'
#' @examples
#' cv.glmnet(mpg ~ ., data=mtcars)
#'
#' cv.glmnet(Species ~ ., data=iris, family="multinomial")
#'
#' \dontrun{
#'
#' # Leukemia example dataset from Trevor Hastie's website
#' download.file("http://web.stanford.edu/~hastie/glmnet/glmnetData/Leukemia.RData",
#' "Leukemia.RData")
#' load("Leukemia.RData")
#' leuk <- do.call(data.frame, Leukemia)
#' cv.glmnet(y ~ ., leuk, family="binomial")
#' }
#' @rdname cv.glmnet
#' @method cv.glmnet formula
#' @importFrom glmnet cv.glmnet
#' @export
cv.glmnet.formula <- function(formula, data, alpha=1, nfolds=10, ..., weights=NULL, offset=NULL, subset=NULL,
                              na.action=getOption("na.action"), drop.unused.levels=FALSE, xlev=NULL,
                              sparse=FALSE, use.model.frame=FALSE)
{
    # Build the model matrix and response from the formula.  This must use
    # NSE (rewriting the call and evaluating it in the parent frame) so that
    # model.frame-style evaluation of weights/offset/subset works.
    cl <- match.call(expand.dots=FALSE)
    cl[[1]] <- if(use.model.frame)
        makeModelComponentsMF
    else makeModelComponents
    xy <- eval.parent(cl)
    # BUG FIX: nfolds is a named parameter of this method, so it is removed
    # from '...' and was previously never forwarded -- a user-supplied value
    # was silently ignored and glmnet's own default always used.  Pass it
    # through explicitly.
    model <- glmnet::cv.glmnet(xy$x, xy$y, weights=xy$weights, offset=xy$offset, alpha=alpha,
                               nfolds=nfolds, ...)
    # Record formula-interface metadata so predict/coef/print can reconstruct
    # the model matrix from new data frames.
    model$call <- match.call()
    model$terms <- xy$terms
    model$alpha <- alpha
    model$nfolds <- nfolds
    model$sparse <- sparse
    model$use.model.frame <- use.model.frame
    model$na.action <- na.action
    class(model) <- c("cv.glmnet.formula", class(model))
    model
}
#' @param object For the \code{predict} and \code{coef} methods, an object of class \code{cv.glmnet.formula}.
#' @param newdata For the \code{predict} method, a data frame containing the observations for which to calculate predictions.
#' @rdname cv.glmnet
#' @method predict cv.glmnet.formula
#' @export
predict.cv.glmnet.formula <- function(object, newdata, na.action=na.pass, ...)
{
    if(!inherits(object, "cv.glmnet.formula"))
        stop("invalid cv.glmnet.formula object")
    # Rebuild the predictor matrix from newdata using the terms saved at fit
    # time.  This must use NSE so the call is evaluated like model.frame.
    cl <- match.call(expand.dots=FALSE)
    cl$formula <- object$terms
    cl$data <- cl$newdata
    cl[[1]] <- if(object$use.model.frame)
        makeModelComponentsMF
    else makeModelComponents
    xy <- eval.parent(cl)
    x <- xy$x
    offset <- xy$offset
    # Drop the formula class so predict() dispatches to glmnet's cv.glmnet
    # method.
    class(object) <- class(object)[-1]
    # BUG FIX: the offset extracted from newdata was previously discarded.
    # Forward it as newoffset when present, so models fitted with an offset
    # predict correctly; behaviour is unchanged when there is no offset.
    if(is.null(offset))
        predict(object, x, ...)
    else predict(object, x, newoffset=offset, ...)
}
#' @rdname cv.glmnet
#' @method coef cv.glmnet.formula
#' @export
coef.cv.glmnet.formula <- function(object, ...)
{
    # Coefficients for a formula-based cv.glmnet fit: validate the object,
    # then strip the outermost class so that coef() dispatches to the
    # underlying method for the remaining class.
    if (!inherits(object, "cv.glmnet.formula"))
        stop("invalid cv.glmnet.formula object")
    remaining <- class(object)[-1]
    class(object) <- remaining
    coef(object, ...)
}
#' @rdname cv.glmnet
#' @method print cv.glmnet.formula
#' @export
# Print method for formula-based cv.glmnet fits: echoes the original call,
# then the formula-specific fitting options stored on the object.
# Code left byte-identical: the exact console output (including cat's
# numeric formatting) is the contract here.
print.cv.glmnet.formula <- function(x, ...)
{
    cat("Call:\n")
    dput(x$call)  # show the call unevaluated
    cat("\nModel fitting options:")
    cat("\n Sparse model matrix:", x$sparse)
    cat("\n Use model.frame:", x$use.model.frame)
    cat("\n Number of crossvalidation folds:", x$nfolds)
    cat("\n Alpha:", x$alpha)
    cat("\n Deviance-minimizing lambda:", x$lambda.min, " (+1 SE):", x$lambda.1se)
    cat("\n")
    invisible(x)  # standard print-method convention: return the object invisibly
}
|
/R/cvGlmnetFormula.r
|
no_license
|
rferrali/glmnetUtils
|
R
| false
| false
| 7,134
|
r
|
#' @include glmnetUtils.r
NULL
#' @name cv.glmnet
#' @export
# S3 generic: dispatches on the class of the first argument, so a formula
# reaches cv.glmnet.formula while a matrix falls through to the default
# method (and hence to glmnet::cv.glmnet).
cv.glmnet <- function(x, ...)
    UseMethod("cv.glmnet")
#' @rdname cv.glmnet
#' @method cv.glmnet default
#' @export
# Default method: a thin pass-through to glmnet::cv.glmnet for matrix input,
# forwarding every additional argument via '...'.
cv.glmnet.default <- function(x, ...)
    glmnet::cv.glmnet(x, ...)
#' Formula interface for elastic net cross-validation with cv.glmnet
#'
#' @param x For the default method, a matrix of predictor variables.
#' @param formula A model formula; interaction terms are allowed and will be expanded per the usual rules for linear models.
#' @param data A data frame or matrix containing the variables in the formula.
#' @param weights An optional vector of case weights to be used in the fitting process. If missing, defaults to an unweighted fit.
#' @param offset An optional vector of offsets, an \emph{a priori} known component to be included in the linear predictor.
#' @param subset An optional vector specifying the subset of observations to be used to fit the model.
#' @param na.action A function which indicates what should happen when the data contains missing values. For the \code{predict} method, \code{na.action = na.pass} will predict missing values with \code{NA}; \code{na.omit} or \code{na.exclude} will drop them.
#' @param drop.unused.levels Should factors have unused levels dropped? Defaults to \code{FALSE}.
#' @param xlev A named list of character vectors giving the full set of levels to be assumed for each factor.
#' @param alpha The elastic net mixing parameter. See \code{\link[glmnet:glmnet]{glmnet::glmnet}} for more details.
#' @param nfolds The number of crossvalidation folds to use. See \code{\link[glmnet:cv.glmnet]{glmnet::cv.glmnet}} for more details.
#' @param sparse Should the model matrix be in sparse format? This can save memory when dealing with many factor variables, each with many levels (but see the warning below).
#' @param use.model.frame Should the base \code{\link{model.frame}} function be used when constructing the model matrix? This is the standard method that most R modelling functions use, but has some disadvantages. The default is to avoid \code{model.frame} and construct the model matrix term-by-term; see \link[=glmnet.model.matrix]{discussion}.
#' @param ... For \code{cv.glmnet.formula} and \code{cv.glmnet.default}, other arguments to be passed to \code{\link[glmnet:cv.glmnet]{glmnet::cv.glmnet}}; for the \code{predict} and \code{coef} methods, arguments to be passed to their counterparts in package \code{glmnet}.
#'
#' @details
#' The \code{cv.glmnet} function in this package is an S3 generic with a formula and a default method. The former calls the latter, and the latter is simply a direct call to the \code{cv.glmnet} function in package \code{glmnet}. All the arguments to \code{glmnet::cv.glmnet} are (or should be) supported.
#'
#' The code works in a similar manner to \code{lm}, \code{glm} and other modelling functions. The arguments are used to generate a \emph{model frame}, which is a data frame augmented with information about the roles the columns play in fitting the model. This is then turned into a \emph{model matrix} and a response vector, which are passed to \code{glmnet::glmnet} along with any arguments in \code{...}. If \code{sparse} is TRUE, then \code{Matrix::sparse.model.matrix} is used instead of \code{stats::model.matrix} to create the model matrix.
#'
#' The \code{predict} and \code{coef} methods are wrappers for the corresponding methods in the \code{glmnet} package. The former constructs a predictor model matrix from its \code{newdata} argument and passes that as the \code{newx} argument to \code{glmnet:::predict.cv.glmnet}.
#'
#' @return
#' For \code{cv.glmnet.formula}, an object of class \code{cv.glmnet.formula}. This is basically the same object created by \code{glmnet::cv.glmnet}, but with extra components to allow formula usage.
#'
#' @seealso
#' \code{\link[glmnet:cv.glmnet]{glmnet::cv.glmnet}}, \code{\link[glmnet:predict.cv.glmnet]{glmnet:::predict.cv.glmnet}}, \code{\link[glmnet:coef.cv.glmnet]{glmnet:::coef.cv.glmnet}}, \code{\link{model.frame}}, \code{\link{model.matrix}}
#'
#' @examples
#' cv.glmnet(mpg ~ ., data=mtcars)
#'
#' cv.glmnet(Species ~ ., data=iris, family="multinomial")
#'
#' \dontrun{
#'
#' # Leukemia example dataset from Trevor Hastie's website
#' download.file("http://web.stanford.edu/~hastie/glmnet/glmnetData/Leukemia.RData",
#' "Leukemia.RData")
#' load("Leukemia.RData")
#' leuk <- do.call(data.frame, Leukemia)
#' cv.glmnet(y ~ ., leuk, family="binomial")
#' }
#' @rdname cv.glmnet
#' @method cv.glmnet formula
#' @importFrom glmnet cv.glmnet
#' @export
cv.glmnet.formula <- function(formula, data, alpha=1, nfolds=10, ..., weights=NULL, offset=NULL, subset=NULL,
                              na.action=getOption("na.action"), drop.unused.levels=FALSE, xlev=NULL,
                              sparse=FALSE, use.model.frame=FALSE)
{
    # Build the model matrix and response from the formula.  This must use
    # NSE (rewriting the call and evaluating it in the parent frame) so that
    # model.frame-style evaluation of weights/offset/subset works.
    cl <- match.call(expand.dots=FALSE)
    cl[[1]] <- if(use.model.frame)
        makeModelComponentsMF
    else makeModelComponents
    xy <- eval.parent(cl)
    # BUG FIX: nfolds is a named parameter of this method, so it is removed
    # from '...' and was previously never forwarded -- a user-supplied value
    # was silently ignored and glmnet's own default always used.  Pass it
    # through explicitly.
    model <- glmnet::cv.glmnet(xy$x, xy$y, weights=xy$weights, offset=xy$offset, alpha=alpha,
                               nfolds=nfolds, ...)
    # Record formula-interface metadata so predict/coef/print can reconstruct
    # the model matrix from new data frames.
    model$call <- match.call()
    model$terms <- xy$terms
    model$alpha <- alpha
    model$nfolds <- nfolds
    model$sparse <- sparse
    model$use.model.frame <- use.model.frame
    model$na.action <- na.action
    class(model) <- c("cv.glmnet.formula", class(model))
    model
}
#' @param object For the \code{predict} and \code{coef} methods, an object of class \code{cv.glmnet.formula}.
#' @param newdata For the \code{predict} method, a data frame containing the observations for which to calculate predictions.
#' @rdname cv.glmnet
#' @method predict cv.glmnet.formula
#' @export
predict.cv.glmnet.formula <- function(object, newdata, na.action=na.pass, ...)
{
    if(!inherits(object, "cv.glmnet.formula"))
        stop("invalid cv.glmnet.formula object")
    # Rebuild the predictor matrix from newdata using the terms saved at fit
    # time.  This must use NSE so the call is evaluated like model.frame.
    cl <- match.call(expand.dots=FALSE)
    cl$formula <- object$terms
    cl$data <- cl$newdata
    cl[[1]] <- if(object$use.model.frame)
        makeModelComponentsMF
    else makeModelComponents
    xy <- eval.parent(cl)
    x <- xy$x
    offset <- xy$offset
    # Drop the formula class so predict() dispatches to glmnet's cv.glmnet
    # method.
    class(object) <- class(object)[-1]
    # BUG FIX: the offset extracted from newdata was previously discarded.
    # Forward it as newoffset when present, so models fitted with an offset
    # predict correctly; behaviour is unchanged when there is no offset.
    if(is.null(offset))
        predict(object, x, ...)
    else predict(object, x, newoffset=offset, ...)
}
#' @rdname cv.glmnet
#' @method coef cv.glmnet.formula
#' @export
coef.cv.glmnet.formula <- function(object, ...)
{
    # Coefficients for a formula-based cv.glmnet fit: validate the object,
    # then strip the outermost class so that coef() dispatches to the
    # underlying method for the remaining class.
    if (!inherits(object, "cv.glmnet.formula"))
        stop("invalid cv.glmnet.formula object")
    remaining <- class(object)[-1]
    class(object) <- remaining
    coef(object, ...)
}
#' @rdname cv.glmnet
#' @method print cv.glmnet.formula
#' @export
# Print method for formula-based cv.glmnet fits: echoes the original call,
# then the formula-specific fitting options stored on the object.
# Code left byte-identical: the exact console output (including cat's
# numeric formatting) is the contract here.
print.cv.glmnet.formula <- function(x, ...)
{
    cat("Call:\n")
    dput(x$call)  # show the call unevaluated
    cat("\nModel fitting options:")
    cat("\n Sparse model matrix:", x$sparse)
    cat("\n Use model.frame:", x$use.model.frame)
    cat("\n Number of crossvalidation folds:", x$nfolds)
    cat("\n Alpha:", x$alpha)
    cat("\n Deviance-minimizing lambda:", x$lambda.min, " (+1 SE):", x$lambda.1se)
    cat("\n")
    invisible(x)  # standard print-method convention: return the object invisibly
}
|
# Merge the Stack Overflow and Super User exports into one labelled CSV.
library(tidyverse)

# Read one site's export and tag each row with its site of origin.
read_site <- function(path, label) {
  site_data <- read.csv(path)
  site_data$site <- label
  site_data
}

merged <- merge(read_site("dados stackoverflow.csv", "stackoverflow"),
                read_site("dados superuser.csv", "superuser"),
                all = TRUE)
write.csv(merged, file = "redesing-2-data.csv")
|
/pos/redesigns/redesign-2.R
|
no_license
|
diogoflorencio/Visualizacao-de-Dados
|
R
| false
| false
| 286
|
r
|
# Merge the Stack Overflow and Super User exports into one labelled CSV.
library(tidyverse)

# Read one site's export and tag each row with its site of origin.
read_site <- function(path, label) {
  site_data <- read.csv(path)
  site_data$site <- label
  site_data
}

merged <- merge(read_site("dados stackoverflow.csv", "stackoverflow"),
                read_site("dados superuser.csv", "superuser"),
                all = TRUE)
write.csv(merged, file = "redesing-2-data.csv")
|
#' Maintain a list of previously processed files within a directory
#'
#' @param path A character vector of a directory containing files
#' @param pattern A glob pattern to match within the file names
#' @param qaqc.file The destination file of the data after QAQC/processing
#'
#' @return A vector of file paths in \code{path} that are not yet listed in
#'   the directory's \code{__processed_files.txt} checkpoint file.
#' @export
#'
#' @examples
checkpoint_directory <-
  function(path,
           pattern = NULL,
           qaqc.file = NULL) {
    stopifnot(is.character(path),
              is.null(pattern) || is.character(pattern),
              fs::dir_exists(path))
    if (!is.null(qaqc.file)) {
      stopifnot(is.character(qaqc.file),
                fs::dir_exists(fs::path_dir(qaqc.file)))
    }
    raw_directory <- fs::path_norm(path)
    raw_files <- fs::dir_ls(raw_directory, glob = pattern)
    checkpoint_file <- fs::path(raw_directory, "__processed_files.txt")
    if (fs::file_exists(checkpoint_file)) {
      existing_qaqc <- get_qaqc_file(checkpoint_file)
      if (!is.null(qaqc.file) && qaqc.file != existing_qaqc) {
        stop("The checkpoint file ", checkpoint_file, " has the QAQC file listed as\n",
             existing_qaqc,
             "\n which does not match the supplied file\n",
             qaqc.file)
      }
      # Lines 1-2 of the checkpoint file are header comments; the rest are
      # basenames of already-processed files.
      processed_files <-
        readr::read_lines(checkpoint_file,
                          skip = 2)
      # Keep only files whose basename has not been recorded as processed.
      # (Replaces the previous '!match(..., nomatch = 0)' numeric-negation
      # trick with the equivalent, clearer %in% test.)
      raw_files <-
        raw_files[!fs::path_file(raw_files) %in% fs::path_file(processed_files)]
    } else {
      if (is.null(qaqc.file)) {
        stop("qaqc.file must be supplied if ", checkpoint_file, " does not already exist.")
      }
      # BUG FIX: write_lines was called unqualified; qualify with readr:: for
      # consistency with the rest of the file (and so this works when readr
      # is not attached).
      readr::write_lines(c("# Files that have been processed and incorporated into QAQC data.",
                           paste0("# ", qaqc.file)),
                         checkpoint_file)
    }
    return(raw_files)
  }
#' Append data to an existing QAQC file and create a back-up
#'
#' @param data The data to write; must contain an \code{input_source} column.
#' @param input.files The files used in processing
#' @param ignore.names Logical indicating whether or not to check if the names
#'   of the existing data and new data should match
#'
#' @return Called for its side effects: backs up and updates the QAQC file,
#'   and records the processed file names in the checkpoint file.
#' @export
#'
#' @examples
write_qaqc <-
  function(data,
           input.files,
           ignore.names = FALSE) {
    stopifnot(is.data.frame(data),
              is.character(input.files),
              "input_source" %in% names(data))
    # The checkpoint file lives at the root of the input files' common directory.
    if (length(input.files) == 1) {
      input_directory <-
        fs::path_dir(input.files)
    } else {
      input_directory <-
        fs::path_common(input.files)
    }
    checkpoint_file <-
      fs::path(input_directory,
               "__processed_files.txt")
    if (!fs::file_exists(checkpoint_file)) {
      # BUG FIX: this message previously referenced 'source.directory', a
      # variable that does not exist in this function, so raising the error
      # itself errored.  Use input_directory instead.
      stop("There is no checkpoint file, '__processed_files.txt' in ",
           input_directory)
    }
    qaqc_file <-
      get_qaqc_file(checkpoint_file)
    if (fs::file_exists(qaqc_file)) {
      qaqc_data <-
        suppressMessages(readr::read_csv(qaqc_file,
                                         guess_max = 100000,
                                         locale = readr::locale(tz = "EST")))
      if (!identical(names(qaqc_data), names(data)) && !ignore.names) {
        # Check on set operations for automatic check
        # (message typo fixed: 'mach' -> 'match')
        stop("Column names for QAQC do not match column names for data")
      }
      # Back up the existing QAQC file before touching it.
      fs::file_copy(qaqc_file,
                    fs::path_ext_set(qaqc_file, "bak"),
                    overwrite = TRUE)
      if (nrow(qaqc_data) != 0) {
        # Coerce everything to character so anti_join can compare columns
        # that may have been read with different types.  map_dfc is
        # qualified with purrr:: for consistency with the fs::/readr:: style
        # used elsewhere in this file.
        data <-
          purrr::map_dfc(data, as.character)
        qaqc_data <-
          purrr::map_dfc(qaqc_data, as.character)
        data <-
          dplyr::anti_join(data,
                           qaqc_data,
                           by = intersect(names(qaqc_data), names(data)))
      }
      # Only rewrite the QAQC file when there are genuinely new rows.
      if (nrow(data) > 0) {
        data <-
          dplyr::bind_rows(data,
                           qaqc_data)
        write.csv(data,
                  qaqc_file,
                  row.names = FALSE)
      }
    } else {
      write.csv(data,
                qaqc_file,
                row.names = FALSE)
    }
    # Record the processed file names so they are skipped next time.
    readr::write_lines(fs::path_file(input.files),
                       checkpoint_file,
                       append = TRUE)
    message("QAQC data were written to ", qaqc_file, "\n",
            " ", checkpoint_file, " was updated with the processed file names\n")
  }
## REMOVE QAQC DATA FUNCTION
# Deletes __processed_files.txt and removes data from those files from QAQC
#' Remove a directory of processed data
#'
#' This will remove the __processed_files.txt file and delete the data from the
#' QAQC file specified in __processed_files.txt
#'
#' @param path The directory to 'unprocess'
#'
#' @return Called for its side effects: rewrites (or deletes) the QAQC file
#'   and removes the checkpoint file.
#' @export
#'
#' @examples
unprocess_directory <-
  function(path) {
    stopifnot(is.character(path),
              fs::dir_exists(path),
              fs::path_ext(path) != "csv")
    # Read in processed file list from checkpoint file
    checkpoint_file <-
      fs::path(path,
               "__processed_files.txt")
    processed_files <-
      readr::read_lines(checkpoint_file,
                        skip = 2)
    qaqc_file <-
      get_qaqc_file(checkpoint_file)
    if (!fs::file_exists(qaqc_file)) {
      stop("The QAQC file (", qaqc_file, ") listed in ", checkpoint_file, " does not exist")
    }
    # Back up the QAQC data, then drop every observation that came from the
    # files recorded in the checkpoint file.
    fs::file_copy(qaqc_file,
                  fs::path_ext_set(qaqc_file, "bak"),
                  overwrite = TRUE)
    # BUG FIX: read_csv has no 'guess' argument; the intended argument is
    # 'guess_max' (as used in write_qaqc).  dplyr::filter is qualified for
    # consistency with the fs::/readr:: style used elsewhere.
    qaqc <-
      suppressMessages(readr::read_csv(qaqc_file,
                                       guess_max = 100000)) %>%
      dplyr::filter(!(input_source %in% processed_files))
    # Save updated QAQC, or delete it if nothing is left
    if (nrow(qaqc) > 0) {
      write.csv(qaqc,
                qaqc_file,
                row.names = FALSE)
    } else {
      fs::file_delete(qaqc_file)
    }
    # Delete checkpoint file
    fs::file_delete(checkpoint_file)
  }
#' Extract the qaqc filename from __processed_files.txt
#'
#' @param x The path to __processed_files.txt
#'
#' @return The QAQC file path recorded on the second line of the checkpoint
#'   file, with the leading "# " comment marker stripped.
#' @export
#'
#' @examples
get_qaqc_file <-
  function(x) {
    # The QAQC path is stored on line 2 of the checkpoint file as "# <path>".
    qaqc_line <- readr::read_lines(x, skip = 1, n_max = 1)
    stringr::str_remove(qaqc_line, "# ")
  }
|
/R/best_practices_functions.R
|
no_license
|
jpshanno/jpshanno
|
R
| false
| false
| 6,247
|
r
|
#' Maintain a list of previously processed files within a directory
#'
#' @param path A character vector of a directory containing files
#' @param pattern A glob pattern to match within the file names
#' @param qaqc.file The destination file of the data after QAQC/processing
#'
#' @return A vector of file paths in \code{path} that are not yet listed in
#'   the directory's \code{__processed_files.txt} checkpoint file.
#' @export
#'
#' @examples
checkpoint_directory <-
  function(path,
           pattern = NULL,
           qaqc.file = NULL) {
    stopifnot(is.character(path),
              is.null(pattern) || is.character(pattern),
              fs::dir_exists(path))
    if (!is.null(qaqc.file)) {
      stopifnot(is.character(qaqc.file),
                fs::dir_exists(fs::path_dir(qaqc.file)))
    }
    raw_directory <- fs::path_norm(path)
    raw_files <- fs::dir_ls(raw_directory, glob = pattern)
    checkpoint_file <- fs::path(raw_directory, "__processed_files.txt")
    if (fs::file_exists(checkpoint_file)) {
      existing_qaqc <- get_qaqc_file(checkpoint_file)
      if (!is.null(qaqc.file) && qaqc.file != existing_qaqc) {
        stop("The checkpoint file ", checkpoint_file, " has the QAQC file listed as\n",
             existing_qaqc,
             "\n which does not match the supplied file\n",
             qaqc.file)
      }
      # Lines 1-2 of the checkpoint file are header comments; the rest are
      # basenames of already-processed files.
      processed_files <-
        readr::read_lines(checkpoint_file,
                          skip = 2)
      # Keep only files whose basename has not been recorded as processed.
      # (Replaces the previous '!match(..., nomatch = 0)' numeric-negation
      # trick with the equivalent, clearer %in% test.)
      raw_files <-
        raw_files[!fs::path_file(raw_files) %in% fs::path_file(processed_files)]
    } else {
      if (is.null(qaqc.file)) {
        stop("qaqc.file must be supplied if ", checkpoint_file, " does not already exist.")
      }
      # BUG FIX: write_lines was called unqualified; qualify with readr:: for
      # consistency with the rest of the file (and so this works when readr
      # is not attached).
      readr::write_lines(c("# Files that have been processed and incorporated into QAQC data.",
                           paste0("# ", qaqc.file)),
                         checkpoint_file)
    }
    return(raw_files)
  }
#' Append data to an existing QAQC file and create a back-up
#'
#' @param data The data to write; must contain an \code{input_source} column.
#' @param input.files The files used in processing
#' @param ignore.names Logical indicating whether or not to check if the names
#'   of the existing data and new data should match
#'
#' @return Called for its side effects: backs up and updates the QAQC file,
#'   and records the processed file names in the checkpoint file.
#' @export
#'
#' @examples
write_qaqc <-
  function(data,
           input.files,
           ignore.names = FALSE) {
    stopifnot(is.data.frame(data),
              is.character(input.files),
              "input_source" %in% names(data))
    # The checkpoint file lives at the root of the input files' common directory.
    if (length(input.files) == 1) {
      input_directory <-
        fs::path_dir(input.files)
    } else {
      input_directory <-
        fs::path_common(input.files)
    }
    checkpoint_file <-
      fs::path(input_directory,
               "__processed_files.txt")
    if (!fs::file_exists(checkpoint_file)) {
      # BUG FIX: this message previously referenced 'source.directory', a
      # variable that does not exist in this function, so raising the error
      # itself errored.  Use input_directory instead.
      stop("There is no checkpoint file, '__processed_files.txt' in ",
           input_directory)
    }
    qaqc_file <-
      get_qaqc_file(checkpoint_file)
    if (fs::file_exists(qaqc_file)) {
      qaqc_data <-
        suppressMessages(readr::read_csv(qaqc_file,
                                         guess_max = 100000,
                                         locale = readr::locale(tz = "EST")))
      if (!identical(names(qaqc_data), names(data)) && !ignore.names) {
        # Check on set operations for automatic check
        # (message typo fixed: 'mach' -> 'match')
        stop("Column names for QAQC do not match column names for data")
      }
      # Back up the existing QAQC file before touching it.
      fs::file_copy(qaqc_file,
                    fs::path_ext_set(qaqc_file, "bak"),
                    overwrite = TRUE)
      if (nrow(qaqc_data) != 0) {
        # Coerce everything to character so anti_join can compare columns
        # that may have been read with different types.  map_dfc is
        # qualified with purrr:: for consistency with the fs::/readr:: style
        # used elsewhere in this file.
        data <-
          purrr::map_dfc(data, as.character)
        qaqc_data <-
          purrr::map_dfc(qaqc_data, as.character)
        data <-
          dplyr::anti_join(data,
                           qaqc_data,
                           by = intersect(names(qaqc_data), names(data)))
      }
      # Only rewrite the QAQC file when there are genuinely new rows.
      if (nrow(data) > 0) {
        data <-
          dplyr::bind_rows(data,
                           qaqc_data)
        write.csv(data,
                  qaqc_file,
                  row.names = FALSE)
      }
    } else {
      write.csv(data,
                qaqc_file,
                row.names = FALSE)
    }
    # Record the processed file names so they are skipped next time.
    readr::write_lines(fs::path_file(input.files),
                       checkpoint_file,
                       append = TRUE)
    message("QAQC data were written to ", qaqc_file, "\n",
            " ", checkpoint_file, " was updated with the processed file names\n")
  }
## REMOVE QAQC DATA FUNCTION
# Deletes __processed_files.txt and removes data from those files from QAQC
#' Remove a directory of processed data
#'
#' This will remove the __processed_files.txt file and delete the data from the
#' QAQC file specified in __processed_files.txt
#'
#' @param path The directory to 'unprocess'
#'
#' @return Called for its side effects: rewrites (or deletes) the QAQC file
#'   and removes the checkpoint file.
#' @export
#'
#' @examples
unprocess_directory <-
  function(path) {
    stopifnot(is.character(path),
              fs::dir_exists(path),
              fs::path_ext(path) != "csv")
    # Read in processed file list from checkpoint file
    checkpoint_file <-
      fs::path(path,
               "__processed_files.txt")
    processed_files <-
      readr::read_lines(checkpoint_file,
                        skip = 2)
    qaqc_file <-
      get_qaqc_file(checkpoint_file)
    if (!fs::file_exists(qaqc_file)) {
      stop("The QAQC file (", qaqc_file, ") listed in ", checkpoint_file, " does not exist")
    }
    # Back up the QAQC data, then drop every observation that came from the
    # files recorded in the checkpoint file.
    fs::file_copy(qaqc_file,
                  fs::path_ext_set(qaqc_file, "bak"),
                  overwrite = TRUE)
    # BUG FIX: read_csv has no 'guess' argument; the intended argument is
    # 'guess_max' (as used in write_qaqc).  dplyr::filter is qualified for
    # consistency with the fs::/readr:: style used elsewhere.
    qaqc <-
      suppressMessages(readr::read_csv(qaqc_file,
                                       guess_max = 100000)) %>%
      dplyr::filter(!(input_source %in% processed_files))
    # Save updated QAQC, or delete it if nothing is left
    if (nrow(qaqc) > 0) {
      write.csv(qaqc,
                qaqc_file,
                row.names = FALSE)
    } else {
      fs::file_delete(qaqc_file)
    }
    # Delete checkpoint file
    fs::file_delete(checkpoint_file)
  }
#' Extract the qaqc filename from __processed_files.txt
#'
#' @param x The path to __processed_files.txt
#'
#' @return The QAQC file path recorded on the second line of the checkpoint
#'   file, with the leading "# " comment marker stripped.
#' @export
#'
#' @examples
get_qaqc_file <-
  function(x) {
    # The QAQC path is stored on line 2 of the checkpoint file as "# <path>".
    qaqc_line <- readr::read_lines(x, skip = 1, n_max = 1)
    stringr::str_remove(qaqc_line, "# ")
  }
|
out_merged_keep_probiotic_all_keep = out_probiotic_agg_all_use
out_merged_keep_antibiotic_all_keep = out_antibiotic_agg_all_use
colorRange_1 <- colorRampPalette(c("#FFB396","#FF4646"))
colorRange_2 <- colorRampPalette(c("#FF4646","#3B14A7"))
colorsAll_1 <- colorRange_1(3)
colorsAll_2 <- colorRange_2(6)
cols = c('#93ABD3',rev(c(colorsAll_1,colorsAll_2[2:length(colorsAll_2)])))
############################################################################################################################################################
out_forced_merged_keep_pro = out_merged_keep_probiotic_all_keep %>% filter(popSize==popSizePick)
out_forced_merged_keep_both_plot_all_bar_f_agg_mean_pro = aggregate(time2rec ~ numOfTotalCourses + inocSize + trt, out_forced_merged_keep_pro, FUN= function(z) myfunc(z))
out_forced_merged_keep_both_plot_all_bar_f_agg_std_pro = aggregate(time2rec ~ numOfTotalCourses + inocSize + trt, out_forced_merged_keep_pro, FUN= function(z) myfunc2(z))
out_forced_merged_keep_both_plot_all_bar_f_agg_min_pro = aggregate(time2rec ~ numOfTotalCourses + inocSize + trt, out_forced_merged_keep_pro, FUN= function(z) mymin(z))
out_forced_merged_keep_both_plot_all_bar_f_agg_max_pro = aggregate(time2rec ~ numOfTotalCourses + inocSize + trt, out_forced_merged_keep_pro, FUN= function(z) mymax(z))
colnames(out_forced_merged_keep_both_plot_all_bar_f_agg_mean_pro)[4]='time2rec_median'
colnames(out_forced_merged_keep_both_plot_all_bar_f_agg_std_pro)[4]='time2rec_std'
colnames(out_forced_merged_keep_both_plot_all_bar_f_agg_min_pro)[4]='time2rec_min'
colnames(out_forced_merged_keep_both_plot_all_bar_f_agg_max_pro)[4]='time2rec_max'
out_forced_merged_keep_both_plot_all_bar_f_agg_pro_1 = merge(out_forced_merged_keep_both_plot_all_bar_f_agg_mean_pro,out_forced_merged_keep_both_plot_all_bar_f_agg_std_pro,
by=c('trt','inocSize','numOfTotalCourses'))
out_forced_merged_keep_both_plot_all_bar_f_agg_pro_2 = merge(out_forced_merged_keep_both_plot_all_bar_f_agg_min_pro,out_forced_merged_keep_both_plot_all_bar_f_agg_max_pro,
by=c('trt','inocSize','numOfTotalCourses'))
out_forced_merged_keep_both_plot_all_bar_f_agg_pro = merge(out_forced_merged_keep_both_plot_all_bar_f_agg_pro_1,out_forced_merged_keep_both_plot_all_bar_f_agg_pro_2,
by=c('trt','inocSize','numOfTotalCourses'))
# out_forced_merged_keep_both_plot_all_bar_f_agg_pro[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_pro$time2rec_median)),]$time2rec_std=0
# out_forced_merged_keep_both_plot_all_bar_f_agg_pro[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_pro$time2rec_median)),]$time2rec_median=-0.1
out_forced_merged_keep_both_plot_all_bar_f_agg_pro[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_pro$time2rec_median)),]$time2rec_std=0
out_forced_merged_keep_both_plot_all_bar_f_agg_pro[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_pro$time2rec_median)),]$time2rec_median=Inf
out_forced_merged_keep_anti = out_merged_keep_antibiotic_all_keep %>% filter(popSize==popSizePick)
combo_status <- c(
'31' = "s2p-p2s \n 1e-3 - 1e-1",
'32' = "s2p-p2s \n 1e-3 - 1e-2",
'41' = "s2p-p2s \n 1e-4 - 1e-1",
'42' = "s2p-p2s \n 1e-4 - 1e-2",
'51' = "s2p-p2s \n 1e-5 - 1e-1",
'52' = "s2p-p2s \n 1e-5 - 1e-2",
'0' = "s2p-p2s \n 0 - 0"
)
out_forced_merged_keep_anti = out_forced_merged_keep_anti %>% rowwise() %>% mutate(combo=-1*(10*log10(s2p/(32*24))+log10(p2s/(32*24))))
out_forced_merged_keep_anti[which(is.infinite(out_forced_merged_keep_anti$combo)),]$combo=0
out_forced_merged_keep_anti = out_forced_merged_keep_anti[, -which(names(out_forced_merged_keep_anti) %in% c('inocSize'))]
out_forced_merged_keep_both_plot_all_bar_f_agg_mean_anti = aggregate(time2rec ~ numOfTotalCourses + combo , out_forced_merged_keep_anti, FUN= function(z) myfunc(z))
out_forced_merged_keep_both_plot_all_bar_f_agg_std_anti = aggregate(time2rec ~ numOfTotalCourses + combo , out_forced_merged_keep_anti, FUN= function(z) myfunc2(z))
out_forced_merged_keep_both_plot_all_bar_f_agg_min_anti = aggregate(time2rec ~ numOfTotalCourses + combo , out_forced_merged_keep_anti, FUN= function(z) mymin(z))
out_forced_merged_keep_both_plot_all_bar_f_agg_max_anti = aggregate(time2rec ~ numOfTotalCourses + combo , out_forced_merged_keep_anti, FUN= function(z) mymax(z))
colnames(out_forced_merged_keep_both_plot_all_bar_f_agg_mean_anti)[3] = 'time2rec_median'
colnames(out_forced_merged_keep_both_plot_all_bar_f_agg_std_anti)[3]='time2rec_std'
out_forced_merged_keep_both_plot_all_bar_f_agg_anti = merge(out_forced_merged_keep_both_plot_all_bar_f_agg_mean_anti,out_forced_merged_keep_both_plot_all_bar_f_agg_std_anti,by=c('combo','numOfTotalCourses'))
# out_forced_merged_keep_both_plot_all_bar_f_agg_anti[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_anti$time2rec_median)),]$time2rec_std=0
# out_forced_merged_keep_both_plot_all_bar_f_agg_anti[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_anti$time2rec_median)),]$time2rec_median=-0.1
out_forced_merged_keep_both_plot_all_bar_f_agg_anti[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_anti$time2rec_median)),]$time2rec_std=0
out_forced_merged_keep_both_plot_all_bar_f_agg_anti[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_anti$time2rec_median)),]$time2rec_median=Inf
out_forced_merged_keep_both_plot_all_bar_f_agg_anti$combo =factor(out_forced_merged_keep_both_plot_all_bar_f_agg_anti$combo,levels = c('31','32','41','42','51','52','0'))
out_forced_merged_keep_both_plot_all_bar_f_agg_anti = distinct(merge(out_forced_merged_keep_both_plot_all_bar_f_agg_anti,out_forced_merged_keep_anti[c('s2p','p2s','combo')],by='combo'))
a=fct_reorder(out_forced_merged_keep_both_plot_all_bar_f_agg_anti$combo, out_forced_merged_keep_both_plot_all_bar_f_agg_anti$s2p)
levs=levels(a)
srlabels=c()
srlabels[1]='(0,0)'
for(k in 2:length(levs)){
substr(levs[k],1,1)
srlabels[k]=paste0('(1e-',substr(levs[k],1,1),',1e-',substr(levs[k],2,2),')')
}
############################################################################################################################################################
################################################################# PLOT #####################################################################################
############################################################################################################################################################
vecTempAnti = out_forced_merged_keep_both_plot_all_bar_f_agg_anti$time2rec_median+out_forced_merged_keep_both_plot_all_bar_f_agg_anti$time2rec_std
vecTempPro = out_forced_merged_keep_both_plot_all_bar_f_agg_pro$time2rec_median+out_forced_merged_keep_both_plot_all_bar_f_agg_pro$time2rec_std
ymaxAnti = max(vecTempAnti[is.finite(vecTempAnti)])
ymaxPro = max(vecTempPro[is.finite(vecTempPro)])
ymaxBoth = max(ymaxAnti,ymaxPro)+1
cols_back=rep('grey90',length(cols))
out_forced_merged_keep_both_plot_all_bar_f_agg_anti_fwd=out_forced_merged_keep_both_plot_all_bar_f_agg_anti
out_forced_merged_keep_both_plot_all_bar_f_agg_anti_fwd[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_anti_fwd$time2rec_median)),]$time2rec_std=0
## --- Antibiotic panel: median time-to-recurrence per number of treatment
## courses, bars coloured by switching-rate combo.  The "fwd" table has Inf
## medians (no recurrence observed) replaced by 0 so ggplot can draw bars;
## the "back" table keeps Inf and renders as an all-grey backdrop.
out_forced_merged_keep_both_plot_all_bar_f_agg_anti_fwd[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_anti_fwd$time2rec_median)),]$time2rec_median=0
## Order combos by s2p so bars and legend entries appear consistently.
data_back = out_forced_merged_keep_both_plot_all_bar_f_agg_anti %>% mutate(combo = fct_reorder(combo, s2p))
data_fwd = out_forced_merged_keep_both_plot_all_bar_f_agg_anti_fwd %>% mutate(combo = fct_reorder(combo, s2p))
## Grey ("back") variant, cols_back palette, transparent background.
barplot_temp_back = ggplot(data_back,aes(x = as.factor(numOfTotalCourses), y = time2rec_median, fill=factor(combo))) +
  geom_bar(stat = "identity", position="dodge",width=0.8) + ylim(NA, ymaxBoth) +
  geom_errorbar(aes(x=as.factor(numOfTotalCourses), ymin=time2rec_median-time2rec_std, ymax=time2rec_median+time2rec_std), width=0.5,
                colour="black", alpha=1, size=0.5, position= position_dodge(width = 0.8))+
  # geom_hline(data=out_forced_merged_keep_both_plot_all,aes(yintercept=stre1_ss), linetype="dashed", color = "black", size=0.5)+
  scale_fill_manual(name="Switching rates \n(s2p,p2s)",values = cols_back, labels = srlabels)+
  xlab("Number of treatment courses (1 course = 5 days)")+ylab("Time to S. aureus \nrecurrence (months)")+
  # facet_wrap(~ inocSize, labeller = labeller(incoulum_size = inocSize), nrow = 1) +
  # facet_grid(. ~combo, labeller = labeller(combo = combo_status))+
  theme_bw()+theme( panel.background = element_rect(fill='transparent'), plot.background = element_rect(fill='transparent', color=NA))
## Coloured ("fwd") variant, cols palette, gridlines removed.
barplot_temp_fwd = ggplot(data=data_fwd,aes(x = as.factor(numOfTotalCourses),y = time2rec_median, fill=factor(combo))) +
  geom_bar(stat = "identity", position="dodge",width=0.8) + ylim(NA, ymaxBoth) +
  geom_errorbar(aes(x=as.factor(numOfTotalCourses), ymin=time2rec_median-time2rec_std, ymax=time2rec_median+time2rec_std), width=0.5,
                colour="black", alpha=1, size=0.5, position= position_dodge(width = 0.8))+
  # geom_hline(data=out_forced_merged_keep_both_plot_all,aes(yintercept=stre1_ss), linetype="dashed", color = "black", size=0.5)+
  scale_fill_manual(name="Switching rates \n(s2p,p2s)",values = cols, labels = srlabels)+
  xlab("Number of treatment courses (1 course = 5 days)")+ylab("Time to S. aureus \nrecurrence (months)")+
  # facet_wrap(~ inocSize, labeller = labeller(incoulum_size = inocSize), nrow = 1) +
  # facet_grid(. ~combo, labeller = labeller(combo = combo_status))+
  theme_bw()+theme( panel.background = element_rect(fill='transparent'), plot.background = element_rect(fill='transparent', color=NA), panel.grid = element_blank())
## Save both antibiotic variants as transparent PNGs (note: 'filname' is
## the variable name used throughout this script).
filname = paste0('./SS_time2rec_inocSize_antibiotic_grid_',
                 'popSize_',log(popSizePick,10),'_',type,'_fwd_',suffix,'.png')
ggsave(filname,barplot_temp_fwd,width = 8.5, height = 2.5, dpi = 300, units = "in", device='png')
filname = paste0('./SS_time2rec_inocSize_antibiotic_grid_',
                 'popSize_',log(popSizePick,10),'_',type,'_back_',suffix,'.png')
ggsave(filname,barplot_temp_back,width = 8.5, height = 2.5, dpi = 300, units = "in", device='png')
## --- Probiotic panel: same back/fwd pair, bars coloured by inoculum size.
out_forced_merged_keep_both_plot_all_bar_f_agg_pro_fwd=out_forced_merged_keep_both_plot_all_bar_f_agg_pro
out_forced_merged_keep_both_plot_all_bar_f_agg_pro_fwd[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_pro_fwd$time2rec_median)),]$time2rec_std=0
out_forced_merged_keep_both_plot_all_bar_f_agg_pro_fwd[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_pro_fwd$time2rec_median)),]$time2rec_median=0
data_back = out_forced_merged_keep_both_plot_all_bar_f_agg_pro
data_fwd = out_forced_merged_keep_both_plot_all_bar_f_agg_pro_fwd
barplot_temp_back = ggplot(data_back,aes(x = as.factor(numOfTotalCourses), y = time2rec_median, fill=factor(inocSize))) +
  geom_bar(stat = "identity", position="dodge",width=0.8) + ylim(NA, ymaxBoth) +
  geom_errorbar(aes(x=as.factor(numOfTotalCourses), ymin=time2rec_median-time2rec_std, ymax=time2rec_median+time2rec_std), width=0.5,
                colour="black", alpha=1, size=0.5, position= position_dodge(width = 0.8))+
  # geom_hline(data=out_forced_merged_keep_both_plot_all,aes(yintercept=stre1_ss), linetype="dashed", color = "black", size=0.5)+
  scale_fill_manual(name="Normalized \ninoculum size",values = cols_back)+
  xlab("Number of treatment courses (1 course = 5 days)")+ylab("Time to S. aureus \nrecurrence (months)")+
  # facet_wrap(~ inocSize, labeller = labeller(incoulum_size = inocSize), nrow = 1) +
  # facet_grid(. ~combo, labeller = labeller(combo = combo_status))+
  theme_bw()+theme( panel.background = element_rect(fill='transparent'), plot.background = element_rect(fill='transparent', color=NA))
barplot_temp_fwd = ggplot(data=data_fwd,aes(x = as.factor(numOfTotalCourses),y = time2rec_median, fill=factor(inocSize))) +
  geom_bar(stat = "identity", position="dodge",width=0.8) + ylim(NA, ymaxBoth) +
  geom_errorbar(aes(x=as.factor(numOfTotalCourses), ymin=time2rec_median-time2rec_std, ymax=time2rec_median+time2rec_std), width=0.5,
                colour="black", alpha=1, size=0.5, position= position_dodge(width = 0.8))+
  # geom_hline(data=out_forced_merged_keep_both_plot_all,aes(yintercept=stre1_ss), linetype="dashed", color = "black", size=0.5)+
  scale_fill_manual(name="Normalized \ninoculum size",values = cols)+
  xlab("Number of treatment courses (1 course = 5 days)")+ylab("Time to S. aureus \nrecurrence (months)")+
  # facet_wrap(~ inocSize, labeller = labeller(incoulum_size = inocSize), nrow = 1) +
  # facet_grid(. ~combo, labeller = labeller(combo = combo_status))+
  theme_bw()+theme( panel.background = element_rect(fill='transparent'), plot.background = element_rect(fill='transparent', color=NA), panel.grid = element_blank())
## Save both probiotic variants.
filname = paste0('./SS_time2rec_inocSize_probiotic_grid_',
                 'popSize_',log(popSizePick,10),'_',type,'_fwd_',suffix,'.png')
ggsave(filname,barplot_temp_fwd,width = 8.5, height = 2.5, dpi = 300, units = "in", device='png')
filname = paste0('./SS_time2rec_inocSize_probiotic_grid_',
                 'popSize_',log(popSizePick,10),'_',type,'_back_',suffix,'.png')
ggsave(filname,barplot_temp_back,width = 8.5, height = 2.5, dpi = 300, units = "in", device='png')
## Superseded single-plot version, kept for reference:
#
# barplot_temp = out_forced_merged_keep_both_plot_all_bar_f_agg_pro %>%
#   ggplot(aes(x = as.factor(numOfTotalCourses), y = time2rec_median, fill=factor(inocSize))) +
#   geom_bar(stat = "identity", position="dodge",width=0.8) + ylim(NA, ymaxBoth) +
#   geom_errorbar(aes(x=as.factor(numOfTotalCourses), ymin=time2rec_median-time2rec_std, ymax=time2rec_median+time2rec_std), width=0.5,
#                 # geom_errorbar(aes(x=as.factor(numOfTotalCourses), ymin=time2rec_min, ymax=time2rec_max), width=0.5,
#                 colour="black", alpha=1, size=0.5, position= position_dodge(width = 0.8))+
#   # geom_hline(data=out_forced_merged_keep_both_plot_all,aes(yintercept=stre1_ss), linetype="dashed", color = "black", size=0.5)+
#   scale_fill_manual(name="Normalized \ninoculum size",values = cols)+
#   xlab("Number of treatment courses (1 course = 5 days)")+ylab("Time to S. aureus \nrecurrence (months)")+
#   # facet_wrap(~ inocSize, labeller = labeller(incoulum_size = inocSize), nrow = 1) +
#   # facet_grid(. ~combo, labeller = labeller(combo = combo_status))+
#   theme_bw()
#
#
# filname = paste0('./SS_time2rec_inocSize_probiotic_grid_',
#                  'popSize_',log(popSizePick,10),'_',type,'_',suffix,'.png')
# ggsave(filname,barplot_temp,width = 8.5, height = 2.5, dpi = 300, units = "in", device='png')
|
/plot_barcombo_time2recDiff.R
|
no_license
|
burcutepekule/saureusdecolonization
|
R
| false
| false
| 14,911
|
r
|
## Build median/std (and min/max) summaries of time-to-recurrence for the
## probiotic and antibiotic simulation outputs at the chosen population
## size (popSizePick), plus the colour palettes and y-limits shared by the
## panels plotted below.  myfunc/myfunc2/mymin/mymax and the input tables
## are defined earlier in this file.
out_merged_keep_probiotic_all_keep = out_probiotic_agg_all_use
out_merged_keep_antibiotic_all_keep = out_antibiotic_agg_all_use
## Palette: light blue baseline followed by a reversed orange-red-purple ramp.
colorRange_1 <- colorRampPalette(c("#FFB396","#FF4646"))
colorRange_2 <- colorRampPalette(c("#FF4646","#3B14A7"))
colorsAll_1 <- colorRange_1(3)
colorsAll_2 <- colorRange_2(6)
cols = c('#93ABD3',rev(c(colorsAll_1,colorsAll_2[2:length(colorsAll_2)])))
############################################################################################################################################################
## --- Probiotic aggregates: one summary row per (trt, inocSize, numOfTotalCourses).
out_forced_merged_keep_pro = out_merged_keep_probiotic_all_keep %>% filter(popSize==popSizePick)
out_forced_merged_keep_both_plot_all_bar_f_agg_mean_pro = aggregate(time2rec ~ numOfTotalCourses + inocSize + trt, out_forced_merged_keep_pro, FUN= function(z) myfunc(z))
out_forced_merged_keep_both_plot_all_bar_f_agg_std_pro = aggregate(time2rec ~ numOfTotalCourses + inocSize + trt, out_forced_merged_keep_pro, FUN= function(z) myfunc2(z))
out_forced_merged_keep_both_plot_all_bar_f_agg_min_pro = aggregate(time2rec ~ numOfTotalCourses + inocSize + trt, out_forced_merged_keep_pro, FUN= function(z) mymin(z))
out_forced_merged_keep_both_plot_all_bar_f_agg_max_pro = aggregate(time2rec ~ numOfTotalCourses + inocSize + trt, out_forced_merged_keep_pro, FUN= function(z) mymax(z))
colnames(out_forced_merged_keep_both_plot_all_bar_f_agg_mean_pro)[4]='time2rec_median'
colnames(out_forced_merged_keep_both_plot_all_bar_f_agg_std_pro)[4]='time2rec_std'
colnames(out_forced_merged_keep_both_plot_all_bar_f_agg_min_pro)[4]='time2rec_min'
colnames(out_forced_merged_keep_both_plot_all_bar_f_agg_max_pro)[4]='time2rec_max'
out_forced_merged_keep_both_plot_all_bar_f_agg_pro_1 = merge(out_forced_merged_keep_both_plot_all_bar_f_agg_mean_pro,out_forced_merged_keep_both_plot_all_bar_f_agg_std_pro,
                                                             by=c('trt','inocSize','numOfTotalCourses'))
out_forced_merged_keep_both_plot_all_bar_f_agg_pro_2 = merge(out_forced_merged_keep_both_plot_all_bar_f_agg_min_pro,out_forced_merged_keep_both_plot_all_bar_f_agg_max_pro,
                                                             by=c('trt','inocSize','numOfTotalCourses'))
out_forced_merged_keep_both_plot_all_bar_f_agg_pro = merge(out_forced_merged_keep_both_plot_all_bar_f_agg_pro_1,out_forced_merged_keep_both_plot_all_bar_f_agg_pro_2,
                                                           by=c('trt','inocSize','numOfTotalCourses'))
# out_forced_merged_keep_both_plot_all_bar_f_agg_pro[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_pro$time2rec_median)),]$time2rec_std=0
# out_forced_merged_keep_both_plot_all_bar_f_agg_pro[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_pro$time2rec_median)),]$time2rec_median=-0.1
## Inf medians (no recurrence) keep their Inf here; the "_fwd" copies made
## later zero them out just before plotting.
out_forced_merged_keep_both_plot_all_bar_f_agg_pro[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_pro$time2rec_median)),]$time2rec_std=0
out_forced_merged_keep_both_plot_all_bar_f_agg_pro[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_pro$time2rec_median)),]$time2rec_median=Inf
## --- Antibiotic aggregates: encode each (s2p, p2s) rate pair as a two-digit
## "combo" code built from the exponents, e.g. s2p=1e-4, p2s=1e-2 -> 42;
## combo 0 means no switching.  (Rates are divided by 32*24 before log10 --
## presumably a unit conversion; verify against the simulation setup.)
out_forced_merged_keep_anti = out_merged_keep_antibiotic_all_keep %>% filter(popSize==popSizePick)
combo_status <- c(
  '31' = "s2p-p2s \n 1e-3 - 1e-1",
  '32' = "s2p-p2s \n 1e-3 - 1e-2",
  '41' = "s2p-p2s \n 1e-4 - 1e-1",
  '42' = "s2p-p2s \n 1e-4 - 1e-2",
  '51' = "s2p-p2s \n 1e-5 - 1e-1",
  '52' = "s2p-p2s \n 1e-5 - 1e-2",
  '0' = "s2p-p2s \n 0 - 0"
)
out_forced_merged_keep_anti = out_forced_merged_keep_anti %>% rowwise() %>% mutate(combo=-1*(10*log10(s2p/(32*24))+log10(p2s/(32*24))))
out_forced_merged_keep_anti[which(is.infinite(out_forced_merged_keep_anti$combo)),]$combo=0
out_forced_merged_keep_anti = out_forced_merged_keep_anti[, -which(names(out_forced_merged_keep_anti) %in% c('inocSize'))]
out_forced_merged_keep_both_plot_all_bar_f_agg_mean_anti = aggregate(time2rec ~ numOfTotalCourses + combo , out_forced_merged_keep_anti, FUN= function(z) myfunc(z))
out_forced_merged_keep_both_plot_all_bar_f_agg_std_anti = aggregate(time2rec ~ numOfTotalCourses + combo , out_forced_merged_keep_anti, FUN= function(z) myfunc2(z))
out_forced_merged_keep_both_plot_all_bar_f_agg_min_anti = aggregate(time2rec ~ numOfTotalCourses + combo , out_forced_merged_keep_anti, FUN= function(z) mymin(z))
out_forced_merged_keep_both_plot_all_bar_f_agg_max_anti = aggregate(time2rec ~ numOfTotalCourses + combo , out_forced_merged_keep_anti, FUN= function(z) mymax(z))
colnames(out_forced_merged_keep_both_plot_all_bar_f_agg_mean_anti)[3] = 'time2rec_median'
colnames(out_forced_merged_keep_both_plot_all_bar_f_agg_std_anti)[3]='time2rec_std'
out_forced_merged_keep_both_plot_all_bar_f_agg_anti = merge(out_forced_merged_keep_both_plot_all_bar_f_agg_mean_anti,out_forced_merged_keep_both_plot_all_bar_f_agg_std_anti,by=c('combo','numOfTotalCourses'))
# out_forced_merged_keep_both_plot_all_bar_f_agg_anti[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_anti$time2rec_median)),]$time2rec_std=0
# out_forced_merged_keep_both_plot_all_bar_f_agg_anti[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_anti$time2rec_median)),]$time2rec_median=-0.1
out_forced_merged_keep_both_plot_all_bar_f_agg_anti[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_anti$time2rec_median)),]$time2rec_std=0
out_forced_merged_keep_both_plot_all_bar_f_agg_anti[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_anti$time2rec_median)),]$time2rec_median=Inf
out_forced_merged_keep_both_plot_all_bar_f_agg_anti$combo =factor(out_forced_merged_keep_both_plot_all_bar_f_agg_anti$combo,levels = c('31','32','41','42','51','52','0'))
out_forced_merged_keep_both_plot_all_bar_f_agg_anti = distinct(merge(out_forced_merged_keep_both_plot_all_bar_f_agg_anti,out_forced_merged_keep_anti[c('s2p','p2s','combo')],by='combo'))
## Legend labels "(1e-a,1e-b)" ordered by increasing s2p; slot 1 = no switching.
a=fct_reorder(out_forced_merged_keep_both_plot_all_bar_f_agg_anti$combo, out_forced_merged_keep_both_plot_all_bar_f_agg_anti$s2p)
levs=levels(a)
srlabels=c()
srlabels[1]='(0,0)'
for(k in 2:length(levs)){
  substr(levs[k],1,1)
  srlabels[k]=paste0('(1e-',substr(levs[k],1,1),',1e-',substr(levs[k],2,2),')')
}
############################################################################################################################################################
################################################################# PLOT #####################################################################################
############################################################################################################################################################
## Shared y-limit: largest finite median+std across both panels, plus 1.
vecTempAnti = out_forced_merged_keep_both_plot_all_bar_f_agg_anti$time2rec_median+out_forced_merged_keep_both_plot_all_bar_f_agg_anti$time2rec_std
vecTempPro = out_forced_merged_keep_both_plot_all_bar_f_agg_pro$time2rec_median+out_forced_merged_keep_both_plot_all_bar_f_agg_pro$time2rec_std
ymaxAnti = max(vecTempAnti[is.finite(vecTempAnti)])
ymaxPro = max(vecTempPro[is.finite(vecTempPro)])
ymaxBoth = max(ymaxAnti,ymaxPro)+1
## All-grey palette for the "back" (backdrop) versions of each plot.
cols_back=rep('grey90',length(cols))
## "fwd" copy of the antibiotic table: zero the std of Inf-median rows here
## (the median itself is zeroed on the next line of the script).
out_forced_merged_keep_both_plot_all_bar_f_agg_anti_fwd=out_forced_merged_keep_both_plot_all_bar_f_agg_anti
out_forced_merged_keep_both_plot_all_bar_f_agg_anti_fwd[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_anti_fwd$time2rec_median)),]$time2rec_std=0
## --- Antibiotic panel: median time-to-recurrence per number of treatment
## courses, bars coloured by switching-rate combo.  The "fwd" table has Inf
## medians (no recurrence observed) replaced by 0 so ggplot can draw bars;
## the "back" table keeps Inf and renders as an all-grey backdrop.
out_forced_merged_keep_both_plot_all_bar_f_agg_anti_fwd[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_anti_fwd$time2rec_median)),]$time2rec_median=0
## Order combos by s2p so bars and legend entries appear consistently.
data_back = out_forced_merged_keep_both_plot_all_bar_f_agg_anti %>% mutate(combo = fct_reorder(combo, s2p))
data_fwd = out_forced_merged_keep_both_plot_all_bar_f_agg_anti_fwd %>% mutate(combo = fct_reorder(combo, s2p))
## Grey ("back") variant, cols_back palette, transparent background.
barplot_temp_back = ggplot(data_back,aes(x = as.factor(numOfTotalCourses), y = time2rec_median, fill=factor(combo))) +
  geom_bar(stat = "identity", position="dodge",width=0.8) + ylim(NA, ymaxBoth) +
  geom_errorbar(aes(x=as.factor(numOfTotalCourses), ymin=time2rec_median-time2rec_std, ymax=time2rec_median+time2rec_std), width=0.5,
                colour="black", alpha=1, size=0.5, position= position_dodge(width = 0.8))+
  # geom_hline(data=out_forced_merged_keep_both_plot_all,aes(yintercept=stre1_ss), linetype="dashed", color = "black", size=0.5)+
  scale_fill_manual(name="Switching rates \n(s2p,p2s)",values = cols_back, labels = srlabels)+
  xlab("Number of treatment courses (1 course = 5 days)")+ylab("Time to S. aureus \nrecurrence (months)")+
  # facet_wrap(~ inocSize, labeller = labeller(incoulum_size = inocSize), nrow = 1) +
  # facet_grid(. ~combo, labeller = labeller(combo = combo_status))+
  theme_bw()+theme( panel.background = element_rect(fill='transparent'), plot.background = element_rect(fill='transparent', color=NA))
## Coloured ("fwd") variant, cols palette, gridlines removed.
barplot_temp_fwd = ggplot(data=data_fwd,aes(x = as.factor(numOfTotalCourses),y = time2rec_median, fill=factor(combo))) +
  geom_bar(stat = "identity", position="dodge",width=0.8) + ylim(NA, ymaxBoth) +
  geom_errorbar(aes(x=as.factor(numOfTotalCourses), ymin=time2rec_median-time2rec_std, ymax=time2rec_median+time2rec_std), width=0.5,
                colour="black", alpha=1, size=0.5, position= position_dodge(width = 0.8))+
  # geom_hline(data=out_forced_merged_keep_both_plot_all,aes(yintercept=stre1_ss), linetype="dashed", color = "black", size=0.5)+
  scale_fill_manual(name="Switching rates \n(s2p,p2s)",values = cols, labels = srlabels)+
  xlab("Number of treatment courses (1 course = 5 days)")+ylab("Time to S. aureus \nrecurrence (months)")+
  # facet_wrap(~ inocSize, labeller = labeller(incoulum_size = inocSize), nrow = 1) +
  # facet_grid(. ~combo, labeller = labeller(combo = combo_status))+
  theme_bw()+theme( panel.background = element_rect(fill='transparent'), plot.background = element_rect(fill='transparent', color=NA), panel.grid = element_blank())
## Save both antibiotic variants as transparent PNGs (note: 'filname' is
## the variable name used throughout this script).
filname = paste0('./SS_time2rec_inocSize_antibiotic_grid_',
                 'popSize_',log(popSizePick,10),'_',type,'_fwd_',suffix,'.png')
ggsave(filname,barplot_temp_fwd,width = 8.5, height = 2.5, dpi = 300, units = "in", device='png')
filname = paste0('./SS_time2rec_inocSize_antibiotic_grid_',
                 'popSize_',log(popSizePick,10),'_',type,'_back_',suffix,'.png')
ggsave(filname,barplot_temp_back,width = 8.5, height = 2.5, dpi = 300, units = "in", device='png')
## --- Probiotic panel: same back/fwd pair, bars coloured by inoculum size.
out_forced_merged_keep_both_plot_all_bar_f_agg_pro_fwd=out_forced_merged_keep_both_plot_all_bar_f_agg_pro
out_forced_merged_keep_both_plot_all_bar_f_agg_pro_fwd[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_pro_fwd$time2rec_median)),]$time2rec_std=0
out_forced_merged_keep_both_plot_all_bar_f_agg_pro_fwd[which(is.infinite(out_forced_merged_keep_both_plot_all_bar_f_agg_pro_fwd$time2rec_median)),]$time2rec_median=0
data_back = out_forced_merged_keep_both_plot_all_bar_f_agg_pro
data_fwd = out_forced_merged_keep_both_plot_all_bar_f_agg_pro_fwd
barplot_temp_back = ggplot(data_back,aes(x = as.factor(numOfTotalCourses), y = time2rec_median, fill=factor(inocSize))) +
  geom_bar(stat = "identity", position="dodge",width=0.8) + ylim(NA, ymaxBoth) +
  geom_errorbar(aes(x=as.factor(numOfTotalCourses), ymin=time2rec_median-time2rec_std, ymax=time2rec_median+time2rec_std), width=0.5,
                colour="black", alpha=1, size=0.5, position= position_dodge(width = 0.8))+
  # geom_hline(data=out_forced_merged_keep_both_plot_all,aes(yintercept=stre1_ss), linetype="dashed", color = "black", size=0.5)+
  scale_fill_manual(name="Normalized \ninoculum size",values = cols_back)+
  xlab("Number of treatment courses (1 course = 5 days)")+ylab("Time to S. aureus \nrecurrence (months)")+
  # facet_wrap(~ inocSize, labeller = labeller(incoulum_size = inocSize), nrow = 1) +
  # facet_grid(. ~combo, labeller = labeller(combo = combo_status))+
  theme_bw()+theme( panel.background = element_rect(fill='transparent'), plot.background = element_rect(fill='transparent', color=NA))
barplot_temp_fwd = ggplot(data=data_fwd,aes(x = as.factor(numOfTotalCourses),y = time2rec_median, fill=factor(inocSize))) +
  geom_bar(stat = "identity", position="dodge",width=0.8) + ylim(NA, ymaxBoth) +
  geom_errorbar(aes(x=as.factor(numOfTotalCourses), ymin=time2rec_median-time2rec_std, ymax=time2rec_median+time2rec_std), width=0.5,
                colour="black", alpha=1, size=0.5, position= position_dodge(width = 0.8))+
  # geom_hline(data=out_forced_merged_keep_both_plot_all,aes(yintercept=stre1_ss), linetype="dashed", color = "black", size=0.5)+
  scale_fill_manual(name="Normalized \ninoculum size",values = cols)+
  xlab("Number of treatment courses (1 course = 5 days)")+ylab("Time to S. aureus \nrecurrence (months)")+
  # facet_wrap(~ inocSize, labeller = labeller(incoulum_size = inocSize), nrow = 1) +
  # facet_grid(. ~combo, labeller = labeller(combo = combo_status))+
  theme_bw()+theme( panel.background = element_rect(fill='transparent'), plot.background = element_rect(fill='transparent', color=NA), panel.grid = element_blank())
## Save both probiotic variants.
filname = paste0('./SS_time2rec_inocSize_probiotic_grid_',
                 'popSize_',log(popSizePick,10),'_',type,'_fwd_',suffix,'.png')
ggsave(filname,barplot_temp_fwd,width = 8.5, height = 2.5, dpi = 300, units = "in", device='png')
filname = paste0('./SS_time2rec_inocSize_probiotic_grid_',
                 'popSize_',log(popSizePick,10),'_',type,'_back_',suffix,'.png')
ggsave(filname,barplot_temp_back,width = 8.5, height = 2.5, dpi = 300, units = "in", device='png')
## Superseded single-plot version, kept for reference:
#
# barplot_temp = out_forced_merged_keep_both_plot_all_bar_f_agg_pro %>%
#   ggplot(aes(x = as.factor(numOfTotalCourses), y = time2rec_median, fill=factor(inocSize))) +
#   geom_bar(stat = "identity", position="dodge",width=0.8) + ylim(NA, ymaxBoth) +
#   geom_errorbar(aes(x=as.factor(numOfTotalCourses), ymin=time2rec_median-time2rec_std, ymax=time2rec_median+time2rec_std), width=0.5,
#                 # geom_errorbar(aes(x=as.factor(numOfTotalCourses), ymin=time2rec_min, ymax=time2rec_max), width=0.5,
#                 colour="black", alpha=1, size=0.5, position= position_dodge(width = 0.8))+
#   # geom_hline(data=out_forced_merged_keep_both_plot_all,aes(yintercept=stre1_ss), linetype="dashed", color = "black", size=0.5)+
#   scale_fill_manual(name="Normalized \ninoculum size",values = cols)+
#   xlab("Number of treatment courses (1 course = 5 days)")+ylab("Time to S. aureus \nrecurrence (months)")+
#   # facet_wrap(~ inocSize, labeller = labeller(incoulum_size = inocSize), nrow = 1) +
#   # facet_grid(. ~combo, labeller = labeller(combo = combo_status))+
#   theme_bw()
#
#
# filname = paste0('./SS_time2rec_inocSize_probiotic_grid_',
#                  'popSize_',log(popSizePick,10),'_',type,'_',suffix,'.png')
# ggsave(filname,barplot_temp,width = 8.5, height = 2.5, dpi = 300, units = "in", device='png')
|
## Put comments here that give an overall description of what your
## functions do
## These functions written in partial fulfillment of Coursera Data Science: R Programming
## Week 3 Assignment; week beginning Sep 14, 2017; GitHub user:Venpure
## Write a short comment describing this function
## A pair of functions that cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
## Create a special "matrix" wrapper that can cache its inverse.
## Returns a list of four closures sharing one enclosing environment:
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setInverse(inv) -- store a computed inverse in the cache
##   getInverse()    -- return the cached inverse, or NULL if not set
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL                      # cached inverse; NULL means "not computed"
  set <- function(y) {
    x <<- y                           # swap in the new matrix ...
    cached <<- NULL                   # ... and drop the now-stale inverse
  }
  get <- function() {
    x
  }
  setInverse <- function(solveMatrix) {
    cached <<- solveMatrix
  }
  getInverse <- function() {
    cached
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Write a short comment describing this function
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then cacheSolve will retrieve the inverse from the cache
## Compute the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse has already been cached (and the matrix is unchanged),
## the cached value is returned instead of recomputing it.
## x   : list produced by makeCacheMatrix (get/set/getInverse/setInverse)
## ... : extra arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  ## Bug fix: the accessors are named getInverse/setInverse (capital I);
  ## `$` on a list is case-sensitive, so x$getinverse was NULL and calling
  ## it raised "attempt to apply non-function".
  inv <- x$getInverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setInverse(inv)                   # cache for subsequent calls
  inv
}
|
/cachematrix.R
|
no_license
|
venpure/ProgrammingAssignment2
|
R
| false
| false
| 1,363
|
r
|
## Put comments here that give an overall description of what your
## functions do
## These functions written in partial fulfillment of Coursera Data Science: R Programming
## Week 3 Assignment; week beginning Sep 14, 2017; GitHub user:Venpure
## Write a short comment describing this function
## A pair of functions that cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
## Create a special "matrix" wrapper that can cache its inverse.
## Returns a list of four closures sharing one enclosing environment:
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setInverse(inv) -- store a computed inverse in the cache
##   getInverse()    -- return the cached inverse, or NULL if not set
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL                      # cached inverse; NULL means "not computed"
  set <- function(y) {
    x <<- y                           # swap in the new matrix ...
    cached <<- NULL                   # ... and drop the now-stale inverse
  }
  get <- function() {
    x
  }
  setInverse <- function(solveMatrix) {
    cached <<- solveMatrix
  }
  getInverse <- function() {
    cached
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Write a short comment describing this function
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then cacheSolve will retrieve the inverse from the cache
## Compute the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse has already been cached (and the matrix is unchanged),
## the cached value is returned instead of recomputing it.
## x   : list produced by makeCacheMatrix (get/set/getInverse/setInverse)
## ... : extra arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  ## Bug fix: the accessors are named getInverse/setInverse (capital I);
  ## `$` on a list is case-sensitive, so x$getinverse was NULL and calling
  ## it raised "attempt to apply non-function".
  inv <- x$getInverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setInverse(inv)                   # cache for subsequent calls
  inv
}
|
## Shiny server for a web-analytics dashboard.  Expects "analytics.Rdata"
## to provide a data frame `analytics`; the columns used below are Date,
## Hour, Domain, visitors, visits, bounces and timeOnSite (inferred from
## usage -- verify against the .Rdata file).
library(shiny)
library(plyr)
library(ggplot2)
load("analytics.Rdata") # load the dataframe
shinyServer(function(input, output) { # server is defined within these parentheses
  # prep data once and then pass around the program
  ## Reactive subset of `analytics`: rows within the selected date range and
  ## hour window, optionally restricted to the chosen domains.
  passData <- reactive({
    analytics <- analytics[analytics$Date %in% seq.Date(input$dateRange[1], input$dateRange[2], by = "days"),]
    analytics <- analytics[analytics$Hour %in% as.numeric(input$minimumTime) : as.numeric(input$maximumTime),]
    if(class(input$domainShow)=="character"){
      analytics <- analytics[analytics$Domain %in% unlist(input$domainShow),]
    }
    analytics
  })
  ## Daily trend plot: one line per domain for the selected metric
  ## (visitors, bounce rate %, or average time on site).
  output$monthGraph <- renderPlot({
    graphData <- ddply(passData(), .(Domain, Date), numcolwise(sum))
    if(input$outputType == "visitors"){
      theGraph <- ggplot(graphData, aes(x = Date, y = visitors, group = Domain, colour = Domain)) + geom_line() +
        ylab("Unique visitors")
    }
    if(input$outputType == "bounceRate"){
      theGraph <- ggplot(graphData, aes(x = Date, y = bounces / visits * 100, group = Domain, colour = Domain)) +
        geom_line() + ylab("Bounce rate %")
    }
    if(input$outputType == "timeOnSite"){
      theGraph <- ggplot(graphData, aes(x = Date, y = timeOnSite / visits, group = Domain, colour = Domain)) +
        geom_line() + ylab("Average time on site")
    }
    if(input$smoother){
      theGraph <- theGraph + geom_smooth()   # optional trend smoother overlay
    }
    print(theGraph)
  })
  ## Hour-of-day plot: same metrics aggregated by hour instead of date.
  output$hourGraph <- renderPlot({
    graphData = ddply(passData(), .(Domain, Hour), numcolwise(sum))
    if(input$outputType == "visitors"){
      theGraph <- ggplot(graphData, aes(x = Hour, y = visitors, group = Domain, colour = Domain)) + geom_line() +
        ylab("Unique visitors")
    }
    if(input$outputType == "bounceRate"){
      theGraph <- ggplot(graphData, aes(x = Hour, y = bounces / visits * 100, group = Domain, colour = Domain)) +
        geom_line() + ylab("Bounce rate %")
    }
    if(input$outputType == "timeOnSite"){
      theGraph <- ggplot(graphData, aes(x = Hour, y = timeOnSite / visits, group = Domain, colour = Domain)) +
        geom_line() + ylab("Average time on site")
    }
    if(input$smoother){
      theGraph <- theGraph + geom_smooth()
    }
    print(theGraph)
  })
  ## Text summary: number of days covered and total visitors in the subset.
  output$textDisplay <- renderText({
    paste(
      length(seq.Date(input$dateRange[1], input$dateRange[2], by = "days")),
      " days are summarised. There were", sum(passData()$visitors), "visitors in this time period."
    )
  })
})
|
/server.R
|
no_license
|
ajk338/my_app
|
R
| false
| false
| 2,737
|
r
|
## Shiny server for a web-analytics dashboard.  Expects "analytics.Rdata"
## to provide a data frame `analytics`; the columns used below are Date,
## Hour, Domain, visitors, visits, bounces and timeOnSite (inferred from
## usage -- verify against the .Rdata file).
library(shiny)
library(plyr)
library(ggplot2)
load("analytics.Rdata") # load the dataframe
shinyServer(function(input, output) { # server is defined within these parentheses
  # prep data once and then pass around the program
  ## Reactive subset of `analytics`: rows within the selected date range and
  ## hour window, optionally restricted to the chosen domains.
  passData <- reactive({
    analytics <- analytics[analytics$Date %in% seq.Date(input$dateRange[1], input$dateRange[2], by = "days"),]
    analytics <- analytics[analytics$Hour %in% as.numeric(input$minimumTime) : as.numeric(input$maximumTime),]
    if(class(input$domainShow)=="character"){
      analytics <- analytics[analytics$Domain %in% unlist(input$domainShow),]
    }
    analytics
  })
  ## Daily trend plot: one line per domain for the selected metric
  ## (visitors, bounce rate %, or average time on site).
  output$monthGraph <- renderPlot({
    graphData <- ddply(passData(), .(Domain, Date), numcolwise(sum))
    if(input$outputType == "visitors"){
      theGraph <- ggplot(graphData, aes(x = Date, y = visitors, group = Domain, colour = Domain)) + geom_line() +
        ylab("Unique visitors")
    }
    if(input$outputType == "bounceRate"){
      theGraph <- ggplot(graphData, aes(x = Date, y = bounces / visits * 100, group = Domain, colour = Domain)) +
        geom_line() + ylab("Bounce rate %")
    }
    if(input$outputType == "timeOnSite"){
      theGraph <- ggplot(graphData, aes(x = Date, y = timeOnSite / visits, group = Domain, colour = Domain)) +
        geom_line() + ylab("Average time on site")
    }
    if(input$smoother){
      theGraph <- theGraph + geom_smooth()   # optional trend smoother overlay
    }
    print(theGraph)
  })
  ## Hour-of-day plot: same metrics aggregated by hour instead of date.
  output$hourGraph <- renderPlot({
    graphData = ddply(passData(), .(Domain, Hour), numcolwise(sum))
    if(input$outputType == "visitors"){
      theGraph <- ggplot(graphData, aes(x = Hour, y = visitors, group = Domain, colour = Domain)) + geom_line() +
        ylab("Unique visitors")
    }
    if(input$outputType == "bounceRate"){
      theGraph <- ggplot(graphData, aes(x = Hour, y = bounces / visits * 100, group = Domain, colour = Domain)) +
        geom_line() + ylab("Bounce rate %")
    }
    if(input$outputType == "timeOnSite"){
      theGraph <- ggplot(graphData, aes(x = Hour, y = timeOnSite / visits, group = Domain, colour = Domain)) +
        geom_line() + ylab("Average time on site")
    }
    if(input$smoother){
      theGraph <- theGraph + geom_smooth()
    }
    print(theGraph)
  })
  ## Text summary: number of days covered and total visitors in the subset.
  output$textDisplay <- renderText({
    paste(
      length(seq.Date(input$dateRange[1], input$dateRange[2], by = "days")),
      " days are summarised. There were", sum(passData()$visitors), "visitors in this time period."
    )
  })
})
|
# Install the package (run once)
# install.packages("asbio")
# Load the package (asbio provides tukey.add.test used below)
library("asbio")
## Simulated balanced 4x4 design, 10 replicates per cell: cell means step
## by +0.2 across levels of B within each level of A; the A groups differ
## in both mean and standard deviation.
x11 <- rnorm(10,10,1)
x12 <- rnorm(10,10.2,1)
x13 <- rnorm(10,10.4,1)
x14 <- rnorm(10,10.6,1)
x21 <- rnorm(10,8,1)
x22 <- rnorm(10,8.2,1)
x23 <- rnorm(10,8.4,1)
x24 <- rnorm(10,8.6,1)
x31 <- rnorm(10,9,2)
x32 <- rnorm(10,9.2,2)
x33 <- rnorm(10,9.4,2)
x34 <- rnorm(10,9.6,2)
x41 <- rnorm(10,13,1.5)
x42 <- rnorm(10,13.2,1.5)
x43 <- rnorm(10,13.4,1.5)
x44 <- rnorm(10,13.6,1.5)
X = c(x11,x12,x13,x14,x21,x22,x23,x24,x31,x32,x33,x34,x41,x42,x43,x44)
A = gl(4,40)     # factor A: 4 levels, 40 consecutive observations each
B = gl(4,10,160) # factor B: 4 levels, cycling in runs of 10
data = data.frame(X,A,B)
# no interaction
# data.aov <- aov(X~A+B,data)
# summary(data.aov)
# draw..
# op <- par(mfrow = c(1,2))
# plot(X~A+B,data)
# data.aov2 <- aov(X~A+B+A:B,data)
## Two-way ANOVA with interaction, then the additive model for comparison.
data.aov <- aov(X~A*B,data)
summary(data.aov)
data.aov2 <- aov(X~A+B,data)
summary(data.aov2)
# Without replication, can the interaction still be tested in two-way ANOVA?
y11 <- rnorm(1,10,1)
y12 <- rnorm(1,10.2,1)
y13 <- rnorm(1,10.4,1)
y14 <- rnorm(1,10.6,1)
y21 <- rnorm(1,8,1)
y22 <- rnorm(1,8.2,1)
y23 <- rnorm(1,8.4,1)
y24 <- rnorm(1,8.6,1)
y31 <- rnorm(1,9,2)
y32 <- rnorm(1,9.2,2)
y33 <- rnorm(1,9.4,2)
y34 <- rnorm(1,9.6,2)
y41 <- rnorm(1,13,1.5)
y42 <- rnorm(1,13.2,1.5)
y43 <- rnorm(1,13.4,1.5)
y44 <- rnorm(1,13.6,1.5)
# no repeat
Y = c(y11,y12,y13,y14,y21,y22,y23,y24,y31,y32,y33,y34,y41,y42,y43,y44)
A2 = gl(4,4)
B2 = gl(4,1,16)
# op <- par(mfrow = c(1,2))
# plot(Y~A2+B2,data)
data2 = data.frame(Y,A2,B2)
#
## Additive model only: with one observation per cell there are no degrees
## of freedom left for an interaction term.
data2.aov <- aov(Y~A2+B2,data2)
summary(data2.aov)
# Tukey's one-degree-of-freedom additivity test for interaction (asbio)
tukey.add.test(Y,A2,B2)
# Increase the sample size: 20 replicates per cell
x11 <- rnorm(20,10,1)
x12 <- rnorm(20,10.2,1)
x13 <- rnorm(20,10.4,1)
x14 <- rnorm(20,10.6,1)
x21 <- rnorm(20,8,1)
x22 <- rnorm(20,8.2,1)
x23 <- rnorm(20,8.4,1)
x24 <- rnorm(20,8.6,1)
x31 <- rnorm(20,9,2)
x32 <- rnorm(20,9.2,2)
x33 <- rnorm(20,9.4,2)
x34 <- rnorm(20,9.6,2)
x41 <- rnorm(20,13,1.5)
x42 <- rnorm(20,13.2,1.5)
x43 <- rnorm(20,13.4,1.5)
x44 <- rnorm(20,13.6,1.5)
X = c(x11,x12,x13,x14,x21,x22,x23,x24,x31,x32,x33,x34,x41,x42,x43,x44)
A = gl(4,80)
B = gl(4,20,320)
data = data.frame(X,A,B)
data.aov <- aov(X~A*B,data)
summary(data.aov)
## Hand-rolled balanced two-way ANOVA with interaction.
## X    : numeric response for a balanced nlevels(A) x nlevels(B) design
##        with n replicates per cell.
## A, B : factors of the same length as X.
## Returns a list with F statistics (Fa, Fb, Fab) and the matching
## p-values (pa, pb, pab) for the A, B and A:B effects -- the same values
## reported by summary(aov(X ~ A * B)) for a balanced design.
myaov <- function(X, A, B) {
  les1 <- nlevels(A)                # levels of factor A
  les2 <- nlevels(B)                # levels of factor B
  allevel <- les1 * les2            # number of treatment cells
  n <- length(X) / allevel          # replicates per cell (balanced design)
  m <- mean(X)                      # grand mean
  ## Marginal means for each level of A and of B.
  am <- vapply(split(X, A), mean, numeric(1))
  bm <- vapply(split(X, B), mean, numeric(1))
  ## Cell means from the factors themselves, so the result does not depend
  ## on X being laid out in any particular order.  Bug fix: the original
  ## looped over j in 1:(allevel-1) with X[j*n+i], which skipped the first
  ## cell entirely and under-counted SStreat (off-by-one).
  cellm <- tapply(X, list(A, B), mean)
  SSt <- sum((X - m)^2)             # total sum of squares
  SStreat <- n * sum((cellm - m)^2) # between-cell (treatment) SS
  SSa <- les2 * n * sum((am - m)^2) # main effect A
  va <- les1 - 1
  Ma <- SSa / va
  SSb <- les1 * n * sum((bm - m)^2) # main effect B
  vb <- les2 - 1
  Mb <- SSb / vb
  SSab <- SStreat - SSa - SSb       # interaction SS (by subtraction)
  vab <- va * vb
  Mab <- SSab / vab
  SSe <- SSt - SStreat              # residual (within-cell) SS
  ve <- allevel * (n - 1)
  Me <- SSe / ve
  data <- list()
  data$Fa <- Ma / Me
  data$Fb <- Mb / Me
  data$Fab <- Mab / Me
  data$pa <- 1 - pf(data$Fa, va, ve)
  data$pb <- 1 - pf(data$Fb, vb, ve)
  data$pab <- 1 - pf(data$Fab, vab, ve)
  return(data)
}
## Run the hand-rolled two-way ANOVA on the 20-replicate data and print
## its F statistics and p-values.
print(myaov(X,A,B))
|
/biostatics/sy3/sy3-2.R
|
no_license
|
zonghao7366/Rproject
|
R
| false
| false
| 3,269
|
r
|
# Install the package (run once)
# install.packages("asbio")
# Load the package (asbio provides tukey.add.test used below)
library("asbio")
## Simulated balanced 4x4 design, 10 replicates per cell: cell means step
## by +0.2 across levels of B within each level of A; the A groups differ
## in both mean and standard deviation.
x11 <- rnorm(10,10,1)
x12 <- rnorm(10,10.2,1)
x13 <- rnorm(10,10.4,1)
x14 <- rnorm(10,10.6,1)
x21 <- rnorm(10,8,1)
x22 <- rnorm(10,8.2,1)
x23 <- rnorm(10,8.4,1)
x24 <- rnorm(10,8.6,1)
x31 <- rnorm(10,9,2)
x32 <- rnorm(10,9.2,2)
x33 <- rnorm(10,9.4,2)
x34 <- rnorm(10,9.6,2)
x41 <- rnorm(10,13,1.5)
x42 <- rnorm(10,13.2,1.5)
x43 <- rnorm(10,13.4,1.5)
x44 <- rnorm(10,13.6,1.5)
X = c(x11,x12,x13,x14,x21,x22,x23,x24,x31,x32,x33,x34,x41,x42,x43,x44)
A = gl(4,40)     # factor A: 4 levels, 40 consecutive observations each
B = gl(4,10,160) # factor B: 4 levels, cycling in runs of 10
data = data.frame(X,A,B)
# no interaction
# data.aov <- aov(X~A+B,data)
# summary(data.aov)
# draw..
# op <- par(mfrow = c(1,2))
# plot(X~A+B,data)
# data.aov2 <- aov(X~A+B+A:B,data)
## Two-way ANOVA with interaction, then the additive model for comparison.
data.aov <- aov(X~A*B,data)
summary(data.aov)
data.aov2 <- aov(X~A+B,data)
summary(data.aov2)
# Without replication, can the interaction still be tested in two-way ANOVA?
y11 <- rnorm(1,10,1)
y12 <- rnorm(1,10.2,1)
y13 <- rnorm(1,10.4,1)
y14 <- rnorm(1,10.6,1)
y21 <- rnorm(1,8,1)
y22 <- rnorm(1,8.2,1)
y23 <- rnorm(1,8.4,1)
y24 <- rnorm(1,8.6,1)
y31 <- rnorm(1,9,2)
y32 <- rnorm(1,9.2,2)
y33 <- rnorm(1,9.4,2)
y34 <- rnorm(1,9.6,2)
y41 <- rnorm(1,13,1.5)
y42 <- rnorm(1,13.2,1.5)
y43 <- rnorm(1,13.4,1.5)
y44 <- rnorm(1,13.6,1.5)
# no repeat
Y = c(y11,y12,y13,y14,y21,y22,y23,y24,y31,y32,y33,y34,y41,y42,y43,y44)
A2 = gl(4,4)
B2 = gl(4,1,16)
# op <- par(mfrow = c(1,2))
# plot(Y~A2+B2,data)
data2 = data.frame(Y,A2,B2)
#
## Additive model only: with one observation per cell there are no degrees
## of freedom left for an interaction term.
data2.aov <- aov(Y~A2+B2,data2)
summary(data2.aov)
# Tukey's one-degree-of-freedom additivity test for interaction (asbio)
tukey.add.test(Y,A2,B2)
# Increase the sample size: 20 replicates per cell
x11 <- rnorm(20,10,1)
x12 <- rnorm(20,10.2,1)
x13 <- rnorm(20,10.4,1)
x14 <- rnorm(20,10.6,1)
x21 <- rnorm(20,8,1)
x22 <- rnorm(20,8.2,1)
x23 <- rnorm(20,8.4,1)
x24 <- rnorm(20,8.6,1)
x31 <- rnorm(20,9,2)
x32 <- rnorm(20,9.2,2)
x33 <- rnorm(20,9.4,2)
x34 <- rnorm(20,9.6,2)
x41 <- rnorm(20,13,1.5)
x42 <- rnorm(20,13.2,1.5)
x43 <- rnorm(20,13.4,1.5)
x44 <- rnorm(20,13.6,1.5)
X = c(x11,x12,x13,x14,x21,x22,x23,x24,x31,x32,x33,x34,x41,x42,x43,x44)
A = gl(4,80)
B = gl(4,20,320)
data = data.frame(X,A,B)
data.aov <- aov(X~A*B,data)
summary(data.aov)
## Hand-rolled balanced two-way ANOVA with interaction.
## X    : numeric response for a balanced nlevels(A) x nlevels(B) design
##        with n replicates per cell.
## A, B : factors of the same length as X.
## Returns a list with F statistics (Fa, Fb, Fab) and the matching
## p-values (pa, pb, pab) for the A, B and A:B effects -- the same values
## reported by summary(aov(X ~ A * B)) for a balanced design.
myaov <- function(X, A, B) {
  les1 <- nlevels(A)                # levels of factor A
  les2 <- nlevels(B)                # levels of factor B
  allevel <- les1 * les2            # number of treatment cells
  n <- length(X) / allevel          # replicates per cell (balanced design)
  m <- mean(X)                      # grand mean
  ## Marginal means for each level of A and of B.
  am <- vapply(split(X, A), mean, numeric(1))
  bm <- vapply(split(X, B), mean, numeric(1))
  ## Cell means from the factors themselves, so the result does not depend
  ## on X being laid out in any particular order.  Bug fix: the original
  ## looped over j in 1:(allevel-1) with X[j*n+i], which skipped the first
  ## cell entirely and under-counted SStreat (off-by-one).
  cellm <- tapply(X, list(A, B), mean)
  SSt <- sum((X - m)^2)             # total sum of squares
  SStreat <- n * sum((cellm - m)^2) # between-cell (treatment) SS
  SSa <- les2 * n * sum((am - m)^2) # main effect A
  va <- les1 - 1
  Ma <- SSa / va
  SSb <- les1 * n * sum((bm - m)^2) # main effect B
  vb <- les2 - 1
  Mb <- SSb / vb
  SSab <- SStreat - SSa - SSb       # interaction SS (by subtraction)
  vab <- va * vb
  Mab <- SSab / vab
  SSe <- SSt - SStreat              # residual (within-cell) SS
  ve <- allevel * (n - 1)
  Me <- SSe / ve
  data <- list()
  data$Fa <- Ma / Me
  data$Fb <- Mb / Me
  data$Fab <- Mab / Me
  data$pa <- 1 - pf(data$Fa, va, ve)
  data$pb <- 1 - pf(data$Fb, vb, ve)
  data$pab <- 1 - pf(data$Fab, vab, ve)
  return(data)
}
print(myaov(X,A,B))
|
\name{tabix_getregion}
\alias{tabix_getregion}
\title{
Return the currently selected region of the given tabix file.
}
\description{
Return the currently selected region of the given tabix file. The resulting value does
not reflect the current read position inside that region, i.e. you cannot infer whether
there are any lines left for reading from that region.
}
\usage{
tabix_getregion( tabfh )
}
\arguments{
\item{tabfh}{Tabix handle, once returned by tabix_open}
}
\details{
Use .Call("tabix_getRegion", tabfh ) to eliminate the slight overhead of using the R wrapper function.
}
\value{
The currently selected region of the given tabix file.
}
%XXXXXXXXXXXXXXX
\examples{
##
## Example :
##
gffgzfile <- system.file("extdata", "ex.gff3.gz", package = "WhopGenome" )
gffh <- tabix_open( gffgzfile )
gffh
tabix_setregion( gffh, "ex.1", 1, 400 )
tabix_getregion( gffh )
tabix_close( gffh )
gffh
}
\author{
Ulrich Wittelsbuerger
}
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
tabix_open
}
|
/man/tabix_getregion.Rd
|
no_license
|
cran/WhopGenome
|
R
| false
| false
| 976
|
rd
|
\name{tabix_getregion}
\alias{tabix_getregion}
\title{
Return the currently selected region of the given tabix file.
}
\description{
Return the currently selected region of the given tabix file. The resulting value does
not reflect the current read position inside that region, i.e. you cannot infer whether
there are any lines left for reading from that region.
}
\usage{
tabix_getregion( tabfh )
}
\arguments{
\item{tabfh}{Tabix handle, once returned by tabix_open}
}
\details{
Use .Call("tabix_getRegion", tabfh ) to eliminate the slight overhead of using the R wrapper function.
}
\value{
The currently selected region of the given tabix file.
}
%XXXXXXXXXXXXXXX
\examples{
##
## Example :
##
gffgzfile <- system.file("extdata", "ex.gff3.gz", package = "WhopGenome" )
gffh <- tabix_open( gffgzfile )
gffh
tabix_setregion( gffh, "ex.1", 1, 400 )
tabix_getregion( gffh )
tabix_close( gffh )
gffh
}
\author{
Ulrich Wittelsbuerger
}
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
tabix_open
}
|
##########################################################################################################################################
## IMPORTANT: Make sure you've run loanchargeoff_deployment.R to create the web service before using this script. You'll also
## need to have an ssh session open to the server, as described in the steps in https://aka.ms/campaigntypical?path=hdi#step3
## Finally, scroll to last section to read further instructions for testing the api_frame call
##
##########################################################################################################################################
## This R script should be executed in your local machine to test the web service
## Before remote login from local, please open a ssh session with localhost port 12800 (ssh user login, not admin)
##
## This R script will do the following:
## 1. Remote connect to the port 12800 of the edge node which hosts the web service
## 2. Call the web service from your local machine
## Input : 1. Full path of the four input tables on HDFS or four tables in data frame
## 2. Working directories on local edge node and HDFS
## 3. Stage: "Dev" for development; "Prod" for batch scoring; "Web" for scoring remotely with web service
## Output: The directory on HDFS which contains the result of recommendations
##########################################################################################################################################
##############################################################################################################################
#
# Remote Login for Authentication #
#
##############################################################################################################################
# Load mrsdeploy package
library(mrsdeploy)
# Remote login (admin login)
remoteLogin(
"http://localhost:12800",
username = "admin",
password = "<Enter Cluster Login Password Here>",
session = FALSE
)
##############################################################################################################################
#
# Get and Call the Web Service for String Input #
#
##############################################################################################################################
# Specify the name and version of the web service
name_string <- "loan_chargeoff_string_input"
version <- "v0.0.1"
# Get the API for string input
api_string <- getService(name_string, version)
# Specify working directories on edge node and HDFS
LocalWorkDir <- paste("/var/RevoShare/", Sys.info()[["user"]], "/LoanChargeOff/prod", sep="")
HDFSDataDir <- "/LoanChargeOff/Data"
HDFSWorkDir <- "/LoanChargeOff/web"
# Specify the full path of .csv files on HDFS
Loan_Data <- "Loan_Data1000.csv"
# Call the web service
result_string <- api_string$loan_web_scoring(
Loan_Data = Loan_Data,
LocalWorkDir = LocalWorkDir,
HDFSWorkDir = HDFSWorkDir,
HDFSDataDir = HDFSDataDir,
userName = Sys.info()[["user"]],
Stage = "Web")
##############################################################################################################################
#
# Get and Call the Web Service for data frame Input
# Run this section after putting data into a local folder
# Change local_data_dir accordingly
#
##############################################################################################################################
# Specify the name and version of the web service
name_frame <- "loan_chargeoff_frame_input"
version <- "v0.0.1"
# Get the API for data frame input
api_frame <- getService(name_frame, version)
# Specify working directories on edge node and HDFS
LocalWorkDir <- paste("/var/RevoShare/", Sys.info()[["user"]], "/LoanChargeOff/prod", sep="")
HDFSDataDir <- "/LoanChargeOff/Data"
HDFSWorkDir <- "/LoanChargeOff/web"
# input data
Loan_Data_df <- read.csv("https://raw.githubusercontent.com/Microsoft/r-server-loan-chargeoff/master/HDI/Data/Loan_Data1000.csv")
# Call the web service
result_string <- api_frame$loan_web_scoring(
Loan_Data = Loan_Data_df,
LocalWorkDir = LocalWorkDir,
HDFSWorkDir = HDFSWorkDir,
HDFSDataDir = HDFSDataDir,
userName = Sys.info()[["user"]],
Stage = "Web")
|
/HDI/RSparkCluster/loanchargeoff_web_scoring.R
|
no_license
|
madhuraraju/r-server-loan-chargeoff
|
R
| false
| false
| 4,549
|
r
|
##########################################################################################################################################
## IMPORTANT: Make sure you've run loanchargeoff_deployment.R to create the web service before using this script. You'll also
## need to have an ssh session open to the server, as described in the steps in https://aka.ms/campaigntypical?path=hdi#step3
## Finally, scroll to last section to read further instructions for testing the api_frame call
##
##########################################################################################################################################
## This R script should be executed in your local machine to test the web service
## Before remote login from local, please open a ssh session with localhost port 12800 (ssh user login, not admin)
##
## This R script will do the following:
## 1. Remote connect to the port 12800 of the edge node which hosts the web service
## 2. Call the web service from your local machine
## Input : 1. Full path of the four input tables on HDFS or four tables in data frame
## 2. Working directories on local edge node and HDFS
## 3. Stage: "Dev" for development; "Prod" for batch scoring; "Web" for scoring remotely with web service
## Output: The directory on HDFS which contains the result of recommendations
##########################################################################################################################################
##############################################################################################################################
#
# Remote Login for Authentication #
#
##############################################################################################################################
# Load mrsdeploy package
library(mrsdeploy)
# Remote login (admin login)
remoteLogin(
"http://localhost:12800",
username = "admin",
password = "<Enter Cluster Login Password Here>",
session = FALSE
)
##############################################################################################################################
#
# Get and Call the Web Service for String Input #
#
##############################################################################################################################
# Specify the name and version of the web service
name_string <- "loan_chargeoff_string_input"
version <- "v0.0.1"
# Get the API for string input
api_string <- getService(name_string, version)
# Specify working directories on edge node and HDFS
LocalWorkDir <- paste("/var/RevoShare/", Sys.info()[["user"]], "/LoanChargeOff/prod", sep="")
HDFSDataDir <- "/LoanChargeOff/Data"
HDFSWorkDir <- "/LoanChargeOff/web"
# Specify the full path of .csv files on HDFS
Loan_Data <- "Loan_Data1000.csv"
# Call the web service
result_string <- api_string$loan_web_scoring(
Loan_Data = Loan_Data,
LocalWorkDir = LocalWorkDir,
HDFSWorkDir = HDFSWorkDir,
HDFSDataDir = HDFSDataDir,
userName = Sys.info()[["user"]],
Stage = "Web")
##############################################################################################################################
#
# Get and Call the Web Service for data frame Input
# Run this section after putting data into a local folder
# Change local_data_dir accordingly
#
##############################################################################################################################
# Specify the name and version of the web service
name_frame <- "loan_chargeoff_frame_input"
version <- "v0.0.1"
# Get the API for data frame input
api_frame <- getService(name_frame, version)
# Specify working directories on edge node and HDFS
LocalWorkDir <- paste("/var/RevoShare/", Sys.info()[["user"]], "/LoanChargeOff/prod", sep="")
HDFSDataDir <- "/LoanChargeOff/Data"
HDFSWorkDir <- "/LoanChargeOff/web"
# input data
Loan_Data_df <- read.csv("https://raw.githubusercontent.com/Microsoft/r-server-loan-chargeoff/master/HDI/Data/Loan_Data1000.csv")
# Call the web service
result_string <- api_frame$loan_web_scoring(
Loan_Data = Loan_Data_df,
LocalWorkDir = LocalWorkDir,
HDFSWorkDir = HDFSWorkDir,
HDFSDataDir = HDFSDataDir,
userName = Sys.info()[["user"]],
Stage = "Web")
|
weightedAverage <- function(x, y, weights = TRUE, ...) {
  # Mean of raster `x` over the extent of `y`, optionally weighted by
  # per-pixel coverage fractions.
  # x       : a Raster* object, or a character file path readable by raster().
  # y       : Spatial* object supplying the extent (and CRS) to average over.
  # weights : if TRUE, weight each pixel by its coverage fraction.
  # ...     : forwarded to raster::crop().
  if (inherits(x, "character")) {
    x <- raster::raster(x)
  }
  clipped <- raster::crop(x, y, ...)

  # Plain (unweighted) mean over all clipped cells.
  if (!weights) {
    return(mean(clipped[], na.rm = TRUE))
  }

  # Coverage-weighted mean: pull cell values together with coverage
  # weights for the bounding-box polygon of `y`.
  bbox_poly <- as(raster::extent(y), "SpatialPolygons")
  sp::proj4string(bbox_poly) <- sp::proj4string(y)
  cells <- raster::extract(clipped, bbox_poly, weights = TRUE)[[1]]

  if (all(is.na(cells[, 1]))) {
    return(NA)
  }
  # Blank the weights of NA cells so they drop out of both the
  # numerator and the denominator.
  missing_cells <- is.na(cells[, 1])
  if (any(missing_cells)) {
    cells[missing_cells, 2] <- NA
  }
  sum(apply(cells, 1, prod), na.rm = TRUE) / sum(cells[, 2], na.rm = TRUE)
}
|
/R/weightedAverage.R
|
no_license
|
environmentalinformatics-marburg/bafire
|
R
| false
| false
| 674
|
r
|
weightedAverage <- function(x, y, weights = TRUE, ...) {
  # Mean value of raster `x` over the spatial extent of `y`.
  # x       : Raster* object or a character file path readable by raster().
  # y       : Spatial* object supplying the extent and CRS.
  # weights : if TRUE, weight each pixel by its coverage fraction.
  # ...     : forwarded to raster::crop().
  if (inherits(x, "character"))
    x <- raster::raster(x)
  crp <- raster::crop(x, y, ...)
  ## normal average, ie without weights
  if (!weights) {
    mean(crp[], na.rm = TRUE)
  ## weighted average
  } else {
    # Extract pixel values plus coverage weights for the bounding-box
    # polygon of `y` (column 1 = value, column 2 = weight).
    spy <- as(raster::extent(y), "SpatialPolygons")
    sp::proj4string(spy) <- sp::proj4string(y)
    xtr <- raster::extract(crp, spy, weights = TRUE)[[1]]
    if (all(is.na(xtr[, 1]))) {
      NA
    } else {
      # Blank weights of NA pixels so they drop from both the
      # numerator and the denominator below.
      if (any(is.na(xtr[, 1])))
        xtr[which(is.na(xtr[, 1])), 2] <- NA
      sum(apply(xtr, 1, prod), na.rm = TRUE) / sum(xtr[, 2], na.rm = TRUE)
    }
  }
}
|
`rf.GetCVErr` <-
function(.x, statevec, numtree,CVlist,MyClassWt,MySampSize)
{
## Cross-validated misclassification error for random forests combined
## with recursive feature elimination (RFE).
##   .x         : feature matrix/data frame, samples in rows, features in columns
##   statevec   : class labels (factor), one per row of .x
##   numtree    : number of trees per forest
##   CVlist     : matrix; row j lists the held-out sample indices of CV
##                fold j, padded with NA
##   MyClassWt  : class weights forwarded to randomForest()
##   MySampSize : sampsize forwarded to randomForest(), or NULL to omit it
## Returns a vector of mean overall CV error rates indexed by the number
## of features used in the classifier.
NumCVFold <- nrow(CVlist)
NumStates <- length(unique(statevec))
#cv.errors <- matrix(NA, NumCVFold,ncol(.x))
#sample.errors <- matrix(NA, NumCVFold, nrow(.x))
## Per (feature-count, fold, sample): predicted label and class votes.
sample.errors.bycvfold <- array(data=NA, dim=c(ncol(.x),NumCVFold, nrow(.x)))
sample.votes.bycvfold <- array(data=NA, dim=c(ncol(.x), NumCVFold, nrow(.x),NumStates))
NumStates <- length(unique(statevec))
UStates <- unique(statevec)
cat("Number of CV fold to be run", NumCVFold, "\n")
for (j in 1:NumCVFold)
{
print(j)
indexout.j <- CVlist[j,]
indexout.j <- indexout.j[!is.na(indexout.j)]
### Defining the training and test data for the j-th CV step
x.in<- .x[-indexout.j,]
y.in <- statevec[-indexout.j]
x.out <- .x[indexout.j,]
y.out <- statevec[indexout.j]
NumFtsI <- ncol(x.in)
## RFE loop: fit a forest, record held-out predictions, then drop the
## least-important feature(s) and repeat until one feature remains.
while(NumFtsI >= 2)
{
mymtry <- round(NumFtsI^0.5)
## NOTE(review): `outscale` is not a documented randomForest() argument;
## presumably absorbed by `...` -- confirm it has any effect.
if(!is.null(MySampSize)){
RF1 <- randomForest(x =x.in, y = y.in, importance = TRUE, outscale = TRUE, mtry = mymtry, ntree = numtree,classwt = MyClassWt, sampsize=MySampSize)}
if(is.null(MySampSize)){
RF1 <- randomForest(x =x.in, y = y.in, importance = TRUE, outscale = TRUE, mtry = mymtry, ntree = numtree,classwt = MyClassWt)}
#num <- nrow(x.out)
#cv.errors[j,NumFtsI] <- (num - sum(y.out == predict(RF1,x.out)))/num
sample.errors.bycvfold[NumFtsI,j,indexout.j] <- as.character(predict(RF1, x.out))
sample.votes.bycvfold[NumFtsI,j,indexout.j,] <- predict(RF1,x.out,type="prob")
## Feature elimination: drop 1 feature when <=100 remain, otherwise
## drop the bottom 10% by mean-decrease-in-accuracy importance.
if (NumFtsI <= 2){NumFtsI<-1}
if (NumFtsI > 2 & NumFtsI <=100){
imp <- RF1$importance
imp.dec.accuracy <- imp[,NumStates+1]
vars.order <- sort(imp.dec.accuracy,index.return=TRUE)$ix
vars.keep <- vars.order[2:length(vars.order)]
x.in <- x.in[,vars.keep]
x.out <- x.out[,vars.keep]
NumFtsI <- ncol(x.in)
}
if (NumFtsI > 100){
imp <- RF1$importance
imp.dec.accuracy <- imp[,NumStates+1]
vars.order <- sort(imp.dec.accuracy,index.return=TRUE)$ix
keep.percent <- round(0.1*NumFtsI)
vars.keep <- vars.order[(keep.percent+1):length(vars.order)]
x.in <- x.in[,vars.keep]
x.out <- x.out[,vars.keep]
NumFtsI <- ncol(x.in)
}
}
}
#### Output an average CV error (overall classes and class specific) as a function of number of features in classifier
#### Output a predicted state for each subject as a function of the number of features in classifier
SamplePredictions <- matrix(NA, nrow(.x), ncol(.x))
SamplePrediction.ClassProb <- matrix(NA, nrow(.x), ncol(.x))
ErrorRates <- array(data=NA, dim=c(ncol(.x), NumCVFold, length(unique(statevec)) + 1))
ErrorRatesFromSamplePred <- array(data=NA, dim=c(ncol(.x), length(unique(statevec)) + 1))
## Summarise the per-fold predictions into error rates via GetCVOutput()
## (defined elsewhere in this package).
for (i in 1:ncol(.x)){#print(i)
tempi <- sample.errors.bycvfold[i,,]
results.i <- GetCVOutput(tempi, statevec)
#SamplePredictions[,i] <- results.i$SamplePreds
#SamplePrediction.ClassProb[,i] <- results.i$SamplePredErrs
ErrorRates[i,,] <- t(results.i$CVErrRates)
#ErrorRatesFromSamplePred[i,] <- ClassErrFun(results.i$SamplePreds, statevec)
}
#out <- list(sample.votes.bycvfold = sample.votes.bycvfold, sample.errors.bycvfold = sample.errors.bycvfold, SamplePredictions=SamplePredictions,SamplePrediction.ClassProb=SamplePrediction.ClassProb,ErrorRates=ErrorRates,ErrorRatesFromSamplePred=ErrorRatesFromSamplePred)
## Mean overall (column 1) error rate across folds, per feature count.
out <- apply(t(ErrorRates[,,1]),2,mean,na.rm=TRUE)
return(out)
}
|
/R/rf.GetCVErr.R
|
no_license
|
cran/MVpower
|
R
| false
| false
| 3,986
|
r
|
`rf.GetCVErr` <-
function(.x, statevec, numtree,CVlist,MyClassWt,MySampSize)
{
## Cross-validated misclassification error for random forests combined
## with recursive feature elimination (RFE).
##   .x         : feature matrix/data frame, samples in rows, features in columns
##   statevec   : class labels (factor), one per row of .x
##   numtree    : number of trees per forest
##   CVlist     : matrix; row j lists the held-out sample indices of CV
##                fold j, padded with NA
##   MyClassWt  : class weights forwarded to randomForest()
##   MySampSize : sampsize forwarded to randomForest(), or NULL to omit it
## Returns a vector of mean overall CV error rates indexed by the number
## of features used in the classifier.
NumCVFold <- nrow(CVlist)
NumStates <- length(unique(statevec))
#cv.errors <- matrix(NA, NumCVFold,ncol(.x))
#sample.errors <- matrix(NA, NumCVFold, nrow(.x))
## Per (feature-count, fold, sample): predicted label and class votes.
sample.errors.bycvfold <- array(data=NA, dim=c(ncol(.x),NumCVFold, nrow(.x)))
sample.votes.bycvfold <- array(data=NA, dim=c(ncol(.x), NumCVFold, nrow(.x),NumStates))
NumStates <- length(unique(statevec))
UStates <- unique(statevec)
cat("Number of CV fold to be run", NumCVFold, "\n")
for (j in 1:NumCVFold)
{
print(j)
indexout.j <- CVlist[j,]
indexout.j <- indexout.j[!is.na(indexout.j)]
### Defining the training and test data for the j-th CV step
x.in<- .x[-indexout.j,]
y.in <- statevec[-indexout.j]
x.out <- .x[indexout.j,]
y.out <- statevec[indexout.j]
NumFtsI <- ncol(x.in)
## RFE loop: fit a forest, record held-out predictions, then drop the
## least-important feature(s) and repeat until one feature remains.
while(NumFtsI >= 2)
{
mymtry <- round(NumFtsI^0.5)
## NOTE(review): `outscale` is not a documented randomForest() argument;
## presumably absorbed by `...` -- confirm it has any effect.
if(!is.null(MySampSize)){
RF1 <- randomForest(x =x.in, y = y.in, importance = TRUE, outscale = TRUE, mtry = mymtry, ntree = numtree,classwt = MyClassWt, sampsize=MySampSize)}
if(is.null(MySampSize)){
RF1 <- randomForest(x =x.in, y = y.in, importance = TRUE, outscale = TRUE, mtry = mymtry, ntree = numtree,classwt = MyClassWt)}
#num <- nrow(x.out)
#cv.errors[j,NumFtsI] <- (num - sum(y.out == predict(RF1,x.out)))/num
sample.errors.bycvfold[NumFtsI,j,indexout.j] <- as.character(predict(RF1, x.out))
sample.votes.bycvfold[NumFtsI,j,indexout.j,] <- predict(RF1,x.out,type="prob")
## Feature elimination: drop 1 feature when <=100 remain, otherwise
## drop the bottom 10% by mean-decrease-in-accuracy importance.
if (NumFtsI <= 2){NumFtsI<-1}
if (NumFtsI > 2 & NumFtsI <=100){
imp <- RF1$importance
imp.dec.accuracy <- imp[,NumStates+1]
vars.order <- sort(imp.dec.accuracy,index.return=TRUE)$ix
vars.keep <- vars.order[2:length(vars.order)]
x.in <- x.in[,vars.keep]
x.out <- x.out[,vars.keep]
NumFtsI <- ncol(x.in)
}
if (NumFtsI > 100){
imp <- RF1$importance
imp.dec.accuracy <- imp[,NumStates+1]
vars.order <- sort(imp.dec.accuracy,index.return=TRUE)$ix
keep.percent <- round(0.1*NumFtsI)
vars.keep <- vars.order[(keep.percent+1):length(vars.order)]
x.in <- x.in[,vars.keep]
x.out <- x.out[,vars.keep]
NumFtsI <- ncol(x.in)
}
}
}
#### Output an average CV error (overall classes and class specific) as a function of number of features in classifier
#### Output a predicted state for each subject as a function of the number of features in classifier
SamplePredictions <- matrix(NA, nrow(.x), ncol(.x))
SamplePrediction.ClassProb <- matrix(NA, nrow(.x), ncol(.x))
ErrorRates <- array(data=NA, dim=c(ncol(.x), NumCVFold, length(unique(statevec)) + 1))
ErrorRatesFromSamplePred <- array(data=NA, dim=c(ncol(.x), length(unique(statevec)) + 1))
## Summarise the per-fold predictions into error rates via GetCVOutput()
## (defined elsewhere in this package).
for (i in 1:ncol(.x)){#print(i)
tempi <- sample.errors.bycvfold[i,,]
results.i <- GetCVOutput(tempi, statevec)
#SamplePredictions[,i] <- results.i$SamplePreds
#SamplePrediction.ClassProb[,i] <- results.i$SamplePredErrs
ErrorRates[i,,] <- t(results.i$CVErrRates)
#ErrorRatesFromSamplePred[i,] <- ClassErrFun(results.i$SamplePreds, statevec)
}
#out <- list(sample.votes.bycvfold = sample.votes.bycvfold, sample.errors.bycvfold = sample.errors.bycvfold, SamplePredictions=SamplePredictions,SamplePrediction.ClassProb=SamplePrediction.ClassProb,ErrorRates=ErrorRates,ErrorRatesFromSamplePred=ErrorRatesFromSamplePred)
## Mean overall (column 1) error rate across folds, per feature count.
out <- apply(t(ErrorRates[,,1]),2,mean,na.rm=TRUE)
return(out)
}
|
# Test utils - only used for tests
#' Get path to the posterior database
#' @noRd
get_test_pdb_dir <- function(x) {
  # NOTE(review): the argument `x` is immediately overwritten, so the
  # supplied value is ignored -- presumably intended as an `x = getwd()`
  # default; confirm before relying on the parameter.
  x <- getwd()
  # If on Travis - use Travis build path
  # To handle covr::codecov, that test package in temp folder
  if (on_travis()) x <- Sys.getenv("TRAVIS_BUILD_DIR")
  # If on Appveyor - use Appveyor build path
  if (on_appveyor()) x <- Sys.getenv("APPVEYOR_BUILD_FOLDER")
  # Resolve the nearest enclosing posterior_database directory.
  find_local_posterior_database(x)
}
find_local_posterior_database <- function(x) {
  # Walk upwards from directory `x` until a "posterior_database" folder
  # is found, then validate it as a pdb endpoint.
  # Errors (via stop2) when no valid database exists on the path.
  checkmate::assert_directory(x)
  fp <- normalizePath(x)
  # Stop at the filesystem root, where dirname() is a fixed point.
  # Bug fix: the original tested `basename(fp) != ""`, which never
  # becomes FALSE at the root on most platforms (basename("/") is "/"),
  # so the walk could loop forever when no database exists.
  while (!"posterior_database" %in% dir(fp) && fp != dirname(fp)) {
    fp <- dirname(fp)
  }
  if (!"posterior_database" %in% dir(fp)) {
    stop2("No local posterior database in path '", x, "'.")
  }
  fpep <- file.path(fp, "posterior_database")
  pdb <- pdb_local(fpep)
  # Only accept the directory if it is a usable pdb endpoint.
  if (is_pdb_endpoint(pdb)) {
    return(fpep)
  } else {
    stop2("No local posterior database in path '", fpep, "'.")
  }
}
# Environment probes used to detect CI services / coverage runs.
on_travis <- function() {
  identical(Sys.getenv("TRAVIS"), "true")
}
on_appveyor <- function() {
  # AppVeyor's env var casing varies, hence the tolower().
  identical(tolower(Sys.getenv("APPVEYOR")), "true")
}
on_covr <- function() {
  identical(Sys.getenv("R_COVR"), "true")
}
|
/rpackage/R/utils_tests.R
|
no_license
|
gbdrt/posteriordb
|
R
| false
| false
| 1,127
|
r
|
# Test utils - only used for tests
#' Get path to the posterior database
#' @noRd
get_test_pdb_dir <- function(x) {
  # NOTE(review): `x` is immediately overwritten below, so the supplied
  # argument is ignored -- likely meant to be an `x = getwd()` default;
  # confirm before relying on the parameter.
  x <- getwd()
  # If on Travis - use Travis build path
  # To handle covr::codecov, that test package in temp folder
  if (on_travis()) x <- Sys.getenv("TRAVIS_BUILD_DIR")
  # If on Appveyor - use Appveyor build path
  if (on_appveyor()) x <- Sys.getenv("APPVEYOR_BUILD_FOLDER")
  find_local_posterior_database(x)
}
# Walk upwards from `x` until a "posterior_database" directory is found;
# errors (via stop2) if none exists or it is not a valid pdb endpoint.
find_local_posterior_database <- function(x){
  checkmate::assert_directory(x)
  fp <- normalizePath(x)
  # NOTE(review): on most platforms basename() of the filesystem root is
  # not "" (e.g. basename("/") is "/"), so this loop may never terminate
  # when no database exists -- TODO confirm and guard with
  # `fp != dirname(fp)` instead.
  while (!"posterior_database" %in% dir(fp) & basename(fp) != "") {
    fp <- dirname(fp)
  }
  if (basename(fp) == "") {
    stop2("No local posterior database in path '", x, "'.")
  }
  fpep <- file.path(fp, "posterior_database")
  pdb <- pdb_local(fpep)
  # Only accept the directory if it is a usable pdb endpoint.
  if(is_pdb_endpoint(pdb)){
    return(fpep)
  } else {
    stop2("No local posterior database in path '", fpep, "'.")
  }
}
# Environment probes for CI services / coverage runs.
on_travis <- function() identical(Sys.getenv("TRAVIS"), "true")
on_appveyor <- function() identical(tolower(Sys.getenv("APPVEYOR")), "true")
on_covr <- function() identical(Sys.getenv("R_COVR"), "true")
|
library(ggplot2)
library(dplyr)
library(FactoMineR) # pour MCA
library(missMDA)
library(cluster) # pour agnes
library(RColorBrewer)
library(NbClust)
library(fastcluster) # pour hclust
library(factoextra) # pour fviz_mca_ind
library(sp)
library(ggdendro)
{
load(file="data/satellite/Detritus/TabDetfin.Rdata")
load(file="data/satellite/Primary production/TabPPfin.Rdata")
load(file="data/satellite/sst/Tabsstfin.Rdata")
load(file="data/satellite/chl/Tabchlfin.Rdata")
load(file="data/satellite/Turbidity/TabTurbfin.Rdata")
load(file="data/satellite/Salinity/TabSalfin.Rdata")
load(file="data/satellite/Particles/TabPartfin.Rdata")
load(file="data/satellite/O2/TabO2fin.Rdata")
load("data/satellite/Detritus/Det_polygons.Rdata")
load("data/satellite/Primary production/PP_polygons.Rdata")
load("data/satellite/sst/sst_polygons.Rdata")
load("data/satellite/chl/chl_polygons.Rdata")
load("data/satellite/Turbidity/Turb_polygons.Rdata")
load("data/satellite/Salinity/Sal_polygons.Rdata")
load("data/satellite/Particles/Part_polygons.Rdata")
load("data/satellite/O2/O2_polygons.Rdata")
load("data/PolyCut.Rdata")
}
# create an empty grid of values ranging from the xmin-xmax, ymin-ymax
grd <- expand.grid(Long = seq(from = min(TabSalfin$x),
to = max(TabSalfin$x),
by = 0.01),
Lat = seq(from =min(TabSalfin$y),
to = max(TabSalfin$y),
by = 0.01))
points <- structure(list(grd$Long, grd$Lat), .Names = c("Long", "Lat"),
class = "data.frame", row.names = c(NA, dim(grd)[1]))
spdf <- SpatialPointsDataFrame(coords = points, data = points)
noms <- c("polChl","polDet","polO2","polPart","polPP","polSal","polSST","polTurb")
t <- 0
for (i in list(polChl,polDet,polO2,polPart,polPP,polSal,polSST,polTurb)){
t <- t+1
pipo <- sp::over(spdf, i)
pipo$Clust2 <- paste0(rep(substr(noms[t],4,nchar(noms[t])),dim(pipo)[1]), pipo$Clust)
names(pipo) <- c("Clust",substr(noms[t],4,nchar(noms[t])))
grd <- cbind(grd,pipo)
}
dim(na.omit(grd))
grd2<- na.omit(grd)
plot(grd2[,c(1,2)])
grd2<- grd2[, !duplicated(colnames(grd2))]
grd2<- grd2[,-3]
# ACM
rez<- MCA(grd2[,-c(1,2)], ncp=999, method="Burt", graph=F)
plt1<- plotellipses(rez, axes=c(1,2))
plt2<- plotellipses(rez, axes=c(1,3))
# Classification
distance<- dist(rez$ind$coord)
tree<- hclust(distance, method="ward.D2")
plot(tree, hang= -1, main ="", ylab ="Distance", xlab= "", labels = F)
#NbClust(rez$ind$coord, min.nc = 2, max.nc = 10, index="all", method = "ward.D2")
# According to the majority rule, the best number of clusters is 10 (5 indicateurs, puis 4 indicateurs pour 6, 3 ou 2 clusters)
rect.hclust(tree, k=6)
groups<- cutree(tree, k=6)
fviz_mca_ind(rez, repel=T, addEllipses=F, axes=c(1,2), geom="point",
col.ind = factor(groups),
#palette = brewer.pal(n = 6, name = "YlOrBr"),
pointsize = 4,
labelsize = 1,
title = "")+
scale_color_manual(name = "Zones", values= c("#FFFFD4", "#FEE391", "#FEC44F", "#FE9929", "#D95F0E", "#993404"))+
theme(legend.title = element_text(size= 25))+
theme(legend.text = element_text(size= 25))+
theme(axis.title.x = element_text(size= 20))+
theme(axis.title.y = element_text(size= 20))
tata<- cbind(grd2[,c(1,2)],Clust=factor(groups))
save(tata, file="results/satellite/Coordzones.Rdata")
# Plot
ggplot(tata)+
geom_tile(aes(x=Long,y=Lat,fill= as.numeric(Clust)))+
geom_polygon(data=PolyCut, aes(x=long, y=lat, group=group), fill=NA, col="black")+
#ggtitle("Final bioregionalization")+
scale_fill_gradientn(colours =brewer.pal(n = 5, name = "YlOrBr")) +
xlab("Longitude")+
ylab("Latitude")+
theme_minimal()+
labs(fill= "Zones")+
theme(legend.title = element_text(size = 15))+
theme(legend.text = element_text(size = 15))+
theme(plot.title = element_text(size = 20))+
theme(axis.title.x = element_text(size = 15))+
theme(axis.text.x = element_text(size = 10))+
theme(axis.title.y = element_text(size = 15))+
theme(axis.text.y = element_text(size = 10))
# Découpage tata en fonction de polycut
load("data/res.Rdata")
# create SpatialPointsDataFrame
tataras<- tata
coordinates(tataras)<- ~ Long + Lat
# coerce to SpatialPixelsDataFrame
gridded(tataras) <- TRUE
# coerce to raster
rastertata<- raster(tataras)
rastertata
plot(rastertata, col=brewer.pal(n = 6, name = "YlOrBr"), xlab="Longitude", ylab="Latitude")
rastertata2<- mask(rastertata, res)
plot(rastertata2, col=brewer.pal(n = 6, name = "YlOrBr"), main="Après mask", xlab="Longitude", ylab="Latitude")
# Conversion raster - tableau
fortify.Raster <- function(rastertata2, maxPixel = 1000000) {
  # Convert a Raster layer to a data frame with columns values, x, y.
  # Rasters larger than maxPixel cells are regularly resampled first so
  # the resulting data frame stays bounded.
  # Bug fix: the downsampled raster returned by sampleRegular() was
  # assigned to an unused variable `x` and then discarded, so the
  # maxPixel cap previously had no effect.
  if (ncell(rastertata2) > maxPixel) {
    rastertata2 <- sampleRegular(rastertata2, maxPixel, asRaster = TRUE)
  }
  xy <- xyFromCell(rastertata2, seq_len(ncell(rastertata2)))
  out <- cbind(data.frame(values = getValues(rastertata2)), xy)
  return(out)
}
tatatab<- fortify(rastertata2)
tatatab<- na.omit(tatatab)
allparam<- ggplot(tatatab)+
geom_tile(aes(x=x,y=y,fill= as.factor(values)))+
geom_polygon(data=PolyCut, aes(x=long, y=lat, group=group), fill=NA, col="black")+
#ggtitle("Final bioregionalization")+
scale_fill_manual(values = c("#FFFFD4", "#FEE391", "#FEC44F", "#FE9929", "#D95F0E", "#993404"))+
xlab("Longitude")+
ylab("Latitude")+
theme_minimal()+
labs(fill= "Zones")+
geom_sf(data=windfarms, fill="blue")+
theme(legend.title = element_text(size = 30))+
theme(legend.text = element_text(size = 30))+
theme(plot.title = element_text(size = 20))+
theme(axis.title.x = element_text(size = 15))+
theme(axis.text.x = element_text(size = 10))+
theme(axis.title.y = element_text(size = 15))+
theme(axis.text.y = element_text(size = 10))
save(allparam, file="results/satellite/zones/Env.Rdata")
ggsave(plot= allparam, filename="All.jpeg", path="results/satellite/zones", width = 13, height = 8)
|
/analyses/Regionalisation finale.R
|
permissive
|
JehanneRiv/SeineMSP
|
R
| false
| false
| 6,408
|
r
|
library(ggplot2)
library(dplyr)
library(FactoMineR) # pour MCA
library(missMDA)
library(cluster) # pour agnes
library(RColorBrewer)
library(NbClust)
library(fastcluster) # pour hclust
library(factoextra) # pour fviz_mca_ind
library(sp)
library(ggdendro)
{
load(file="data/satellite/Detritus/TabDetfin.Rdata")
load(file="data/satellite/Primary production/TabPPfin.Rdata")
load(file="data/satellite/sst/Tabsstfin.Rdata")
load(file="data/satellite/chl/Tabchlfin.Rdata")
load(file="data/satellite/Turbidity/TabTurbfin.Rdata")
load(file="data/satellite/Salinity/TabSalfin.Rdata")
load(file="data/satellite/Particles/TabPartfin.Rdata")
load(file="data/satellite/O2/TabO2fin.Rdata")
load("data/satellite/Detritus/Det_polygons.Rdata")
load("data/satellite/Primary production/PP_polygons.Rdata")
load("data/satellite/sst/sst_polygons.Rdata")
load("data/satellite/chl/chl_polygons.Rdata")
load("data/satellite/Turbidity/Turb_polygons.Rdata")
load("data/satellite/Salinity/Sal_polygons.Rdata")
load("data/satellite/Particles/Part_polygons.Rdata")
load("data/satellite/O2/O2_polygons.Rdata")
load("data/PolyCut.Rdata")
}
# create an empty grid of values ranging from the xmin-xmax, ymin-ymax
grd <- expand.grid(Long = seq(from = min(TabSalfin$x),
to = max(TabSalfin$x),
by = 0.01),
Lat = seq(from =min(TabSalfin$y),
to = max(TabSalfin$y),
by = 0.01))
points <- structure(list(grd$Long, grd$Lat), .Names = c("Long", "Lat"),
class = "data.frame", row.names = c(NA, dim(grd)[1]))
spdf <- SpatialPointsDataFrame(coords = points, data = points)
noms <- c("polChl","polDet","polO2","polPart","polPP","polSal","polSST","polTurb")
t <- 0
for (i in list(polChl,polDet,polO2,polPart,polPP,polSal,polSST,polTurb)){
t <- t+1
pipo <- sp::over(spdf, i)
pipo$Clust2 <- paste0(rep(substr(noms[t],4,nchar(noms[t])),dim(pipo)[1]), pipo$Clust)
names(pipo) <- c("Clust",substr(noms[t],4,nchar(noms[t])))
grd <- cbind(grd,pipo)
}
dim(na.omit(grd))
grd2<- na.omit(grd)
plot(grd2[,c(1,2)])
grd2<- grd2[, !duplicated(colnames(grd2))]
grd2<- grd2[,-3]
# ACM
rez<- MCA(grd2[,-c(1,2)], ncp=999, method="Burt", graph=F)
plt1<- plotellipses(rez, axes=c(1,2))
plt2<- plotellipses(rez, axes=c(1,3))
# Classification
distance<- dist(rez$ind$coord)
tree<- hclust(distance, method="ward.D2")
plot(tree, hang= -1, main ="", ylab ="Distance", xlab= "", labels = F)
#NbClust(rez$ind$coord, min.nc = 2, max.nc = 10, index="all", method = "ward.D2")
# According to the majority rule, the best number of clusters is 10 (5 indicateurs, puis 4 indicateurs pour 6, 3 ou 2 clusters)
rect.hclust(tree, k=6)
groups<- cutree(tree, k=6)
fviz_mca_ind(rez, repel=T, addEllipses=F, axes=c(1,2), geom="point",
col.ind = factor(groups),
#palette = brewer.pal(n = 6, name = "YlOrBr"),
pointsize = 4,
labelsize = 1,
title = "")+
scale_color_manual(name = "Zones", values= c("#FFFFD4", "#FEE391", "#FEC44F", "#FE9929", "#D95F0E", "#993404"))+
theme(legend.title = element_text(size= 25))+
theme(legend.text = element_text(size= 25))+
theme(axis.title.x = element_text(size= 20))+
theme(axis.title.y = element_text(size= 20))
tata<- cbind(grd2[,c(1,2)],Clust=factor(groups))
save(tata, file="results/satellite/Coordzones.Rdata")
# Plot
ggplot(tata)+
geom_tile(aes(x=Long,y=Lat,fill= as.numeric(Clust)))+
geom_polygon(data=PolyCut, aes(x=long, y=lat, group=group), fill=NA, col="black")+
#ggtitle("Final bioregionalization")+
scale_fill_gradientn(colours =brewer.pal(n = 5, name = "YlOrBr")) +
xlab("Longitude")+
ylab("Latitude")+
theme_minimal()+
labs(fill= "Zones")+
theme(legend.title = element_text(size = 15))+
theme(legend.text = element_text(size = 15))+
theme(plot.title = element_text(size = 20))+
theme(axis.title.x = element_text(size = 15))+
theme(axis.text.x = element_text(size = 10))+
theme(axis.title.y = element_text(size = 15))+
theme(axis.text.y = element_text(size = 10))
# Découpage tata en fonction de polycut
load("data/res.Rdata")
# create SpatialPointsDataFrame
tataras<- tata
coordinates(tataras)<- ~ Long + Lat
# coerce to SpatialPixelsDataFrame
gridded(tataras) <- TRUE
# coerce to raster
rastertata<- raster(tataras)
rastertata
plot(rastertata, col=brewer.pal(n = 6, name = "YlOrBr"), xlab="Longitude", ylab="Latitude")
rastertata2<- mask(rastertata, res)
plot(rastertata2, col=brewer.pal(n = 6, name = "YlOrBr"), main="Après mask", xlab="Longitude", ylab="Latitude")
# Conversion raster - tableau
fortify.Raster <- function(rastertata2, maxPixel = 1000000) {
  # Convert a Raster layer to a data frame with columns values, x, y.
  # Rasters larger than maxPixel cells are regularly resampled first so
  # the resulting data frame stays bounded.
  # Bug fix: the downsampled raster returned by sampleRegular() was
  # assigned to an unused variable `x` and then discarded, so the
  # maxPixel cap previously had no effect.
  if (ncell(rastertata2) > maxPixel) {
    rastertata2 <- sampleRegular(rastertata2, maxPixel, asRaster = TRUE)
  }
  xy <- xyFromCell(rastertata2, seq_len(ncell(rastertata2)))
  out <- cbind(data.frame(values = getValues(rastertata2)), xy)
  return(out)
}
# Convert the masked raster to a long data.frame and drop NA (masked-out) cells
tatatab<- fortify(rastertata2)
tatatab<- na.omit(tatatab)
# Final zone map: one discrete colour per zone, wind farms overlaid in blue
allparam<- ggplot(tatatab)+
geom_tile(aes(x=x,y=y,fill= as.factor(values)))+
geom_polygon(data=PolyCut, aes(x=long, y=lat, group=group), fill=NA, col="black")+
#ggtitle("Final bioregionalization")+
scale_fill_manual(values = c("#FFFFD4", "#FEE391", "#FEC44F", "#FE9929", "#D95F0E", "#993404"))+
xlab("Longitude")+
ylab("Latitude")+
theme_minimal()+
labs(fill= "Zones")+
geom_sf(data=windfarms, fill="blue")+
theme(legend.title = element_text(size = 30))+
theme(legend.text = element_text(size = 30))+
theme(plot.title = element_text(size = 20))+
theme(axis.title.x = element_text(size = 15))+
theme(axis.text.x = element_text(size = 10))+
theme(axis.title.y = element_text(size = 15))+
theme(axis.text.y = element_text(size = 10))
# Persist both the ggplot object and a rendered JPEG
save(allparam, file="results/satellite/zones/Env.Rdata")
ggsave(plot= allparam, filename="All.jpeg", path="results/satellite/zones", width = 13, height = 8)
|
# Lasso penalty grid for glmmLasso: from heavily penalised (lambda = 600)
# down to an unpenalised fit (lambda = 0) in steps of 5.
lambda <- seq(600,0,by=-5)
# BIC of each fit; Inf placeholders so which.min() ignores failed/unvisited entries
BIC_vec<-rep(Inf,length(lambda))
# specify starting values for the very first fit; pay attention that Delta.start has suitable length!
Delta.start<-as.matrix(t(rep(0,583)))
Q.start<-diag(0.1, 2)
for(j in 1:length(lambda))
{
print(paste("Iteration ", j,sep=""))
# Warm start: each fit is initialised from the converged coefficients
# (Delta.start, grown by one row per iteration, so row j is the previous
# fit's solution) and the random-effects covariance Q.start.
glm3 <- glmmLasso(log(rt) ~
scale(subtlex.frequency) +
scale(celex.frequency) +
scale(celex.frequency.lemma) +
scale(bnc.frequency),
rnd = list(participant = ~1, spelling = ~1),
data = subset(dat, !is.na(rt)),
lambda=lambda[j],
switch.NR=FALSE,
final.re=TRUE,
control = glmmLassoControl(start=Delta.start[j,],q_start=Q.start))
# Report which of the four frequency predictors survive at this lambda
print(colnames(glm3$Deltamatrix)[2:5][glm3$Deltamatrix[glm3$conv.step,2:5]!=0])
BIC_vec[j]<-glm3$bic
# Append this fit's converged state for the next iteration's warm start
Delta.start<-rbind(Delta.start,glm3$Deltamatrix[glm3$conv.step,])
Q.start<-glm3$Q_long[[glm3$conv.step+1]]
}
# Refit at the BIC-optimal penalty to obtain the final model
opt3<-which.min(BIC_vec)
final.model <- glmmLasso(log(rt) ~
scale(subtlex.frequency) +
scale(celex.frequency) +
scale(celex.frequency.lemma) +
scale(bnc.frequency),
rnd = list(participant = ~1, spelling = ~1),
data = subset(dat, !is.na(rt)),
lambda=lambda[opt3],
switch.NR=FALSE,
final.re=TRUE)
|
/w2/lambda.R
|
no_license
|
aecay/leeds-modeling-workshop
|
R
| false
| false
| 1,565
|
r
|
# Lasso penalty grid for glmmLasso: from heavily penalised (lambda = 600)
# down to an unpenalised fit (lambda = 0) in steps of 5.
lambda <- seq(600,0,by=-5)
# BIC of each fit; Inf placeholders so which.min() ignores failed/unvisited entries
BIC_vec<-rep(Inf,length(lambda))
# specify starting values for the very first fit; pay attention that Delta.start has suitable length!
Delta.start<-as.matrix(t(rep(0,583)))
Q.start<-diag(0.1, 2)
for(j in 1:length(lambda))
{
print(paste("Iteration ", j,sep=""))
# Warm start: each fit is initialised from the converged coefficients
# (Delta.start, grown by one row per iteration, so row j is the previous
# fit's solution) and the random-effects covariance Q.start.
glm3 <- glmmLasso(log(rt) ~
scale(subtlex.frequency) +
scale(celex.frequency) +
scale(celex.frequency.lemma) +
scale(bnc.frequency),
rnd = list(participant = ~1, spelling = ~1),
data = subset(dat, !is.na(rt)),
lambda=lambda[j],
switch.NR=FALSE,
final.re=TRUE,
control = glmmLassoControl(start=Delta.start[j,],q_start=Q.start))
# Report which of the four frequency predictors survive at this lambda
print(colnames(glm3$Deltamatrix)[2:5][glm3$Deltamatrix[glm3$conv.step,2:5]!=0])
BIC_vec[j]<-glm3$bic
# Append this fit's converged state for the next iteration's warm start
Delta.start<-rbind(Delta.start,glm3$Deltamatrix[glm3$conv.step,])
Q.start<-glm3$Q_long[[glm3$conv.step+1]]
}
# Refit at the BIC-optimal penalty to obtain the final model
opt3<-which.min(BIC_vec)
final.model <- glmmLasso(log(rt) ~
scale(subtlex.frequency) +
scale(celex.frequency) +
scale(celex.frequency.lemma) +
scale(bnc.frequency),
rnd = list(participant = ~1, spelling = ~1),
data = subset(dat, !is.na(rt)),
lambda=lambda[opt3],
switch.NR=FALSE,
final.re=TRUE)
|
## makeCacheMatrix: build a cache-aware matrix wrapper.
## Returns a list of four closures sharing one environment:
##  - set:        store a new matrix (and invalidate any cached inverse)
##  - get:        return the stored matrix
##  - setInverse: store a computed inverse in the cache
##  - getInverse: return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # a new matrix invalidates the cached inverse
    },
    get = function() x,
    setInverse = function(solve) inv <<- solve,
    getInverse = function() inv
  )
}
## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix. A cached inverse is returned directly (with a status
## message); otherwise the inverse is computed with solve() and cached
## before being returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  ## Cache hit: reuse the previously computed inverse
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: compute, store, and return the inverse
  inv <- solve(x$get())
  x$setInverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
wwaichee/ProgrammingAssignment2
|
R
| false
| false
| 1,383
|
r
|
## Wrap a matrix in a caching container. The returned list exposes set/get
## for the matrix itself and setInverse/getInverse for a cached inverse;
## storing a new matrix clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  store <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # invalidate stale inverse
  }
  fetch <- function() x
  store_inverse <- function(solve) cached_inverse <<- solve
  fetch_inverse <- function() cached_inverse
  list(set = store, get = fetch,
       setInverse = store_inverse, getInverse = fetch_inverse)
}
## Compute (or retrieve from cache) the inverse of a makeCacheMatrix object.
## On a cache miss the inverse is computed via solve() and written back
## through setInverse(); on a hit a status message is emitted and the
## cached value returned unchanged.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  hit <- x$getInverse()
  if (is.null(hit)) {
    inv <- solve(x$get())
    x$setInverse(inv)
    return(inv)
  }
  message("getting cached data")
  hit
}
|
### BIGCAAT: BIGDAWG Integrated Genotype Converted Amino Acid Testing
### Version 0.3.3
### Authors: Liva Tran, Vinh Luu, Steven J. Mack
##Combines Datafile Procession, AA extraction, and combination analyzer into one function. Changes made for redundancy.
#Requirements -- loaded with library() rather than require() so a missing
#dependency fails loudly at load time instead of silently returning FALSE
#and producing confusing "could not find function" errors later.
library(data.table)
library(stringr)
library(BIGDAWG)
library(gtools)
library(dplyr) ## LT
#Reference data: per-locus amino-acid/exon boundary atlas used downstream
load("AA_atlas.rda")
##Part 1 - Datafile Processing##
# Datafile_Processing: clean a BIGDAWG-style genotype table.
# Keeps the first two metadata columns unchanged, then, for each adjacent
# pair of allele columns whose name appears in `locus`, runs the pair
# through Dataset_Allele_Check_V2 (two-field truncation; single-field
# alleles replaced with NA) and appends the cleaned pair to the result.
#
# Args:
#   locus:         character vector of locus names to retain.
#   Genotype_Data: data.frame; columns 1-2 are metadata, locus data
#                  follows as column pairs from column 3 onward.
# Returns: data.frame of metadata columns plus the cleaned locus pairs.
Datafile_Processing <- function(locus, Genotype_Data) {
  cleaned <- Genotype_Data[, 1:2]
  colnames(cleaned) <- colnames(Genotype_Data)[1:2]
  # Locus genotypes arrive as adjacent column pairs starting at column 3.
  for (col in seq(3, length(Genotype_Data), 2)) {
    if (colnames(Genotype_Data[col]) %in% locus) {
      pair <- Genotype_Data[, col:(col + 1)]
      colnames(pair) <- colnames(Genotype_Data)[col:(col + 1)]
      cleaned <- cbind(cleaned, Dataset_Allele_Check_V2(pair))
    }
  }
  cleaned
}
# Dataset_Allele_Check_V2: validate/clean one pair of allele columns.
# Cells whose value equals its own one-field reduction (i.e. the allele was
# typed to a single field) are replaced with NA; all remaining alleles are
# truncated to two fields. GetField comes from BIGDAWG -- presumably it
# truncates an allele name to `Res` colon-delimited fields (TODO confirm).
# Stops with an error when more than 5% of the cells in the pair are
# single-field; otherwise returns the cleaned two-column data.frame.
Dataset_Allele_Check_V2 <- function(Alleles) {
#Declaring needed variables
count <- a <- 0
# One-field reduction of every cell; used below to detect single-field alleles
Temp_List <- apply(Alleles, FUN = GetField, Res = 1, MARGIN = c(1,2))
Final_Alleles <- data.frame(Alleles, check.names = FALSE) #This will get returned later. We will modify this with the following for loop.
#Takes each column and creates a logical table (T if 1 field, F otherwise) -> Following the logical table, replace data with NA if 1 field, and all other data with 2 field, regardless of initial field count. I.E "12:24" stays "12:24" but "12:52:42" truncates to "12:52"
for (i in 1:2) {
# TRUE where the original value matches a one-field reduction,
# i.e. the allele was typed with a single field
comparison <- Alleles[,i] %in% Temp_List[,i]
count <- sum(length(which(comparison))) + count #Counts number of 1 field alleles, which show up as TRUE in the comparison table.
a <- matrix(ifelse(comparison, NA, sapply(Alleles[,i], FUN = GetField, Res = 2)), nrow(Alleles), 1, byrow = FALSE) #a is temporary list for easier replacement of rows.
Final_Alleles[[i]] <- a
}
# as.matrix(Final_Alleles)
#Calculates percentage of the data that is 1 field, outputs an integer value denoting how many 1 field alleles were in the data and outputs a percentage.
percentage <- (count / (nrow(Alleles) * 2))
# print(paste("The number of single field Alleles is:", count, sep = " ")) ### SJM silencing unnecessary messages
# print(paste("The percentage of single field Alleles in this column pair is:", percentage, sep = " ")) ### SJM as above
#Checks if the percentage of single field alleles is below a certain threshold. This is currently not changable by the user but can be implemented.
if (percentage > .05) {
stop("This column pair has too many alleles that are single field.")
} else {
# print("This column pair is good to go!") ### SJM silencing unnecessary messages
}
Final_Alleles
}
##Part 2 - Amino Acid Extraction##
# countSpaces: measure the widths of space runs in a string.
# Splitting on single spaces turns a run of k spaces into empty tokens;
# this walk reconstructs the run widths. Quirks preserved from the
# original: a width is only recorded when a non-empty token follows the
# run (so trailing spaces are ignored), a leading run of k spaces yields
# k, an interior run of k spaces also yields k, and a string with no
# recorded runs returns numeric(0).
countSpaces <- function(x){
  gap <- 0
  widths <- numeric()
  tokens <- strsplit(x, " ")[[1]]
  for (tok in tokens) {
    if (tok == "") {
      # Each empty token is one extra space in the current run
      gap <- gap + 1
    } else {
      # A word closes the run: record its width (if any) and restart at 1
      if (gap != 0) widths <- c(widths, gap)
      gap <- 1
    }
  }
  widths
}
# CWDverify: download the CWD (Common and Well-Documented) allele catalogue
# and replace any entries whose allele names were deleted/renamed in
# IMGT/HLA database release 3.0.0+ with their current names and accession
# numbers. Returns a data.table with columns Accession and AlleleName.
# Requires network access to uvm.edu and the ANHIG/IMGTHLA GitHub mirror.
CWDverify <- function(){
require(data.table)
## Pull down the CWD catalogue
CWD <- list()
CWD$data <- fread("https://www.uvm.edu/~igdawg/pubs/cwd200_alleles.txt",skip = 1,stringsAsFactors = FALSE,select = c(2,3),showProgress = FALSE)
CWD$version <- fread("https://www.uvm.edu/~igdawg/pubs/cwd200_alleles.txt",nrows = 1,stringsAsFactors = FALSE,select=1,showProgress = FALSE)
## Pull down the hla_nom.txt, Deleted_alleles.txt and allelelist.txt files to create a table of v3.0.0+ deleted alleles, their ACCs,their replacements, and their ACCs
deletedHLA <- list()
# Temporarily store the entire hla_nom.txt in $version
deletedHLA$version <- fread("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/wmda/hla_nom.txt",skip=6, stringsAsFactors = FALSE,sep = ";", col.names = c("Locus","AlleleName","NewName","Event"),select = c(1,2,5,6),showProgress = FALSE)
## Exclude entries without allele name changes
deletedHLA$data <- deletedHLA$version[deletedHLA$version$NewName !="",]
# Exclude pre-db release 3.0.0 alleles (colon-delimited fields only exist from 3.0.0 on)
deletedHLA$data <- deletedHLA$data[grep(":",deletedHLA$data$AlleleName,fixed=TRUE),]
## Process and extract the accession numbers from the Deleted_alleles.txt file, temporarily stored in $version
deletedHLA$version <- fread("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/Deleted_alleles.txt",stringsAsFactors = FALSE,skip = 7,sep=",",header=TRUE,fill=TRUE,showProgress = FALSE)
## Below to account for one extra comma in line 106 (hopefully, can be deleted in a future release)
## NOTE(review): hard-codes row 98 -- fragile against upstream file changes
if(ncol(deletedHLA$version)==4) {deletedHLA$version$Description[98] <- paste(deletedHLA$version$Description[98],deletedHLA$version$V4[98],sep=" ")
deletedHLA$version <- deletedHLA$version[,1:3] }
# Store the pertinent accession numbers in the data element
deletedHLA$data$origAccession <- deletedHLA$version$AlleleID[match(paste(deletedHLA$data$Locus,deletedHLA$data$AlleleName,sep=""),deletedHLA$version$Allele)]
# Temporarily store the allelelist.txt file in $version
deletedHLA$version <- fread("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/Allelelist.txt",skip=6, stringsAsFactors = FALSE,sep = ",", header=TRUE,showProgress = FALSE)
# Look up the accession numbers of the replacement allele names
deletedHLA$data$newAccession <- deletedHLA$version$AlleleID[match(paste(deletedHLA$data$Locus,deletedHLA$data$NewName,sep=""),deletedHLA$version$Allele)]
# overwrite the Deleted_alelles.txt files with the version information
deletedHLA$version <- cbind(fread("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/wmda/hla_nom.txt",stringsAsFactors = FALSE,nrows = 5,sep="?",header=TRUE,showProgress = FALSE),fread("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/Deleted_alleles.txt",stringsAsFactors = FALSE,nrows = 5,sep="?",header=TRUE,showProgress = FALSE), fread("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/Allelelist.txt",nrows=5, stringsAsFactors = FALSE,sep = "?", header=TRUE,showProgress = FALSE))
## Match accession numbers in CWD to the Accession numbers in the deleted alleles.
changeCWD <- match(CWD$data$`IMGT/HLA Accession Number`,deletedHLA$data$origAccession)
# Create full allele names for the new names
deletedHLA$data$NewName <- paste(deletedHLA$data$Locus,deletedHLA$data$NewName,sep="")
# Replace deleted CWD entries (columns 6 = newAccession, 3 = NewName by position)
CWD$data[!is.na(changeCWD),] <- cbind(deletedHLA$data[changeCWD[!is.na(changeCWD)],6],deletedHLA$data[changeCWD[!is.na(changeCWD)],3])
# Rename the columns of the verified CWD table
colnames(CWD$data) <- c("Accession","AlleleName")
CWD$data
}
variantAAextractor<-function(loci,genotypefiles){
#reads in genotype data
#gdata <- read.table(genotypefiles, sep="\t", header=T, check.names = F, stringsAsFactors = F)
gdata <- genotypefiles
gdata <- Datafile_Processing(loci, gdata) #Vinh's function
#sets blank cells to NA
#if cells do not contain NA, locus names are pasted to the allele in the MS_file
for (i in 3:ncol(gdata)){
# gdata[gdata==""]<-NA
gdata[[i]]<-ifelse(is.na(gdata[[i]])==FALSE, paste(colnames(gdata[i]),gdata[,i],sep="*"), NA)}
#removes rows with only ALL NA data
gdata<-gdata[!(rowSums(is.na(gdata))==ncol(gdata)-2),]
#creates empty variables for future for loops
start<-end<-alignment<-list()
#creates empty variables where each element is named after the used loci
#empty variables for correspondence table
inDels<-corr_table<-cols<-downloaded_segments<-w<-alignment_positions<-alignment_length<-alignment_start<-prot_extractions<-refblock_number<-end_char<-space_diff<-
#empty variables for exon_extractor function
variantAApositions<-geno_exonlist<-missing_geno_output<-missing_geno<-rep_variantAA<-mastertablecols<-mastertable<-position_parsed<-nonCWD_checked<-nonCWDtrunc<-singleAA_exon<-singleAA_alleles<-pastedAAseq<-columns<-all_gdata<-genotype_variants<-geno_alleles<-AA_segments<-AA_aligned <-refexon<-pepsplit<-alignment<-exonlist<- sapply(loci, function(x) NULL)
#begin for loop
for(i in 1:length(loci)){
#downloads relevant locus alignment file -- readLines allows for space preservation, which is important in
#finding where the alignment sequence starts
#alignment[[loci[i]]] <- readLines(paste("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/alignments/",paste(ifelse(loci[[i]]=="DRB1","DRB",loci[[i]]),"_prot.txt",sep=""),sep=""),-1,ok=TRUE,skipNul = FALSE)
alignment[[loci[i]]] <- readLines(paste("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/alignments/",paste(ifelse(loci[[i]] %in% c("DRB1", "DRB3", "DRB4", "DRB5"),"DRB",loci[[i]]),"_prot.txt",sep=""),sep=""),-1,ok=TRUE,skipNul = FALSE)
#alters alignment file by cutting out non-pertinent information in beginning
#and endind of alignment file
alignment[[loci[i]]] <- head(alignment[[loci[i]]],-3)
alignment[[loci[i]]] <- tail(alignment[[loci[i]]],-7)
#see countSpaces function at beginning of script
#Counts difference between Prot to -30 and beginning of Prot to -30 + 1 due to zero number indexing to find where
#the alignment sequence actually starts
space_diff[[loci[i]]]<-(nchar(strsplit(alignment[[loci[i]]][3], " ")[[1]][2])+countSpaces(alignment[[loci[i]]][3])[2]+1)-countSpaces(alignment[[loci[i]]][2])[1]
#reduces repeated whitespace in alignment file and removes rows with empty values for proper
#start and stop subsetting
alignment[[loci[i]]] <-str_squish(alignment[[loci[i]]])
alignment[[loci[i]]] <-alignment[[loci[i]]][-which(alignment[[loci[i]]] == "")]
#determines positions of "Prot" and the end of that reference block segment
start[[loci[i]]]<-as.numeric(grep("Prot", alignment[[loci[i]]]))
end[[loci[i]]] <- as.numeric(c(start[[loci[i]]][2:length(start[[loci[i]]])]-1,length(alignment[[loci[i]]])))
#counts number of characters in the very last allele to add onto the last Prot enumeration block
#to obtain end length
end_char[[loci[i]]]<-nchar(sapply(strsplit(gsub(" ", "", sub(" ", "~", str_squish(tail(alignment[[loci[i]]], 1)))), "~"), "[", 2))-1
#extracts rows with "Prot" and reference sequence position information
#extracts only relevant reference sequence positions
#NOTE: the first row containing "Prot" contains two numbers -- -30 and 1 -- where only -30, is extracted,
#as the actual sequence start will always be 1
for (j in 1:length(start[[loci[i]]])){
prot_extractions[[loci[i]]][j]<-strsplit(alignment[[loci[i]]][start[[loci[i]]][j]], " ")
refblock_number[[loci[i]]][j]<-as.numeric(sapply(prot_extractions[[loci[i]]][j], "[", 2))
#determines the alignment start by adding -30 to the difference between white spaces found above
alignment_start[[loci[i]]]<-refblock_number[[loci[i]]][1]+space_diff[[loci[i]]]
}
#closes all white space in the alignment file, except for the white space separating the allele and peptide sequence
alignment[[loci[i]]] <-paste(substr(alignment[[loci[i]]],1,regexpr(" ",text = alignment[[loci[i]]],fixed = TRUE)), gsub(" ","",substr(alignment[[loci[i]]],regexpr(" ",text = alignment[[loci[i]]],fixed = TRUE),nchar(alignment[[loci[i]]]))),sep = "")
#string splits at white spaces to yield allele and peptide sequences
alignment[[loci[i]]] <- strsplit(alignment[[loci[i]]]," ", fixed=T)
#binds the previously split strings by row
alignment[[loci[i]]] <- do.call(rbind,alignment[[loci[i]]])
#if the pepseq column is equal to the allele column due to premature peptide termination,
#insert a blank in place of the allele in the pepseq column
alignment[[loci[i]]][which(alignment[[loci[i]]][,1]==alignment[[loci[i]]][,2]),2] <- ""
#renames columns to "alleles" and "pepseq"
colnames(alignment[[loci[i]]])<-c(paste(loci[[i]], "alleles", sep="_"), "pepseq")
#due to ANHIG formatting, cases where an allele contains newly reference peptide sequences will not
#contain the same number of rows as previous reference peptide blocks
#this for loop is invoked to add "."for all other alleles for each character in the newly reference peptide
#to preserve structural integrity
for(k in 1:length(start[[loci[i]]])){
if(nrow(alignment[[i]][start[[loci[i]]][k]:end[[loci[i]]][k],])!=nrow(alignment[[loci[i]]][start[[loci[i]]][1]:end[[loci[i]]][1],])){
x<-as.data.frame(alignment[[loci[i]]][,1][start[[loci[i]]][1]:end[[loci[i]]][1]][-c(1,2)], stringsAsFactors = F)
colnames(x)<-paste(loci[[i]], "alleles", sep="_")
x<-cbind.data.frame(x, pepseq=as.character(paste(rep(".", nchar(tail(alignment[[loci[i]]][,2], 1))), collapse = "")), stringsAsFactors=FALSE)
y<-data.frame(tail(alignment[[loci[i]]],1), stringsAsFactors = F)
x$pepseq[match(y[,1], x[,1])]<-y$pepseq
alignment[[loci[i]]]<-as.matrix(rbind(head(alignment[[loci[i]]], -1), x))
start[[loci[i]]]<-as.numeric(grep("Prot", alignment[[loci[i]]]))
end[[loci[i]]] <- as.numeric(c(start[[loci[i]]][2:length(start[[loci[i]]])]-1,nrow(alignment[[loci[i]]])))}}
#if a locus has extra formatting, resulting in unqeual rows, start and end will be updated to reflect subsetting
#if a locus has no extra formatting, start and end will remain the same, as procured by earlier code
for(e in 1:length(start[[loci[i]]])){
AA_segments[[loci[i]]]<-cbind(AA_segments[[loci[i]]], alignment[[loci[i]]][start[[loci[i]]][e]:end[[loci[i]]][e],])}
#removes first two rows containing AA position and "Prot"
AA_segments[[loci[i]]] <- AA_segments[[loci[i]]][-c(1,2),]
#designates columns to be combined as every other so allele names are not included
#in pasting all the amino acid sequences together
cols<-seq(0, ncol(AA_segments[[loci[i]]]), by=2)
AA_segments[[loci[i]]]<-cbind(AA_segments[[loci[i]]][,1], apply(AA_segments[[loci[i]]][,cols], 1 ,paste, collapse = ""))
#creates a new matrix with the number of columns equal to the number of characters in the reference sequence
corr_table[[loci[i]]]<-matrix(NA, nrow = 2, ncol = as.numeric(nchar(AA_segments[[loci[i]]][,2][1]))) ### SJM added NA argument
#determines alignment length based on the total number of characters plus the alignment start (which is negative )
alignment_length[[loci[i]]]<-as.numeric(nchar(AA_segments[[loci[i]]][,2][1]))+alignment_start[[loci[[i]]]]
#pastes alignment_start to alignment_length together in sequential order, with inDels accounted for
#captures output as "w"
w[[i]] <- capture.output(cat(alignment_start[[loci[i]]]:alignment_length[[loci[i]]]))
#splits string formed by cat for separate character variables
alignment_positions[[loci[i]]]<-as.character(unlist(strsplit(w[[loci[i]]], " ")))
#eliminates "0", as the alignment sequence from ANHIG does not contain 0
alignment_positions[[loci[i]]]<-alignment_positions[[loci[i]]][-which(alignment_positions[[loci[i]]] == 0)]
#contains alignment sequence information
corr_table[[loci[i]]][2,]<-alignment_positions[[loci[i]]]
#string splits to extract locus in the allele name
#assigns to new variable "AA_aligned"
AA_aligned[[loci[i]]]<- as.matrix(do.call(rbind,strsplit(AA_segments[[loci[i]]][,1],"[*]")))
#adds a new column of pasted locus and trimmed two field alleles to AA_aligned
AA_aligned[[loci[i]]]<- cbind(AA_aligned[[loci[i]]], paste(AA_aligned[[loci[i]]][,1], apply(AA_aligned[[loci[i]]],MARGIN=c(1,2),FUN=GetField,Res=2)[,2], sep="*"))
#binds AA_aligned and AA_segments -- renames columns
AA_segments[[loci[i]]] <- cbind(AA_aligned[[loci[i]]], AA_segments[[loci[i]]])
colnames(AA_segments[[loci[i]]]) <- c("locus", "full_allele", "trimmed_allele", "allele_name", "AAsequence")
#sets refexon to a reference peptide for each HLA locus based on the reference sequences in AA_segments
refexon[[loci[i]]] <- rbind(AA_segments[[loci[i]]][1,])[which(rbind(AA_segments[[loci[i]]][1,])[,"locus"]==loci[[i]]),'AAsequence']
#splits AA_sequence column at every amino acid, resulting in a list of split amino acids for each row
pepsplit[[loci[i]]] <- sapply(AA_segments[[loci[i]]][,"AAsequence"],strsplit,split="*")
#fills in space with NA for alleles with premature termination to make it the same number of characters
#as the reference sequence
pepsplit[[loci[i]]]<- lapply(pepsplit[[loci[i]]],function(x) c(x,rep("NA",nchar(refexon[[loci[i]]])-length(x))))
#binds pep_split together by element in its previous list form by row
pepsplit[[loci[i]]]<- do.call(rbind,pepsplit[[loci[i]]])
#nullifies row names
rownames(pepsplit[[loci[i]]]) <- NULL
#binds all columns together to form desired ouput, as described above
AA_segments[[loci[i]]] <- cbind.data.frame(AA_segments[[loci[i]]][,1:4],pepsplit[[loci[i]]], stringsAsFactors=FALSE)
#finds positions in AA_segments that have ".", indicating an inDel
inDels[[loci[[i]]]]<-colnames(AA_segments[[loci[[i]]]][1, 5:ncol(AA_segments[[loci[[i]]]])][AA_segments[[loci[[i]]]][1, 5:ncol(AA_segments[[loci[[i]]]])] %in% "."])
#inputs AA_segments alignment sequence into the corr_table with "InDel" still present
corr_table[[loci[[i]]]][1,]<-names(AA_segments[[loci[[i]]]][5:ncol(AA_segments[[loci[[i]]]])])
if(length(inDels[[loci[[i]]]])!=0){
for(b in 1:length(inDels[[loci[[i]]]])){
corr_table[[loci[[i]]]][2,][inDels[[loci[[i]]]][[b]]==corr_table[[loci[[i]]]][1,]]<-paste("InDel", b, sep="_")
}
}
#if alignment start is position 1, alignment start does not need to be accounted for
#when determining length of corr_table in re-enumerating corr_table with InDels
if(alignment_start[[loci[[i]]]]==1){
#fixes enumerations following "InDel"
corr_table[[loci[[i]]]][2,][!grepl("InDel", corr_table[[loci[[i]]]][2,])]<-(alignment_start[[loci[[i]]]]:((length(corr_table[[loci[[i]]]][2,])-length(corr_table[[loci[[i]]]][2,][grepl("InDel", corr_table[[loci[[i]]]][2,])]))))[!(alignment_start[[loci[[i]]]]:((length(corr_table[[loci[[i]]]][2,])-length(corr_table[[loci[[i]]]][2,][grepl("InDel", corr_table[[loci[[i]]]][2,])]))))==0]
}
else{
corr_table[[loci[[i]]]][2,][!grepl("InDel", corr_table[[loci[[i]]]][2,])]<-(alignment_start[[loci[[i]]]]:((length(corr_table[[loci[[i]]]][2,])-length(corr_table[[loci[[i]]]][2,][grepl("InDel", corr_table[[loci[[i]]]][2,])]))+alignment_start[[loci[[i]]]]))[!(alignment_start[[loci[[i]]]]:((length(corr_table[[loci[[i]]]][2,])-length(corr_table[[loci[[i]]]][2,][grepl("InDel", corr_table[[loci[[i]]]][2,])]))+alignment_start[[loci[[i]]]]))==0]
}
#renames columns in AA_segments
colnames(AA_segments[[loci[i]]]) <- c("locus","allele","trimmed_allele","allele_name", 1:ncol(corr_table[[loci[[i]]]]))
#distributes reference sequence from row 1
#into all other rows, if they contain a "-"
#amino acids with changes will not be impacted
for(k in 5:ncol(AA_segments[[loci[i]]])) {
AA_segments[[loci[i]]][,k][which(AA_segments[[loci[i]]][,k]=="-")] <- AA_segments[[loci[i]]][,k][1]}
#for loop for subsetting AA_segments by matching exon start and end cells from AA_atlas
#column names of AA_segments, which are AA positions
#subsets relevant amino acids, inputting them into a list
#binds previous columns with locus, allele, trimmed allele, and allele name information
#subsets first exon for all loci
#HLA-A, B, and C's first exons end at -1 (i.e exon 2 begins at position 1), so
#the matching end atlas coordinate must be substracted by 2, since there is
#no position zero in the alignment
#HLA-DQB1, DRB1, and DPB1's first exon ends at a number other than -1
#(i.e. exon 2 begins at position #2<, the matching end atlas coordinate is
#only subtracted by 1, since we do not need to
#account for there being no position zero in the alignment)
if((loci[[i]]=="A") || (loci[[i]]=="B") || (loci[[i]]=="C")){
exonlist[[i]][[1]]<-cbind(AA_segments[[loci[i]]][,1:4], AA_segments[[loci[i]]][,5:match(as.numeric(AA_atlas[match(loci[[i]],names(AA_atlas))][[loci[i]]][[2]][[1]]-1), colnames(AA_segments[[loci[i]]]))])}
if((loci[[i]]=="DRB1") || (loci[[i]]=="DQB1") || (loci[[i]]=="DPB1")){
exonlist[[i]][[1]]<-cbind(AA_segments[[loci[i]]][,1:4], AA_segments[[loci[i]]][,5:match(as.numeric(AA_atlas[match(loci[[i]],names(AA_atlas))][[loci[i]]][[2]][[1]]), colnames(AA_segments[[loci[i]]]))])}
#subsets last exon for loci
exonlist[[loci[i]]][[nrow(AA_atlas[[match(loci[[i]],names(AA_atlas))]])+1]]<-cbind(AA_segments[[loci[i]]][,1:4], AA_segments[[loci[i]]][match(AA_atlas[[match(loci[[i]],names(AA_atlas))]][[2]][[length(AA_atlas[match(loci[[i]],names(AA_atlas))][[loci[i]]][[2]])]]:names(AA_segments[[loci[i]]][ncol(AA_segments[[loci[i]]])]), colnames(AA_segments[[loci[i]]]))])
#subsets N-1 exons
for(j in 1:(nrow(AA_atlas[[match(loci[i],names(AA_atlas))]])-1)){
exonlist[[loci[i]]][[j+1]]<-cbind(AA_segments[[loci[i]]][,1:4], AA_segments[[loci[i]]][,match(AA_atlas[match(loci[i],names(AA_atlas))][[loci[i]]][[2]][[j]], colnames(AA_segments[[loci[i]]])):match(as.numeric(AA_atlas[match(loci[i],names(AA_atlas))][[loci[i]]][[2]][[j+1]]),colnames(AA_segments[[loci[i]]]))])}
#for loop for subsetting exonlist alleles to only those found in genotype data
#focuses on subsetting via the third column in exonlist, which consists of trimmed_allele data
#variable e in for loop represents number of columns per locus, which is how BIGDAWG input data is formatted
for(d in 1:length(exonlist[[loci[i]]])){
for(e in 1:2){
#finds which exonlist alleles are present in genotype data alleles
geno_alleles[[loci[i]]][[e]]<-exonlist[[loci[i]]][[d]][,3][which(exonlist[[loci[i]]][[d]][,3] %in% gdata[which(colnames(gdata)%in%loci[[i]]==TRUE)][,e]==TRUE)]
}}
#merges both sets of unique alleles found in exonlist and gets rid of duplicates
geno_alleles[[loci[i]]]<-unique(append(geno_alleles[[loci[i]]][[1]], geno_alleles[[loci[i]]][[2]]))
#creates a variable geno_exonlist, with the number of elements equal to how many exons there are for an allele
geno_exonlist[[loci[i]]]<-sapply(exonlist[[loci[i]]], function(x) NULL)
#reads in text file of of latest, full allele history -- chooses most recent allele release to set as HLA_alleles
#LT
HLA_alleles<-read.csv("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/Allelelist_history.txt", header=TRUE, stringsAsFactors = FALSE, skip=6,sep=",")[,c(1,2)]
#compiles a list of CWD alleles and inserts them into a new variable
CWDalleles<-CWDverify()
#makes a list of lists based on the number of exons for a given locus
nonCWD_checked[[loci[[i]]]]<-singleAA_exon[[loci[[i]]]]<-singleAA_alleles[[loci[[i]]]]<-pastedAAseq[[loci[[i]]]]<-columns[[loci[[i]]]]<-all_gdata[[loci[[i]]]]<-nonCWDtrunc[[loci[[i]]]]<-genotype_variants[[loci[[i]]]]<-sapply(exonlist[[loci[[i]]]], function(x) NULL)
#subsets exonlist alleles to those found in genotype data and inserts them into a new list
#geno_exonlist
for(d in 1:length(exonlist[[loci[i]]])){
geno_exonlist[[loci[i]]][[d]]<-subset(exonlist[[loci[i]]][[d]], exonlist[[loci[i]]][[d]][,3]%in%geno_alleles[[loci[i]]])
geno_exonlist[[loci[i]]][[d]]<-cbind.data.frame("accessions"=HLA_alleles[,1][match(geno_exonlist[[loci[i]]][[d]]$allele_name, HLA_alleles[,2])], geno_exonlist[[loci[i]]][[d]], stringsAsFactors=FALSE)
geno_exonlist[[loci[i]]][[d]]<-cbind.data.frame("CWD"=ifelse(geno_exonlist[[loci[i]]][[d]]$accessions %in% CWDalleles$Accession, "CWD", "NON-CWD"), geno_exonlist[[loci[i]]][[d]], stringsAsFactors=FALSE)
#subsets geno_exonlist to only containing CWD alleles via accession number
#and stores it to a new variable, all_gdata
#NOTE: all g_data will be a master copy of all variants of genotype data alleles
if(any(geno_exonlist[[loci[i]]][[d]]$CWD=="CWD")){
all_gdata[[loci[i]]][[d]]<-na.omit(geno_exonlist[[loci[i]]][[d]][geno_exonlist[[loci[i]]][[d]]$accessions%in%CWDalleles$Accession,])}
#compares whether all truncated alleles in all_gdata are in geno_alleles
#returns truncated alleles that are not CWD, but that are present in geno_alleles
nonCWDtrunc[[loci[i]]]<-cbind(geno_alleles[[loci[i]]]%in%all_gdata[[loci[i]]][[d]]$trimmed_allele, geno_alleles[[loci[i]]])[which(cbind(geno_alleles[[loci[i]]], geno_alleles[[loci[i]]]%in%all_gdata[[loci[i]]][[d]]$trimmed_allele)==FALSE)]
if (length(nonCWDtrunc[[loci[i]]]) != 0) {
#obtains non-CWD genotype variants in the genotype dataset
for(b in 1:length(nonCWDtrunc[[loci[i]]])){
genotype_variants[[loci[i]]][[d]][[b]]<-subset(geno_exonlist[[loci[i]]][[d]], geno_exonlist[[loci[i]]][[d]]$trimmed_allele==nonCWDtrunc[[loci[i]]][[b]])
#if the non-CWD allele only has one variant, bind it to all_gdata
if(nrow(genotype_variants[[loci[i]]][[d]][[b]])==1){all_gdata[[loci[[i]]]][[d]]<-rbind(all_gdata[[loci[[i]]]][[d]],genotype_variants[[loci[[i]]]][[d]][[b]])}
#if the non-CWD allele has more than one variant, extract number of amino acid columns
#present for a given exon
if(nrow(genotype_variants[[loci[i]]][[d]][[b]])>1){
columns[[loci[i]]][[d]]<-7:length(genotype_variants[[loci[i]]][[d]][[b]])
#if an exon for a non-CWD allele has more than one amino acid column, paste all the columns together to obtain
#the amino acid sequence which is stored in pastedAAseq
#pastedAAseq is evaluated to find which allele variant has the most complete sequence by counting the number of
#character, omitting * (notation for unknown amino acid)
#the allele with the most compelte sequence is bound to all_gdata
if(length(columns[[loci[i]]][[d]])>1){
pastedAAseq[[loci[i]]][[d]]<-apply(genotype_variants[[loci[i]]][[d]][[b]][ , columns[[loci[i]]][[d]]] , 1 , paste , collapse = "" )
all_gdata[[loci[i]]][[d]]<-rbind(all_gdata[[loci[i]]][[d]], genotype_variants[[loci[i]]][[d]][[b]][names(pastedAAseq[[loci[i]]][[d]][which.max(nchar(gsub("[*^]","",pastedAAseq[[loci[i]]][[d]])))]),])}
#if an exon for a non-CWD allele has one amino acid column (i.e. exon 8 for HLA-A), store it into a separate
#variable, singleAA_alleles
if(length(columns[[loci[i]]][[d]])==1){
singleAA_exon[[loci[i]]][[b]]<-genotype_variants[[loci[i]]][[d]][[b]][ncol(genotype_variants[[loci[i]]][[d]][[b]])==7]
singleAA_alleles[[loci[i]]]<-singleAA_exon[[loci[i]]][lapply(singleAA_exon[[loci[i]]], length)>0]}}}
#evaluates whether a variant amino acid is present and subsets it to nonCWD_checked if there is one
#otherwise, if nonCWDchecked only contains *, use *
for(c in 1:length(singleAA_alleles[[loci[i]]])){
if(any(singleAA_alleles[[loci[i]]][[c]][7:length(singleAA_alleles[[loci[i]]][[c]])]!="*")==TRUE) {nonCWD_checked[[loci[i]]][[c]]<-subset(singleAA_alleles[[loci[i]]][[c]], singleAA_alleles[[loci[i]]][[c]][7:length(singleAA_alleles[[loci[i]]][[c]])]!="*")[1,]}
if(any(singleAA_alleles[[loci[i]]][[c]][7:length(singleAA_alleles[[loci[i]]][[c]])]!="*")==FALSE){nonCWD_checked[[loci[i]]][[c]]<-subset(singleAA_alleles[[loci[i]]][[c]], singleAA_alleles[[loci[i]]][[c]][7:length(singleAA_alleles[[loci[i]]][[c]])]=="*")[1,]}
}
#binds narrowed down non-CWD alleles for one amino acid exons and inputs it back IF there is a one columned amino acid
#if not, nothing happens
if(length(columns[[loci[i]]][[d]])==1){
all_gdata[[loci[i]]][[d]]<-rbind(all_gdata[[loci[i]]][[d]][ncol(all_gdata[[loci[i]]][[d]])==7], rbind(nonCWD_checked[[loci[i]]][[1]], nonCWD_checked[[loci[i]]][[2]]))}}
}
#creates a new variable, position_parsed, with pre-defined elements based on
#column names in AA_segments (i.e. position in the peptide sequence)
position_parsed[[loci[i]]]<-sapply(colnames(AA_segments[[loci[i]]][,5:ncol(AA_segments[[loci[i]]])]), function(x) NULL)
#for loop to extract only variant amino acids and input them into their respective element positions
#in position_parsed
#extracts only variant amino acids, discounting NA and unknown alleles (*)
for(a in 1:length(all_gdata[[loci[i]]])){
for(b in 1:length(7:ncol(all_gdata[[loci[i]]][[a]]))){
position_parsed[[loci[i]]][match(colnames(all_gdata[[loci[i]]][[a]][7:ncol(all_gdata[[loci[i]]][[a]])]), names(position_parsed[[loci[i]]]))][[b]]<-unique(subset(all_gdata[[loci[i]]][[a]][c(5,b+6)], (all_gdata[[loci[i]]][[a]][b+6]!=all_gdata[[loci[i]]][[a]][,b+6][1]) & (all_gdata[[loci[i]]][[a]][b+6] != "*") & (all_gdata[[loci[i]]][[a]][b+6] != "NA")))}}
#removes invariant positions (i.e elements with no rows )
#inDels will be filtered out via a is.null application
position_parsed[[loci[i]]]<-position_parsed[[loci[i]]][sapply(position_parsed[[loci[[i]]]][which(lapply(position_parsed[[loci[[i]]]], is.null)==FALSE)], nrow)>0]
#further subsets position_parsed to only variant positions with polymorphic amino acids
for(g in 1:length(position_parsed[[loci[i]]])){
position_parsed[[loci[i]]][[g]]<-subset(position_parsed[[loci[i]]][[g]], length(unique(position_parsed[[loci[i]]][[g]][,2]))!=1)}
#removes elements without polymorphic amino acids
position_parsed[[loci[i]]]<-position_parsed[[loci[i]]][sapply(position_parsed[[loci[i]]], nrow)>0]
variantAApositions[[loci[[i]]]]<-sapply(position_parsed[[loci[[i]]]], function(x) NULL)
for(j in 1:length(all_gdata[[loci[[i]]]])){
for(k in 1:length(names(variantAApositions[[loci[[i]]]]))){
if(any(colnames(all_gdata[[loci[[i]]]][[j]])==names(variantAApositions[[loci[[i]]]])[[k]])){variantAApositions[[loci[[i]]]][names(variantAApositions[[loci[[i]]]])==names(variantAApositions[[loci[[i]]]])][[k]]<-cbind.data.frame(trimmed_allele=all_gdata[[loci[[i]]]][[1]][,5], all_gdata[[loci[[i]]]][[j]][colnames(all_gdata[[loci[[i]]]][[j]])==names(variantAApositions[[loci[[i]]]])[[k]]], stringsAsFactors=FALSE)}}}
#creates a dataframe that will go into BIGDAWG, #where each variant position has 2 columns to match each locus specific
#column in genotype data
#columns 1 and 2 of this dataframe are adapted from genotype data columns
#patientID and disease status
mastertable[[loci[[i]]]]<- data.frame(gdata[,c(1,2)], matrix("", ncol = length(variantAApositions[[loci[[i]]]])*2), stringsAsFactors = F)
mastertablecols[[loci[[i]]]]<-names(position_parsed[[loci[[i]]]])
#repeats variant amino acid positions twice and stores them for future naming of
#master table column
for(t in 1:length(mastertablecols[[loci[[i]]]])){
rep_variantAA[[loci[[i]]]][[t]]<-rep(mastertablecols[[loci[[i]]]][[t]],2)}
#renames column names
colnames(mastertable[[loci[[i]]]])<-c("SampleID", "Disease", unlist(rep_variantAA[[loci[[i]]]]))
for(u in 1:length(gdata[loci[[i]]==colnames(gdata)])){
for(s in 1:length(variantAApositions[[loci[[i]]]])){
mastertable[[loci[[i]]]][names(variantAApositions[[loci[[i]]]][[s]][2]) == names(mastertable[[loci[[i]]]])][[u]]<-variantAApositions[[loci[[i]]]][[s]][,2][match(gdata[loci[[i]]==colnames(gdata)][[u]], variantAApositions[[loci[[i]]]][[s]][,1])]
}
}
#Fixes the alignment - output will be in true alignment instead of positional order.
for (x in 3:ncol(mastertable[[loci[[i]]]])) {
for (y in 1:(ncol(corr_table[[loci[[i]]]]))) {
if (corr_table[[loci[[i]]]][[1,y]] == colnames(mastertable[[loci[[i]]]][x])) {
colnames(mastertable[[loci[[i]]]])[x] <- corr_table[[loci[[i]]]][[2,y]]
}
}
}
}
mastertable #Vinh's addition
}
##Part 3 - Combination Analyzer##
#Runs one BIGCAAT iteration for a single locus: performs a BIGDAWG locus-level ("L")
#association test on myData, compares the resulting odds ratios (BOLO) against the
#previous iteration's significant set (KDLO), and builds the genotype table (combidf)
#of candidate position combinations to feed into the next iteration.
#Arguments:
# loci - a single locus name, used to index variantAAtable
# myData - BIGDAWG-formatted dataset for this iteration (variantAAtable[[loci]] at iteration 0)
# KDLO, BOLO, UMLO - results carried over from the previous iteration
#   NOTE(review): the caller (runCombiAnalyzer) passes these as (BOLO, KDLO, UMLO),
#   i.e. swapped relative to this signature's (KDLO, BOLO, UMLO) -- confirm intended.
# counter - 0-based iteration number
# motif_list - optional vector bounding the number of iterations (defaulted below at iteration 0;
#   the default does not propagate back to the caller)
# KDLO_list, UMLO_list - per-iteration result histories, used for counter > 1 comparisons/filters
# variantAAtable - per-locus variant amino-acid tables produced by variantAAextractor
# loop - 1 analyzes predisposing ORs (> 1.0), 2 analyzes protective ORs (< 1.0)
#Returns: a NAMED list (KDLO/BOLO/combidf/UMLO/combinames) on a normal pass, or an
#UNNAMED list(KDLO, BOLO[, UMLO]) at the early-exit points below; the caller detects
#the unnamed form via is.null(interim$KDLO) and stops iterating.
combiAnalyzer<-function(loci, myData, KDLO, BOLO, UMLO, counter, motif_list, KDLO_list, UMLO_list, variantAAtable, loop){
#specifies a default motif list if one is not provided
if((is.null(motif_list)==TRUE)&(counter==0)){
motif_list<-c(0,2,3,4,5,6,7)
# cat("BIGCAAT: A motif list has not been provided - BIGCAAT will run until maximal OR is reached. \n") ### SJM Currently no way to provide a motif list
}
#cat("internal motif_list = ",motif_list,"\n",sep="")
#BIGDAWG analysis for iteration 0
#set output as T for statistical outputs
#silenceBD captures BIGDAWG's console output so only BIGCAAT's own messages are shown
silenceBD <- capture.output(BOLO<-BIGDAWG(myData, HLA=F, Run.Tests="L", Missing = 2, Return=T, Output = F, Verbose = F)) ### SJM Verbose OFF, and BIGDAWG output captured to silenceBD
#unlists all lists in columns in the dataframe
#BOLO holds the odds-ratio table of the locus-level ("L") test for the first dataset
BOLO<-data.frame(lapply(as.data.frame(BOLO$L$Set1$OR), function(x) unlist(x)), stringsAsFactors = F)
#creates dummy_KDLO for comparison to first BOLO ONLY on the 0th iteration
if(counter==0){
#makes dummy KDLO based on previous BOLO
#dummy rows use OR=1.0 (the null hypothesis) and significance "NS" as the baseline
dummy_KDLO<-as.data.frame(t(c("TBA-loc","TBA-allele",1.0,0.5,1.5,0.5,"NS")), stringsAsFactors = F)[rep(seq_len(nrow(as.data.frame(t(c("TBA-loc","TBA-allele",1.0,0.5,1.5,0.5,"NS")), stringsAsFactors = F))), each=nrow(BOLO)),]
dummy_KDLO[,1]<-BOLO$Locus
dummy_KDLO[,2]<-BOLO$Allele
##MAORI module
#finds difference between dummy and BOLO amino acid variants and inputs into new column
##dummy comparison only for 0th iteration
for(i in 1:nrow(BOLO)){
#finds OR difference between BOLO and dummy ORs -- subs out "-", for a blank, since only evaluating absolute value of OR diff
#adds difference to new column in BOLO
BOLO[i,8]<-gsub("-", "", as.numeric(BOLO[i,]$OR)-as.numeric(subset(subset(dummy_KDLO, grepl(BOLO[i,][[1]], dummy_KDLO[,1])), grepl(BOLO[i,][[2]], subset(dummy_KDLO, grepl(BOLO[i,][[1]], dummy_KDLO[,1]))[,2]))[,3]))[[1]]
}}
#subsets out binned alleles and any alleles with NA combinations
if(counter>0){
BOLO<-subset(BOLO, (BOLO$Allele!="binned") & (!grepl("NA", BOLO$Allele)))}
#MAORI statement for iteration 1
#column 8 = |OR difference| of each pair vs its first constituent single position in KDLO
if(counter==1){
for(i in 1:nrow(BOLO)){
BOLO[i,8]<-gsub("-", "", as.numeric(BOLO[i,]$OR)-as.numeric(subset(subset(KDLO, KDLO[,1] %in% strsplit(BOLO[i,][[1]], ":")[[1]][[1]]), subset(KDLO, KDLO[,1] %in% strsplit(BOLO[i,][[1]], ":")[[1]][[1]])$Allele %in% strsplit(BOLO[i,][[2]], "~")[[1]][[1]])$OR))}
}
#ends function if BOLO is empty
#early exit: unnamed list signals the caller to stop iterating
if((counter>0) & (nrow(BOLO)==0)){
return(list(KDLO, BOLO, UMLO))}
#MAORI statement for iteration 2+
#further addition for adding a 9th column for comparison to newly made nth variants to its singular amino acid variant
#column 8 compares against the previous iteration's (n-1)-mer prefix;
#column 9 compares against the last single position's iteration-1 OR (KDLO_list[[1]])
if(counter>1){
for(i in 1:nrow(BOLO)){
BOLO[i,8]<-gsub("-", "", as.numeric(BOLO[i,]$OR)- as.numeric(subset(subset(KDLO, KDLO[,1] %in% paste(strsplit(BOLO[i,][[1]], ":")[[1]][c(1:length(strsplit(KDLO$Locus, ":")[[1]]))], collapse=":")), subset(KDLO, KDLO[,1] %in% paste(strsplit(BOLO[i,][[1]], ":")[[1]][c(1:length(strsplit(KDLO$Locus, ":")[[1]]))], collapse=":"))$Allele %in%paste(strsplit(BOLO[i,][[2]], "~")[[1]][c(1:length(strsplit(KDLO$Locus, ":")[[1]]))], collapse="~"))$OR))
BOLO[i,9]<-gsub("-", "", as.numeric(BOLO[i,]$OR)-as.numeric(subset(subset(KDLO_list[[1]], KDLO_list[[1]]$Locus %in% strsplit(BOLO[i,][[1]], ":")[[1]][[length(unlist(strsplit(BOLO[i,][[1]], ":")))]]), subset(KDLO_list[[1]], KDLO_list[[1]]$Locus %in% strsplit(BOLO[i,][[1]], ":")[[1]][[length(unlist(strsplit(BOLO[i,][[1]], ":")))]])$Allele %in% strsplit(BOLO[i,][[2]], "~")[[1]][[length(unlist(strsplit(BOLO[i,][[1]], ":")))]])$OR))
}}
#subsets out NS values
#column 7 is the significance flag ("*" = significant, "NS" = not significant)
KDLO<-subset(BOLO,BOLO[,7]=="*")
##loop specifications -- LT
#filters out predisposing ORs for analysis
if(loop==1){
KDLO<-KDLO %>% filter(OR > 1.0)}
#filters out protective ORs for analysis
if(loop==2){
KDLO<-KDLO %>% filter(OR <1.0)}
#statement for returning BOLO if KDLO=0
if((counter>0) & (nrow(KDLO)==0)){
return(list(KDLO, BOLO, UMLO))}
#subsets out variants that have not shown >0.1 improvement from their previous variants and
#singular amino acids
if(counter>1){
#subsets out OR differences smaller than 0.1
KDLO<-subset(KDLO, KDLO[,9]>0.1)}
KDLO<-subset(KDLO, KDLO[,8]>0.1)
#statement for returning KDLO if KDLO=0
if(nrow(KDLO)==0){
return(list(KDLO, BOLO, UMLO))}
#adds in positions from original BOLO that were previously eliminated because of NS or <0.1 variant
KDLO<-unique(rbind(KDLO, subset(BOLO, BOLO$Locus%in%KDLO$Locus)))[mixedorder(row.names(unique(rbind(KDLO, subset(BOLO, BOLO$Locus%in%KDLO$Locus))))),]
#finds unassociated positions from current iteration
unassociated_posi<-unique(BOLO$Locus[!BOLO$Locus %in% KDLO$Locus])
#if length(unassociated_posi==0), return KDLO -- this means KDLO and BOLO are the same
#and max improvement has been reached
if(length(unassociated_posi)==0){
return(list(KDLO, BOLO, UMLO))
}
#pair name generation
#builds every unordered pair "A:B" of the associated positions (upper triangle only)
if(counter==0){
start1<-unique(KDLO$Locus)
#if nothing is in the KDLO, return KDLO and BOLO ## LT
if((length(start1))==0){
return(list(KDLO, BOLO))
}
combinames<-sapply(start1, function(x) NULL)
for(i in 1:(length(start1)-1)){ ## range.x = 1:(N-1)
for(j in (i+1):length(combinames)){ ## range.y = x+1:N
if(names(combinames)[[j]]!=start1[[i]]){
combinames[[i]][[j]]<-paste(start1[[i]],names(combinames)[[j]],sep=":")}}}
#unlists combinames and omits NAs to obtain all unique possible pair combinations
combinames<-unlist(combinames, use.names = F)[!is.na(unlist(combinames, use.names = F))]
}
#set start as singular amino acids
if(counter>0){
start1<-unique(unlist(strsplit(KDLO$Locus, ":")))
combinames<-NULL}
if(counter>0){
possible_combis<-sapply(unique(KDLO$Locus), function(x) NULL)
#finds possible combinations by pasting names of list with singular amino acids not in that pair
for(i in 1:length(possible_combis)){
possible_combis[[i]]<-paste(names(possible_combis[i]), unique(start1[which(start1%in%strsplit(names(possible_combis[i]), ":")[[1]]==FALSE)]), sep=":")}
#splits those triplets up and sorts them numerically to later on eliminate any duplicates
for(j in 1:length(unlist(possible_combis))){
combinames[[j]]<-paste(mixedsort(strsplit(unlist(possible_combis, use.names=F), ":")[[j]], decreasing=F), collapse=":")}
combinames<-unique(mixedsort(combinames))}
###subsets combinames by successive unassociated positions
#drops any candidate combination containing a position found unassociated, whether it
#appears at the start ("^pos") or later (":pos") in the colon-delimited name
if(counter==1) {
for(i in 1:length(unassociated_posi)) {
combinames<-subset(combinames, (!grepl(paste("^", unassociated_posi[[i]], sep=""), combinames)) & (!grepl(paste(":", unassociated_posi[[i]], sep=""), combinames)))}
}
if(counter==2) {
for(i in 1:length(unassociated_posi)) {
combinames<-subset(combinames, (!grepl(paste("^", unassociated_posi[[i]], sep=""), combinames)) & (!grepl(paste(":", unassociated_posi[[i]], sep=""), combinames)))}
for(i in 1:length(UMLO_list[[counter]])){
combinames<-subset(combinames, (!grepl(paste("^", UMLO_list[[counter]][[i]], sep=""), combinames)) & (!grepl(paste(":", UMLO_list[[counter]][[i]], sep=""), combinames)))}
}
if (counter > 2) {
for(i in 1:length(unassociated_posi)) {
combinames<-subset(combinames, (!grepl(paste("^", unassociated_posi[[i]], sep=""), combinames)) & (!grepl(paste(":", unassociated_posi[[i]], sep=""), combinames)))}
#also drop positions recorded as unassociated in every earlier iteration (>= 2)
for(x in counter:2) {
for(i in 1:length(UMLO_list[[x]])) {
combinames<-subset(combinames, (!grepl(paste("^", UMLO_list[[x]][[i]], sep=""), combinames)) & (!grepl(paste(":", UMLO_list[[x]][[i]], sep=""), combinames)))}
}
}
if(length(combinames)==0) {
return(list(KDLO, BOLO, UMLO))
}
#df for pairs -- length is number of unique pairs * 2,
#two columns per combination, mirroring the two columns per locus in genotype data
combidf<-data.frame(variantAAtable[[loci]][,c(1,2)], matrix("", ncol =length(rep(combinames, 2))), stringsAsFactors = F)
#fills in column names
colnames(combidf)<-c("SampleID", "Disease", mixedsort(rep(unlist(combinames), 2)))
#observes number of columns for those needed to be pasted together
cols=c(1:length(strsplit(combinames[[1]], ":")[[1]]))
#[[1]] to contain amino acid combos of TRUE/FALSE
#[[2]] to contain amino acid combos of FALSE/TRUE
#c(TRUE, FALSE) selects the first allele column of each pair, c(FALSE, TRUE) the second;
#each combination's amino acids are joined with "~"
dfAA<-sapply(1:2, function(x) NULL)
#fills in element names in the lists formed in the above lists
for(j in 1:length(dfAA)){
dfAA[[j]]<-sapply(combinames, function(x) NULL)}
#fills in appropriate position pair combos into dfAA
for(i in 1:length(combinames)){
dfAA[[1]][[i]]<-apply(variantAAtable[[loci]][c(TRUE, FALSE)][strsplit(combinames, ":")[[i]]][,cols], 1, paste, collapse = "~")
dfAA[[2]][[i]]<-apply(variantAAtable[[loci]][c(FALSE, TRUE)][strsplit(combinames, ":")[[i]]][,cols], 1, paste, collapse = "~")
}
#fills into pair_df
combidf[,3:length(combidf)][,c(TRUE,FALSE)]<-dfAA[[1]]
combidf[,3:length(combidf)][,c(FALSE,TRUE)]<-dfAA[[2]]
#saves each iteration into specified elements in a list in a variable "myData"
#returns myData
#named return: the caller reads $combidf/$KDLO/$BOLO/$UMLO and continues iterating
myDataFinal<-list("KDLO"=KDLO, "BOLO"=BOLO, "combidf"=combidf, "UMLO"=unassociated_posi, "combinames"=combinames)
return(myDataFinal)
}
#Drives the BIGCAAT iteration loop for a single locus. Repeatedly calls
#combiAnalyzer(), feeding each iteration's combination table (combidf) back in
#as the next iteration's dataset, and accumulating the per-iteration KDLO
#(significant ORs), BOLO (all ORs) and UMLO (unassociated positions).
#Arguments:
# loci - a single locus name indexing variantAAtable
# variantAAtable - per-locus variant amino-acid tables from variantAAextractor
# loop - 1 = predisposing (OR > 1) analysis, 2 = protective (OR < 1) analysis
#Returns: list(KDLO=, BOLO=, UMLO=) of per-iteration histories once the maximal
#OR is reached. combiAnalyzer signals that condition by returning an UNNAMED
#list, which makes interim$KDLO NULL.
runCombiAnalyzer <- function(loci, variantAAtable, loop) {
  #per-iteration result histories
  BOLO_list <- KDLO_list <- UMLO_list <- list()
  #no motif_list is currently supplied; combiAnalyzer defaults one internally
  motif_list <- NULL
  #Iteration-0 carry-over values. Initialized to NULL so that the early-exit
  #returns inside combiAnalyzer (which force these lazily-evaluated arguments)
  #cannot fail with "object not found" on the very first iteration.
  KDLO <- BOLO <- UMLO <- NULL
  #iteration 0 analyzes the raw variant amino-acid table
  myData <- variantAAtable[[loci]]
  stop <- FALSE
  counter = 0
  while (stop == FALSE) {
    cat("Evaluating", ifelse(counter == 0, "initial comparison to null hypothesis \n", paste(counter, "-mers \n", sep = "")))
    cat(counter, "\n")
    #NOTE(review): combiAnalyzer's signature is (loci, myData, KDLO, BOLO, UMLO, ...)
    #but BOLO and KDLO are passed here in the opposite order -- confirm intended.
    interim <- combiAnalyzer(loci, myData, BOLO, KDLO, UMLO, counter, motif_list, KDLO_list, UMLO_list, variantAAtable, loop)
    counter = counter + 1
    #feed the next-iteration dataset forward and record this iteration's results
    myData <- interim$combidf
    KDLO <- KDLO_list[[counter]] <- interim$KDLO
    BOLO <- BOLO_list[[counter]] <- interim$BOLO
    UMLO <- UMLO_list[[counter]] <- interim$UMLO
    #an unnamed early-exit return from combiAnalyzer leaves interim$KDLO NULL,
    #signalling that the maximal OR has been reached
    if (is.null(nrow(KDLO)) == TRUE) {
      cat("Maximal significant OR values identified. End of analysis of the", loci, "locus.\n\n")
      Results <- (list(KDLO = KDLO_list, BOLO = BOLO_list, UMLO = UMLO_list))
      return(Results)
    }
    #motif_list is always NULL here (length 0), so this bound is only reached
    #if a caller-supplied motif list is ever wired through
    if ((is.null(nrow(KDLO)) == FALSE) & length(motif_list) == counter) {
      cat("BIGCAAT: WARNING: end of motif_list analysis, but further analysis is possible.\n")
      stop = TRUE
    }
    if ((is.null(nrow(KDLO)) == TRUE) & length(motif_list) == counter) {
      cat("BIGCAAT: End of motif_list analysis - maximal OR has been reached.\n")
    }
  }
}
#Combining everything into one function
#BIGCAAT entry point: converts a BIGDAWG-formatted genotype dataset into
#variant amino-acid tables and runs the iterative combination analysis for
#each requested locus, once for predisposing and once for protective ORs.
#Arguments:
# loci - character vector of locus names to analyze (e.g. "DRB1")
# GenotypeFile - path to a tab-delimited BIGDAWG-formatted genotype dataset;
#   if missing, the user is prompted to select one interactively
#Returns: a two-element list ("Predisposing", "Protective"); each element is a
#per-locus list of runCombiAnalyzer results (KDLO/BOLO/UMLO histories).
BIGCAAT <- function(loci, GenotypeFile) {
  if (missing(loci)) { return(cat("Please specify a locus, or vector of loci to analyze.")) }
  if (missing(GenotypeFile)) {
    #NOTE(review): fileChoose() is not defined in this file -- presumably a
    #project helper wrapping file.choose(); confirm it is available at run time.
    GenotypeFile <- fileChoose("Please select a BIGDAWG-formatted genotype dataset for analysis.")
  }
  cat("-------------------------------------------------------------------\n BIGCAAT: BIGDAWG Integrated Genotype Converted Amino Acid Testing\n-------------------------------------------------------------------\n") ### SJM Banner
  #"****" marks untyped alleles in BIGDAWG datasets and is read in as NA;
  #everything is kept as character so allele names are never coerced
  Genotype_Data <- read.table(GenotypeFile, header = TRUE, sep = "\t", quote = "", na.strings = "****", colClasses = "character", check.names = FALSE)
  AAData <- variantAAextractor(loci, Genotype_Data)
  #one analysis pass per OR direction; each pass collects one result per locus
  #(the previous locus-named initialization was dead code -- it was
  #unconditionally overwritten here -- and has been removed)
  CombiData <- sapply(c("Predisposing", "Protective"), function(x) NULL)
  for (loop in seq_along(CombiData)) {
    if (loop == 1) { cat("Predisposing OR analysis", sep = "\n") }
    if (loop == 2) { cat("Protective OR analysis", sep = "\n") }
    for (p in seq_along(loci)) {
      cat("Analyzing the", loci[p], "locus\n", sep = " ")
      CombiData[[loop]][[loci[p]]] <- runCombiAnalyzer(loci[p], AAData, loop)
    }
  }
  CombiData
}
|
/CombinedAAAnalyzer.R
|
no_license
|
VinhLuu864/BIGSPIDR
|
R
| false
| false
| 46,271
|
r
|
### BIGCAAT: BIGDAWG Integrated Genotype Converted Amino Acid Testing
### Version 0.3.3
### Authors: Liva Tran, Vinh Luu, Steven J. Mack
##Combines Datafile Procession, AA extraction, and combination analyzer into one function. Changes made for redundancy.
#Requirements
#library() (not require()) so a missing dependency fails immediately with an
#error, instead of a warning that only surfaces later as "could not find function"
library(data.table)
library(stringr)
library(BIGDAWG)
library(gtools)
library(dplyr) ## LT
#AA_atlas.rda must be present in the working directory; its objects are loaded
#into the global environment at source time
load("AA_atlas.rda")
##Part 1 - Datafile Processing##
#Filter a BIGDAWG-formatted genotype table down to the requested loci.
#Keeps the two identifier columns (sample ID, disease status) and, for every
#allele column pair whose name matches `locus`, appends the pair after running
#it through Dataset_Allele_Check_V2 (2-field truncation, NA for single-field).
#Returns: the reduced, validated data frame.
Datafile_Processing <- function(locus, Genotype_Data) {
  #start from the two identifier columns; validated pairs are appended below
  processed <- Genotype_Data[, 1:2]
  colnames(processed) <- colnames(Genotype_Data)[1:2]
  #allele columns come in pairs (3-4, 5-6, ...); walk the first column of each pair
  for (pair_start in seq(3, length(Genotype_Data), 2)) {
    if (!(colnames(Genotype_Data[pair_start]) %in% locus)) {
      next
    }
    allele_pair <- Genotype_Data[, pair_start:(pair_start + 1)]
    colnames(allele_pair) <- colnames(Genotype_Data)[pair_start:(pair_start + 1)]
    #validate/truncate the pair and append it to the output
    processed <- cbind(processed, Dataset_Allele_Check_V2(allele_pair))
  }
  processed
}
#Validate one pair of allele columns: single-field alleles are replaced with NA
#and every remaining allele is truncated to two fields; stops with an error if
#more than 5% of the pair's entries are single-field.
#assumes GetField (from BIGDAWG) truncates an allele name to Res colon-delimited
#fields -- TODO confirm; an allele equal to its own 1-field truncation is
#therefore itself single-field.
#Returns: a data.frame with the same dimensions as Alleles.
Dataset_Allele_Check_V2 <- function(Alleles) {
#Declaring needed variables
count <- a <- 0
#1-field truncation of every entry, used to detect single-field alleles
Temp_List <- apply(Alleles, FUN = GetField, Res = 1, MARGIN = c(1,2))
Final_Alleles <- data.frame(Alleles, check.names = FALSE) #This will get returned later. We will modify this with the following for loop.
#Takes each column and creates a logical table (T if 1 field, F otherwise) -> Following the logical table, replace data with NA if 1 field, and all other data with 2 field, regardless of initial field count. I.E "12:24" stays "12:24" but "12:52:42" truncates to "12:52"
for (i in 1:2) {
comparison <- Alleles[,i] %in% Temp_List[,i]
count <- sum(length(which(comparison))) + count #Counts number of 1 field alleles, which show up as TRUE in the comparison table.
a <- matrix(ifelse(comparison, NA, sapply(Alleles[,i], FUN = GetField, Res = 2)), nrow(Alleles), 1, byrow = FALSE) #a is temporary list for easier replacement of rows.
Final_Alleles[[i]] <- a
}
# as.matrix(Final_Alleles)
#Calculates percentage of the data that is 1 field, outputs an integer value denoting how many 1 field alleles were in the data and outputs a percentage.
#denominator is total entries in the pair: rows x 2 columns
percentage <- (count / (nrow(Alleles) * 2))
# print(paste("The number of single field Alleles is:", count, sep = " ")) ### SJM silencing unnecessary messages
# print(paste("The percentage of single field Alleles in this column pair is:", percentage, sep = " ")) ### SJM as above
#Checks if the percentage of single field alleles is below a certain threshold. This is currently not changable by the user but can be implemented.
if (percentage > .05) {
stop("This column pair has too many alleles that are single field.")
} else {
# print("This column pair is good to go!") ### SJM silencing unnecessary messages
}
Final_Alleles
}
##Part 2 - Amino Acid Extraction##
#Return the lengths of the runs of spaces in a string.
#strsplit(x, " ") yields one "" token per extra consecutive space (and drops
#trailing empties), so a run's length is 1 plus the number of "" tokens it
#produced. Returns numeric(0) for a string containing no space runs.
countSpaces <- function(x){
  run_lengths <- numeric()
  gap <- 0
  tokens <- strsplit(x, " ")[[1]]
  for (tok in tokens) {
    if (tok == "") {
      #an empty token marks one additional consecutive space
      gap <- gap + 1
    } else {
      #a word ends the current run: record it (if any), then start a new
      #single-space run for the separator following this word
      if (gap != 0) {
        run_lengths <- c(run_lengths, gap)
      }
      gap <- 1
    }
  }
  run_lengths
}
#Download the CWD catalogue and update it against the current IPD-IMGT/HLA
#release: any CWD allele whose accession number appears among the deleted or
#renamed alleles (per hla_nom.txt / Deleted_alleles.txt / Allelelist.txt) is
#replaced by its new allele name and accession number.
#Requires network access: all inputs are fetched live from uvm.edu and the
#ANHIG/IMGTHLA GitHub repository; no arguments are taken.
#Returns: a data.table with columns Accession and AlleleName.
CWDverify <- function(){
require(data.table)
## Pull down the CWD catalogue
#$data holds columns 2-3 (accession, allele name); $version holds the header line
CWD <- list()
CWD$data <- fread("https://www.uvm.edu/~igdawg/pubs/cwd200_alleles.txt",skip = 1,stringsAsFactors = FALSE,select = c(2,3),showProgress = FALSE)
CWD$version <- fread("https://www.uvm.edu/~igdawg/pubs/cwd200_alleles.txt",nrows = 1,stringsAsFactors = FALSE,select=1,showProgress = FALSE)
## Pull down the hla_nom.txt, Deleted_alleles.txt and allelelist.txt files to create a table of v3.0.0+ deleted alleles, their ACCs,their replacements, and their ACCs
deletedHLA <- list()
# Temporarily store the entire hla_nom.txt in $version
deletedHLA$version <- fread("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/wmda/hla_nom.txt",skip=6, stringsAsFactors = FALSE,sep = ";", col.names = c("Locus","AlleleName","NewName","Event"),select = c(1,2,5,6),showProgress = FALSE)
## Exclude entries without allele name changes
deletedHLA$data <- deletedHLA$version[deletedHLA$version$NewName !="",]
# Exclude pre-db release 3.0.0 alleles
#post-3.0.0 allele names are colon-delimited, so a ":" identifies them
deletedHLA$data <- deletedHLA$data[grep(":",deletedHLA$data$AlleleName,fixed=TRUE),]
## Process and extract the accession numbers from the Deleted_alleles.txt file, temporarily stored in $version
deletedHLA$version <- fread("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/Deleted_alleles.txt",stringsAsFactors = FALSE,skip = 7,sep=",",header=TRUE,fill=TRUE,showProgress = FALSE)
## Below to account for one extra comma in line 106 (hopefully, can be deleted in a future release)
#NOTE(review): this repair is pinned to row 98 of the fetched file and will
#silently mangle data if the upstream file changes -- verify on IMGT updates
if(ncol(deletedHLA$version)==4) {deletedHLA$version$Description[98] <- paste(deletedHLA$version$Description[98],deletedHLA$version$V4[98],sep=" ")
deletedHLA$version <- deletedHLA$version[,1:3] }
# Store the pertinent accession numbers in the data element
deletedHLA$data$origAccession <- deletedHLA$version$AlleleID[match(paste(deletedHLA$data$Locus,deletedHLA$data$AlleleName,sep=""),deletedHLA$version$Allele)]
# Temporarily store the allelelist.txt file in $version
deletedHLA$version <- fread("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/Allelelist.txt",skip=6, stringsAsFactors = FALSE,sep = ",", header=TRUE,showProgress = FALSE)
deletedHLA$data$newAccession <- deletedHLA$version$AlleleID[match(paste(deletedHLA$data$Locus,deletedHLA$data$NewName,sep=""),deletedHLA$version$Allele)]
# overwrite the Deleted_alelles.txt files with the version information
deletedHLA$version <- cbind(fread("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/wmda/hla_nom.txt",stringsAsFactors = FALSE,nrows = 5,sep="?",header=TRUE,showProgress = FALSE),fread("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/Deleted_alleles.txt",stringsAsFactors = FALSE,nrows = 5,sep="?",header=TRUE,showProgress = FALSE), fread("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/Allelelist.txt",nrows=5, stringsAsFactors = FALSE,sep = "?", header=TRUE,showProgress = FALSE))
## Match accession numbers in CWD to the Accession numbers in the deleted alleles.
changeCWD <- match(CWD$data$`IMGT/HLA Accession Number`,deletedHLA$data$origAccession)
# Create full allele names for the new names
deletedHLA$data$NewName <- paste(deletedHLA$data$Locus,deletedHLA$data$NewName,sep="")
#replace matched rows with (new accession [col 6], new allele name [col 3])
CWD$data[!is.na(changeCWD),] <- cbind(deletedHLA$data[changeCWD[!is.na(changeCWD)],6],deletedHLA$data[changeCWD[!is.na(changeCWD)],3])
# Rename the columns of the verified CWD table
colnames(CWD$data) <- c("Accession","AlleleName")
CWD$data
}
variantAAextractor<-function(loci,genotypefiles){
#reads in genotype data
#gdata <- read.table(genotypefiles, sep="\t", header=T, check.names = F, stringsAsFactors = F)
gdata <- genotypefiles
gdata <- Datafile_Processing(loci, gdata) #Vinh's function
#sets blank cells to NA
#if cells do not contain NA, locus names are pasted to the allele in the MS_file
for (i in 3:ncol(gdata)){
# gdata[gdata==""]<-NA
gdata[[i]]<-ifelse(is.na(gdata[[i]])==FALSE, paste(colnames(gdata[i]),gdata[,i],sep="*"), NA)}
#removes rows with only ALL NA data
gdata<-gdata[!(rowSums(is.na(gdata))==ncol(gdata)-2),]
#creates empty variables for future for loops
start<-end<-alignment<-list()
#creates empty variables where each element is named after the used loci
#empty variables for correspondence table
inDels<-corr_table<-cols<-downloaded_segments<-w<-alignment_positions<-alignment_length<-alignment_start<-prot_extractions<-refblock_number<-end_char<-space_diff<-
#empty variables for exon_extractor function
variantAApositions<-geno_exonlist<-missing_geno_output<-missing_geno<-rep_variantAA<-mastertablecols<-mastertable<-position_parsed<-nonCWD_checked<-nonCWDtrunc<-singleAA_exon<-singleAA_alleles<-pastedAAseq<-columns<-all_gdata<-genotype_variants<-geno_alleles<-AA_segments<-AA_aligned <-refexon<-pepsplit<-alignment<-exonlist<- sapply(loci, function(x) NULL)
#begin for loop
for(i in 1:length(loci)){
#downloads relevant locus alignment file -- readLines allows for space preservation, which is important in
#finding where the alignment sequence starts
#alignment[[loci[i]]] <- readLines(paste("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/alignments/",paste(ifelse(loci[[i]]=="DRB1","DRB",loci[[i]]),"_prot.txt",sep=""),sep=""),-1,ok=TRUE,skipNul = FALSE)
alignment[[loci[i]]] <- readLines(paste("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/alignments/",paste(ifelse(loci[[i]] %in% c("DRB1", "DRB3", "DRB4", "DRB5"),"DRB",loci[[i]]),"_prot.txt",sep=""),sep=""),-1,ok=TRUE,skipNul = FALSE)
#alters alignment file by cutting out non-pertinent information in beginning
#and endind of alignment file
alignment[[loci[i]]] <- head(alignment[[loci[i]]],-3)
alignment[[loci[i]]] <- tail(alignment[[loci[i]]],-7)
#see countSpaces function at beginning of script
#Counts difference between Prot to -30 and beginning of Prot to -30 + 1 due to zero number indexing to find where
#the alignment sequence actually starts
space_diff[[loci[i]]]<-(nchar(strsplit(alignment[[loci[i]]][3], " ")[[1]][2])+countSpaces(alignment[[loci[i]]][3])[2]+1)-countSpaces(alignment[[loci[i]]][2])[1]
#reduces repeated whitespace in alignment file and removes rows with empty values for proper
#start and stop subsetting
alignment[[loci[i]]] <-str_squish(alignment[[loci[i]]])
alignment[[loci[i]]] <-alignment[[loci[i]]][-which(alignment[[loci[i]]] == "")]
#determines positions of "Prot" and the end of that reference block segment
start[[loci[i]]]<-as.numeric(grep("Prot", alignment[[loci[i]]]))
end[[loci[i]]] <- as.numeric(c(start[[loci[i]]][2:length(start[[loci[i]]])]-1,length(alignment[[loci[i]]])))
#counts number of characters in the very last allele to add onto the last Prot enumeration block
#to obtain end length
end_char[[loci[i]]]<-nchar(sapply(strsplit(gsub(" ", "", sub(" ", "~", str_squish(tail(alignment[[loci[i]]], 1)))), "~"), "[", 2))-1
#extracts rows with "Prot" and reference sequence position information
#extracts only relevant reference sequence positions
#NOTE: the first row containing "Prot" contains two numbers -- -30 and 1 -- where only -30, is extracted,
#as the actual sequence start will always be 1
for (j in 1:length(start[[loci[i]]])){
prot_extractions[[loci[i]]][j]<-strsplit(alignment[[loci[i]]][start[[loci[i]]][j]], " ")
refblock_number[[loci[i]]][j]<-as.numeric(sapply(prot_extractions[[loci[i]]][j], "[", 2))
#determines the alignment start by adding -30 to the difference between white spaces found above
alignment_start[[loci[i]]]<-refblock_number[[loci[i]]][1]+space_diff[[loci[i]]]
}
#closes all white space in the alignment file, except for the white space separating the allele and peptide sequence
alignment[[loci[i]]] <-paste(substr(alignment[[loci[i]]],1,regexpr(" ",text = alignment[[loci[i]]],fixed = TRUE)), gsub(" ","",substr(alignment[[loci[i]]],regexpr(" ",text = alignment[[loci[i]]],fixed = TRUE),nchar(alignment[[loci[i]]]))),sep = "")
#string splits at white spaces to yield allele and peptide sequences
alignment[[loci[i]]] <- strsplit(alignment[[loci[i]]]," ", fixed=T)
#binds the previously split strings by row
alignment[[loci[i]]] <- do.call(rbind,alignment[[loci[i]]])
#if the pepseq column is equal to the allele column due to premature peptide termination,
#insert a blank in place of the allele in the pepseq column
alignment[[loci[i]]][which(alignment[[loci[i]]][,1]==alignment[[loci[i]]][,2]),2] <- ""
#renames columns to "alleles" and "pepseq"
colnames(alignment[[loci[i]]])<-c(paste(loci[[i]], "alleles", sep="_"), "pepseq")
#due to ANHIG formatting, cases where an allele contains newly reference peptide sequences will not
#contain the same number of rows as previous reference peptide blocks
#this for loop is invoked to add "."for all other alleles for each character in the newly reference peptide
#to preserve structural integrity
for(k in 1:length(start[[loci[i]]])){
if(nrow(alignment[[i]][start[[loci[i]]][k]:end[[loci[i]]][k],])!=nrow(alignment[[loci[i]]][start[[loci[i]]][1]:end[[loci[i]]][1],])){
x<-as.data.frame(alignment[[loci[i]]][,1][start[[loci[i]]][1]:end[[loci[i]]][1]][-c(1,2)], stringsAsFactors = F)
colnames(x)<-paste(loci[[i]], "alleles", sep="_")
x<-cbind.data.frame(x, pepseq=as.character(paste(rep(".", nchar(tail(alignment[[loci[i]]][,2], 1))), collapse = "")), stringsAsFactors=FALSE)
y<-data.frame(tail(alignment[[loci[i]]],1), stringsAsFactors = F)
x$pepseq[match(y[,1], x[,1])]<-y$pepseq
alignment[[loci[i]]]<-as.matrix(rbind(head(alignment[[loci[i]]], -1), x))
start[[loci[i]]]<-as.numeric(grep("Prot", alignment[[loci[i]]]))
end[[loci[i]]] <- as.numeric(c(start[[loci[i]]][2:length(start[[loci[i]]])]-1,nrow(alignment[[loci[i]]])))}}
#if a locus has extra formatting, resulting in unqeual rows, start and end will be updated to reflect subsetting
#if a locus has no extra formatting, start and end will remain the same, as procured by earlier code
for(e in 1:length(start[[loci[i]]])){
AA_segments[[loci[i]]]<-cbind(AA_segments[[loci[i]]], alignment[[loci[i]]][start[[loci[i]]][e]:end[[loci[i]]][e],])}
#removes first two rows containing AA position and "Prot"
AA_segments[[loci[i]]] <- AA_segments[[loci[i]]][-c(1,2),]
#designates columns to be combined as every other so allele names are not included
#in pasting all the amino acid sequences together
cols<-seq(0, ncol(AA_segments[[loci[i]]]), by=2)
AA_segments[[loci[i]]]<-cbind(AA_segments[[loci[i]]][,1], apply(AA_segments[[loci[i]]][,cols], 1 ,paste, collapse = ""))
#creates a new matrix with the number of columns equal to the number of characters in the reference sequence
corr_table[[loci[i]]]<-matrix(NA, nrow = 2, ncol = as.numeric(nchar(AA_segments[[loci[i]]][,2][1]))) ### SJM added NA argument
#determines alignment length based on the total number of characters plus the alignment start (which is negative )
alignment_length[[loci[i]]]<-as.numeric(nchar(AA_segments[[loci[i]]][,2][1]))+alignment_start[[loci[[i]]]]
#pastes alignment_start to alignment_length together in sequential order, with inDels accounted for
#captures output as "w"
w[[i]] <- capture.output(cat(alignment_start[[loci[i]]]:alignment_length[[loci[i]]]))
#splits string formed by cat for separate character variables
alignment_positions[[loci[i]]]<-as.character(unlist(strsplit(w[[loci[i]]], " ")))
#eliminates "0", as the alignment sequence from ANHIG does not contain 0
alignment_positions[[loci[i]]]<-alignment_positions[[loci[i]]][-which(alignment_positions[[loci[i]]] == 0)]
#contains alignment sequence information
corr_table[[loci[i]]][2,]<-alignment_positions[[loci[i]]]
#string splits to extract locus in the allele name
#assigns to new variable "AA_aligned"
AA_aligned[[loci[i]]]<- as.matrix(do.call(rbind,strsplit(AA_segments[[loci[i]]][,1],"[*]")))
#adds a new column of pasted locus and trimmed two field alleles to AA_aligned
AA_aligned[[loci[i]]]<- cbind(AA_aligned[[loci[i]]], paste(AA_aligned[[loci[i]]][,1], apply(AA_aligned[[loci[i]]],MARGIN=c(1,2),FUN=GetField,Res=2)[,2], sep="*"))
#binds AA_aligned and AA_segments -- renames columns
AA_segments[[loci[i]]] <- cbind(AA_aligned[[loci[i]]], AA_segments[[loci[i]]])
colnames(AA_segments[[loci[i]]]) <- c("locus", "full_allele", "trimmed_allele", "allele_name", "AAsequence")
#sets refexon to a reference peptide for each HLA locus based on the reference sequences in AA_segments
refexon[[loci[i]]] <- rbind(AA_segments[[loci[i]]][1,])[which(rbind(AA_segments[[loci[i]]][1,])[,"locus"]==loci[[i]]),'AAsequence']
#splits AA_sequence column at every amino acid, resulting in a list of split amino acids for each row
pepsplit[[loci[i]]] <- sapply(AA_segments[[loci[i]]][,"AAsequence"],strsplit,split="*")
#fills in space with NA for alleles with premature termination to make it the same number of characters
#as the reference sequence
pepsplit[[loci[i]]]<- lapply(pepsplit[[loci[i]]],function(x) c(x,rep("NA",nchar(refexon[[loci[i]]])-length(x))))
#binds pep_split together by element in its previous list form by row
pepsplit[[loci[i]]]<- do.call(rbind,pepsplit[[loci[i]]])
#nullifies row names
rownames(pepsplit[[loci[i]]]) <- NULL
#binds all columns together to form desired ouput, as described above
AA_segments[[loci[i]]] <- cbind.data.frame(AA_segments[[loci[i]]][,1:4],pepsplit[[loci[i]]], stringsAsFactors=FALSE)
#finds positions in AA_segments that have ".", indicating an inDel
inDels[[loci[[i]]]]<-colnames(AA_segments[[loci[[i]]]][1, 5:ncol(AA_segments[[loci[[i]]]])][AA_segments[[loci[[i]]]][1, 5:ncol(AA_segments[[loci[[i]]]])] %in% "."])
#inputs AA_segments alignment sequence into the corr_table with "InDel" still present
corr_table[[loci[[i]]]][1,]<-names(AA_segments[[loci[[i]]]][5:ncol(AA_segments[[loci[[i]]]])])
if(length(inDels[[loci[[i]]]])!=0){
for(b in 1:length(inDels[[loci[[i]]]])){
corr_table[[loci[[i]]]][2,][inDels[[loci[[i]]]][[b]]==corr_table[[loci[[i]]]][1,]]<-paste("InDel", b, sep="_")
}
}
#if alignment start is position 1, alignment start does not need to be accounted for
#when determining length of corr_table in re-enumerating corr_table with InDels
if(alignment_start[[loci[[i]]]]==1){
#fixes enumerations following "InDel"
corr_table[[loci[[i]]]][2,][!grepl("InDel", corr_table[[loci[[i]]]][2,])]<-(alignment_start[[loci[[i]]]]:((length(corr_table[[loci[[i]]]][2,])-length(corr_table[[loci[[i]]]][2,][grepl("InDel", corr_table[[loci[[i]]]][2,])]))))[!(alignment_start[[loci[[i]]]]:((length(corr_table[[loci[[i]]]][2,])-length(corr_table[[loci[[i]]]][2,][grepl("InDel", corr_table[[loci[[i]]]][2,])]))))==0]
}
else{
corr_table[[loci[[i]]]][2,][!grepl("InDel", corr_table[[loci[[i]]]][2,])]<-(alignment_start[[loci[[i]]]]:((length(corr_table[[loci[[i]]]][2,])-length(corr_table[[loci[[i]]]][2,][grepl("InDel", corr_table[[loci[[i]]]][2,])]))+alignment_start[[loci[[i]]]]))[!(alignment_start[[loci[[i]]]]:((length(corr_table[[loci[[i]]]][2,])-length(corr_table[[loci[[i]]]][2,][grepl("InDel", corr_table[[loci[[i]]]][2,])]))+alignment_start[[loci[[i]]]]))==0]
}
#renames columns in AA_segments
colnames(AA_segments[[loci[i]]]) <- c("locus","allele","trimmed_allele","allele_name", 1:ncol(corr_table[[loci[[i]]]]))
#distributes reference sequence from row 1
#into all other rows, if they contain a "-"
#amino acids with changes will not be impacted
for(k in 5:ncol(AA_segments[[loci[i]]])) {
AA_segments[[loci[i]]][,k][which(AA_segments[[loci[i]]][,k]=="-")] <- AA_segments[[loci[i]]][,k][1]}
#for loop for subsetting AA_segments by matching exon start and end cells from AA_atlas
#column names of AA_segments, which are AA positions
#subsets relevant amino acids, inputting them into a list
#binds previous columns with locus, allele, trimmed allele, and allele name information
#subsets first exon for all loci
#HLA-A, B, and C's first exons end at -1 (i.e exon 2 begins at position 1), so
#the matching end atlas coordinate must be substracted by 2, since there is
#no position zero in the alignment
#HLA-DQB1, DRB1, and DPB1's first exon ends at a number other than -1
#(i.e. exon 2 begins at position #2<, the matching end atlas coordinate is
#only subtracted by 1, since we do not need to
#account for there being no position zero in the alignment)
if((loci[[i]]=="A") || (loci[[i]]=="B") || (loci[[i]]=="C")){
exonlist[[i]][[1]]<-cbind(AA_segments[[loci[i]]][,1:4], AA_segments[[loci[i]]][,5:match(as.numeric(AA_atlas[match(loci[[i]],names(AA_atlas))][[loci[i]]][[2]][[1]]-1), colnames(AA_segments[[loci[i]]]))])}
if((loci[[i]]=="DRB1") || (loci[[i]]=="DQB1") || (loci[[i]]=="DPB1")){
exonlist[[i]][[1]]<-cbind(AA_segments[[loci[i]]][,1:4], AA_segments[[loci[i]]][,5:match(as.numeric(AA_atlas[match(loci[[i]],names(AA_atlas))][[loci[i]]][[2]][[1]]), colnames(AA_segments[[loci[i]]]))])}
#subsets last exon for loci
exonlist[[loci[i]]][[nrow(AA_atlas[[match(loci[[i]],names(AA_atlas))]])+1]]<-cbind(AA_segments[[loci[i]]][,1:4], AA_segments[[loci[i]]][match(AA_atlas[[match(loci[[i]],names(AA_atlas))]][[2]][[length(AA_atlas[match(loci[[i]],names(AA_atlas))][[loci[i]]][[2]])]]:names(AA_segments[[loci[i]]][ncol(AA_segments[[loci[i]]])]), colnames(AA_segments[[loci[i]]]))])
#subsets N-1 exons
for(j in 1:(nrow(AA_atlas[[match(loci[i],names(AA_atlas))]])-1)){
exonlist[[loci[i]]][[j+1]]<-cbind(AA_segments[[loci[i]]][,1:4], AA_segments[[loci[i]]][,match(AA_atlas[match(loci[i],names(AA_atlas))][[loci[i]]][[2]][[j]], colnames(AA_segments[[loci[i]]])):match(as.numeric(AA_atlas[match(loci[i],names(AA_atlas))][[loci[i]]][[2]][[j+1]]),colnames(AA_segments[[loci[i]]]))])}
#for loop for subsetting exonlist alleles to only those found in genotype data
#focuses on subsetting via the third column in exonlist, which consists of trimmed_allele data
#variable e in for loop represents number of columns per locus, which is how BIGDAWG input data is formatted
for(d in 1:length(exonlist[[loci[i]]])){
for(e in 1:2){
#finds which exonlist alleles are present in genotype data alleles
geno_alleles[[loci[i]]][[e]]<-exonlist[[loci[i]]][[d]][,3][which(exonlist[[loci[i]]][[d]][,3] %in% gdata[which(colnames(gdata)%in%loci[[i]]==TRUE)][,e]==TRUE)]
}}
#merges both sets of unique alleles found in exonlist and gets rid of duplicates
geno_alleles[[loci[i]]]<-unique(append(geno_alleles[[loci[i]]][[1]], geno_alleles[[loci[i]]][[2]]))
#creates a variable geno_exonlist, with the number of elements equal to how many exons there are for an allele
geno_exonlist[[loci[i]]]<-sapply(exonlist[[loci[i]]], function(x) NULL)
#reads in text file of of latest, full allele history -- chooses most recent allele release to set as HLA_alleles
#LT
HLA_alleles<-read.csv("https://raw.githubusercontent.com/ANHIG/IMGTHLA/Latest/Allelelist_history.txt", header=TRUE, stringsAsFactors = FALSE, skip=6,sep=",")[,c(1,2)]
#compiles a list of CWD alleles and inserts them into a new variable
CWDalleles<-CWDverify()
#makes a list of lists based on the number of exons for a given locus
nonCWD_checked[[loci[[i]]]]<-singleAA_exon[[loci[[i]]]]<-singleAA_alleles[[loci[[i]]]]<-pastedAAseq[[loci[[i]]]]<-columns[[loci[[i]]]]<-all_gdata[[loci[[i]]]]<-nonCWDtrunc[[loci[[i]]]]<-genotype_variants[[loci[[i]]]]<-sapply(exonlist[[loci[[i]]]], function(x) NULL)
#subsets exonlist alleles to those found in genotype data and inserts them into a new list
#geno_exonlist
for(d in 1:length(exonlist[[loci[i]]])){
geno_exonlist[[loci[i]]][[d]]<-subset(exonlist[[loci[i]]][[d]], exonlist[[loci[i]]][[d]][,3]%in%geno_alleles[[loci[i]]])
geno_exonlist[[loci[i]]][[d]]<-cbind.data.frame("accessions"=HLA_alleles[,1][match(geno_exonlist[[loci[i]]][[d]]$allele_name, HLA_alleles[,2])], geno_exonlist[[loci[i]]][[d]], stringsAsFactors=FALSE)
geno_exonlist[[loci[i]]][[d]]<-cbind.data.frame("CWD"=ifelse(geno_exonlist[[loci[i]]][[d]]$accessions %in% CWDalleles$Accession, "CWD", "NON-CWD"), geno_exonlist[[loci[i]]][[d]], stringsAsFactors=FALSE)
#subsets geno_exonlist to only containing CWD alleles via accession number
#and stores it to a new variable, all_gdata
#NOTE: all g_data will be a master copy of all variants of genotype data alleles
if(any(geno_exonlist[[loci[i]]][[d]]$CWD=="CWD")){
all_gdata[[loci[i]]][[d]]<-na.omit(geno_exonlist[[loci[i]]][[d]][geno_exonlist[[loci[i]]][[d]]$accessions%in%CWDalleles$Accession,])}
#compares whether all truncated alleles in all_gdata are in geno_alleles
#returns truncated alleles that are not CWD, but that are present in geno_alleles
nonCWDtrunc[[loci[i]]]<-cbind(geno_alleles[[loci[i]]]%in%all_gdata[[loci[i]]][[d]]$trimmed_allele, geno_alleles[[loci[i]]])[which(cbind(geno_alleles[[loci[i]]], geno_alleles[[loci[i]]]%in%all_gdata[[loci[i]]][[d]]$trimmed_allele)==FALSE)]
if (length(nonCWDtrunc[[loci[i]]]) != 0) {
#obtains non-CWD genotype variants in the genotype dataset
for(b in 1:length(nonCWDtrunc[[loci[i]]])){
genotype_variants[[loci[i]]][[d]][[b]]<-subset(geno_exonlist[[loci[i]]][[d]], geno_exonlist[[loci[i]]][[d]]$trimmed_allele==nonCWDtrunc[[loci[i]]][[b]])
#if the non-CWD allele only has one variant, bind it to all_gdata
if(nrow(genotype_variants[[loci[i]]][[d]][[b]])==1){all_gdata[[loci[[i]]]][[d]]<-rbind(all_gdata[[loci[[i]]]][[d]],genotype_variants[[loci[[i]]]][[d]][[b]])}
#if the non-CWD allele has more than one variant, extract number of amino acid columns
#present for a given exon
if(nrow(genotype_variants[[loci[i]]][[d]][[b]])>1){
columns[[loci[i]]][[d]]<-7:length(genotype_variants[[loci[i]]][[d]][[b]])
#if an exon for a non-CWD allele has more than one amino acid column, paste all the columns together to obtain
#the amino acid sequence which is stored in pastedAAseq
#pastedAAseq is evaluated to find which allele variant has the most complete sequence by counting the number of
#character, omitting * (notation for unknown amino acid)
#the allele with the most compelte sequence is bound to all_gdata
if(length(columns[[loci[i]]][[d]])>1){
pastedAAseq[[loci[i]]][[d]]<-apply(genotype_variants[[loci[i]]][[d]][[b]][ , columns[[loci[i]]][[d]]] , 1 , paste , collapse = "" )
all_gdata[[loci[i]]][[d]]<-rbind(all_gdata[[loci[i]]][[d]], genotype_variants[[loci[i]]][[d]][[b]][names(pastedAAseq[[loci[i]]][[d]][which.max(nchar(gsub("[*^]","",pastedAAseq[[loci[i]]][[d]])))]),])}
#if an exon for a non-CWD allele has one amino acid column (i.e. exon 8 for HLA-A), store it into a separate
#variable, singleAA_alleles
if(length(columns[[loci[i]]][[d]])==1){
singleAA_exon[[loci[i]]][[b]]<-genotype_variants[[loci[i]]][[d]][[b]][ncol(genotype_variants[[loci[i]]][[d]][[b]])==7]
singleAA_alleles[[loci[i]]]<-singleAA_exon[[loci[i]]][lapply(singleAA_exon[[loci[i]]], length)>0]}}}
#evaluates whether a variant amino acid is present and subsets it to nonCWD_checked if there is one
#otherwise, if nonCWDchecked only contains *, use *
for(c in 1:length(singleAA_alleles[[loci[i]]])){
if(any(singleAA_alleles[[loci[i]]][[c]][7:length(singleAA_alleles[[loci[i]]][[c]])]!="*")==TRUE) {nonCWD_checked[[loci[i]]][[c]]<-subset(singleAA_alleles[[loci[i]]][[c]], singleAA_alleles[[loci[i]]][[c]][7:length(singleAA_alleles[[loci[i]]][[c]])]!="*")[1,]}
if(any(singleAA_alleles[[loci[i]]][[c]][7:length(singleAA_alleles[[loci[i]]][[c]])]!="*")==FALSE){nonCWD_checked[[loci[i]]][[c]]<-subset(singleAA_alleles[[loci[i]]][[c]], singleAA_alleles[[loci[i]]][[c]][7:length(singleAA_alleles[[loci[i]]][[c]])]=="*")[1,]}
}
#binds narrowed down non-CWD alleles for one amino acid exons and inputs it back IF there is a one columned amino acid
#if not, nothing happens
if(length(columns[[loci[i]]][[d]])==1){
all_gdata[[loci[i]]][[d]]<-rbind(all_gdata[[loci[i]]][[d]][ncol(all_gdata[[loci[i]]][[d]])==7], rbind(nonCWD_checked[[loci[i]]][[1]], nonCWD_checked[[loci[i]]][[2]]))}}
}
#creates a new variable, position_parsed, with pre-defined elements based on
#column names in AA_segments (i.e. position in the peptide sequence)
position_parsed[[loci[i]]]<-sapply(colnames(AA_segments[[loci[i]]][,5:ncol(AA_segments[[loci[i]]])]), function(x) NULL)
#for loop to extract only variant amino acids and input them into their respective element positions
#in position_parsed
#extracts only variant amino acids, discounting NA and unknown alleles (*)
for(a in 1:length(all_gdata[[loci[i]]])){
for(b in 1:length(7:ncol(all_gdata[[loci[i]]][[a]]))){
position_parsed[[loci[i]]][match(colnames(all_gdata[[loci[i]]][[a]][7:ncol(all_gdata[[loci[i]]][[a]])]), names(position_parsed[[loci[i]]]))][[b]]<-unique(subset(all_gdata[[loci[i]]][[a]][c(5,b+6)], (all_gdata[[loci[i]]][[a]][b+6]!=all_gdata[[loci[i]]][[a]][,b+6][1]) & (all_gdata[[loci[i]]][[a]][b+6] != "*") & (all_gdata[[loci[i]]][[a]][b+6] != "NA")))}}
#removes invariant positions (i.e elements with no rows )
#inDels will be filtered out via a is.null application
position_parsed[[loci[i]]]<-position_parsed[[loci[i]]][sapply(position_parsed[[loci[[i]]]][which(lapply(position_parsed[[loci[[i]]]], is.null)==FALSE)], nrow)>0]
#further subsets position_parsed to only variant positions with polymorphic amino acids
for(g in 1:length(position_parsed[[loci[i]]])){
position_parsed[[loci[i]]][[g]]<-subset(position_parsed[[loci[i]]][[g]], length(unique(position_parsed[[loci[i]]][[g]][,2]))!=1)}
#removes elements without polymorphic amino acids
position_parsed[[loci[i]]]<-position_parsed[[loci[i]]][sapply(position_parsed[[loci[i]]], nrow)>0]
variantAApositions[[loci[[i]]]]<-sapply(position_parsed[[loci[[i]]]], function(x) NULL)
for(j in 1:length(all_gdata[[loci[[i]]]])){
for(k in 1:length(names(variantAApositions[[loci[[i]]]]))){
if(any(colnames(all_gdata[[loci[[i]]]][[j]])==names(variantAApositions[[loci[[i]]]])[[k]])){variantAApositions[[loci[[i]]]][names(variantAApositions[[loci[[i]]]])==names(variantAApositions[[loci[[i]]]])][[k]]<-cbind.data.frame(trimmed_allele=all_gdata[[loci[[i]]]][[1]][,5], all_gdata[[loci[[i]]]][[j]][colnames(all_gdata[[loci[[i]]]][[j]])==names(variantAApositions[[loci[[i]]]])[[k]]], stringsAsFactors=FALSE)}}}
#creates a dataframe that will go into BIGDAWG, #where each variant position has 2 columns to match each locus specific
#column in genotype data
#columns 1 and 2 of this dataframe are adapted from genotype data columns
#patientID and disease status
mastertable[[loci[[i]]]]<- data.frame(gdata[,c(1,2)], matrix("", ncol = length(variantAApositions[[loci[[i]]]])*2), stringsAsFactors = F)
mastertablecols[[loci[[i]]]]<-names(position_parsed[[loci[[i]]]])
#repeats variant amino acid positions twice and stores them for future naming of
#master table column
for(t in 1:length(mastertablecols[[loci[[i]]]])){
rep_variantAA[[loci[[i]]]][[t]]<-rep(mastertablecols[[loci[[i]]]][[t]],2)}
#renames column names
colnames(mastertable[[loci[[i]]]])<-c("SampleID", "Disease", unlist(rep_variantAA[[loci[[i]]]]))
for(u in 1:length(gdata[loci[[i]]==colnames(gdata)])){
for(s in 1:length(variantAApositions[[loci[[i]]]])){
mastertable[[loci[[i]]]][names(variantAApositions[[loci[[i]]]][[s]][2]) == names(mastertable[[loci[[i]]]])][[u]]<-variantAApositions[[loci[[i]]]][[s]][,2][match(gdata[loci[[i]]==colnames(gdata)][[u]], variantAApositions[[loci[[i]]]][[s]][,1])]
}
}
#Fixes the alignment - output will be in true alignment instead of positional order.
for (x in 3:ncol(mastertable[[loci[[i]]]])) {
for (y in 1:(ncol(corr_table[[loci[[i]]]]))) {
if (corr_table[[loci[[i]]]][[1,y]] == colnames(mastertable[[loci[[i]]]][x])) {
colnames(mastertable[[loci[[i]]]])[x] <- corr_table[[loci[[i]]]][[2,y]]
}
}
}
}
mastertable #Vinh's addition
}
##Part 3 - Combination Analyzer##
## combiAnalyzer() -- one iteration of the BIGCAAT motif-building loop for a
## single locus:
##   1. runs a BIGDAWG locus-level ("L") association test on myData,
##   2. compares each odds ratio against the previous iteration's results
##      (the "MAORI" comparison), keeping positions that are significant and
##      improved the absolute OR by > 0.1,
##   3. builds the next iteration's genotype table (combidf) whose columns are
##      combinations of the surviving amino-acid positions.
## Arguments (all supplied by runCombiAnalyzer):
##   loci            - name of the locus under analysis
##   myData          - BIGDAWG-formatted genotype table for this iteration
##   KDLO/BOLO/UMLO  - kept / BIGDAWG-output / unassociated results from the
##                     previous iteration (unused on iteration 0)
##   counter         - 0-based iteration number
##   motif_list      - optional cap on motif sizes (NULL = run to maximal OR)
##   KDLO_list/UMLO_list - per-iteration histories kept by the caller
##   variantAAtable  - per-locus variant amino-acid tables (iteration-0 input)
##   loop            - 1 = predisposing (OR > 1), 2 = protective (OR < 1)
## Returns either a bare list(KDLO, BOLO, UMLO) when no further combinations
## can be built, or a named list (KDLO/BOLO/combidf/UMLO/combinames) that
## feeds the next iteration.
## NOTE(review): the early return in the counter==0 branch ("length(start1)
## == 0") yields only list(KDLO, BOLO), one element short of the other exit
## paths -- confirm downstream callers tolerate a 2-element result.
## External dependencies: BIGDAWG::BIGDAWG, dplyr::filter (%>%), and
## gtools::mixedsort/mixedorder.
combiAnalyzer<-function(loci, myData, KDLO, BOLO, UMLO, counter, motif_list, KDLO_list, UMLO_list, variantAAtable, loop){
#specifies a default motif list if one is not provided
if((is.null(motif_list)==TRUE)&(counter==0)){
motif_list<-c(0,2,3,4,5,6,7)
# cat("BIGCAAT: A motif list has not been provided - BIGCAAT will run until maximal OR is reached. \n") ### SJM Currently no way to provide a motif list
}
## NOTE: motif_list is modified only in this function's local scope; the
## caller's copy is unaffected.
#cat("internal motif_list = ",motif_list,"\n",sep="")
#BIGDAWG analysis for iteration 0
#set output as T for statistical outputs
silenceBD <- capture.output(BOLO<-BIGDAWG(myData, HLA=F, Run.Tests="L", Missing = 2, Return=T, Output = F, Verbose = F)) ### SJM Verbose OFF, and BIGDAWG output captured to silenceBD
#unlists all lists in columns in the dataframe
BOLO<-data.frame(lapply(as.data.frame(BOLO$L$Set1$OR), function(x) unlist(x)), stringsAsFactors = F)
#creates dummy_KDLO for comparison to first BOLO ONLY on the 0th iteration
if(counter==0){
#makes dummy KDLO based on previous BOLO
## The dummy row carries a neutral OR of 1.0 so iteration 0 measures each
## position's departure from no association.
dummy_KDLO<-as.data.frame(t(c("TBA-loc","TBA-allele",1.0,0.5,1.5,0.5,"NS")), stringsAsFactors = F)[rep(seq_len(nrow(as.data.frame(t(c("TBA-loc","TBA-allele",1.0,0.5,1.5,0.5,"NS")), stringsAsFactors = F))), each=nrow(BOLO)),]
dummy_KDLO[,1]<-BOLO$Locus
dummy_KDLO[,2]<-BOLO$Allele
##MAORI module
#finds difference between dummy and BOLO amino acid variants and inputs into new column
##dummy comparison only for 0th iteration
for(i in 1:nrow(BOLO)){
#finds OR difference between BOLO and dummy ORs -- subs out "-", for a blank, since only evaluating absolute value of OR diff
#adds difference to new column in BOLO
## NOTE(review): matching here uses grepl(), so a position name like "1"
## also matches "10"/"11" etc. -- confirm partial matches cannot select the
## wrong dummy row.
BOLO[i,8]<-gsub("-", "", as.numeric(BOLO[i,]$OR)-as.numeric(subset(subset(dummy_KDLO, grepl(BOLO[i,][[1]], dummy_KDLO[,1])), grepl(BOLO[i,][[2]], subset(dummy_KDLO, grepl(BOLO[i,][[1]], dummy_KDLO[,1]))[,2]))[,3]))[[1]]
}}
#subsets out binned alleles and any alleles with NA combinations
if(counter>0){
BOLO<-subset(BOLO, (BOLO$Allele!="binned") & (!grepl("NA", BOLO$Allele)))}
#MAORI statement for iteration 1
## Compares each pair's OR against the OR of its first constituent single
## position from the previous KDLO.
if(counter==1){
for(i in 1:nrow(BOLO)){
BOLO[i,8]<-gsub("-", "", as.numeric(BOLO[i,]$OR)-as.numeric(subset(subset(KDLO, KDLO[,1] %in% strsplit(BOLO[i,][[1]], ":")[[1]][[1]]), subset(KDLO, KDLO[,1] %in% strsplit(BOLO[i,][[1]], ":")[[1]][[1]])$Allele %in% strsplit(BOLO[i,][[2]], "~")[[1]][[1]])$OR))}
}
#ends function if BOLO is empty
if((counter>0) & (nrow(BOLO)==0)){
return(list(KDLO, BOLO, UMLO))}
#MAORI statement for iteration 2+
#further addition for adding a 9th column for comparison to newly made nth variants to its singular amino acid variant
## Column 8: improvement over the (n-1)-mer prefix motif from the previous
## KDLO.  Column 9: improvement over the newly appended single position,
## looked up in the iteration-1 history (KDLO_list[[1]]).
if(counter>1){
for(i in 1:nrow(BOLO)){
BOLO[i,8]<-gsub("-", "", as.numeric(BOLO[i,]$OR)- as.numeric(subset(subset(KDLO, KDLO[,1] %in% paste(strsplit(BOLO[i,][[1]], ":")[[1]][c(1:length(strsplit(KDLO$Locus, ":")[[1]]))], collapse=":")), subset(KDLO, KDLO[,1] %in% paste(strsplit(BOLO[i,][[1]], ":")[[1]][c(1:length(strsplit(KDLO$Locus, ":")[[1]]))], collapse=":"))$Allele %in%paste(strsplit(BOLO[i,][[2]], "~")[[1]][c(1:length(strsplit(KDLO$Locus, ":")[[1]]))], collapse="~"))$OR))
BOLO[i,9]<-gsub("-", "", as.numeric(BOLO[i,]$OR)-as.numeric(subset(subset(KDLO_list[[1]], KDLO_list[[1]]$Locus %in% strsplit(BOLO[i,][[1]], ":")[[1]][[length(unlist(strsplit(BOLO[i,][[1]], ":")))]]), subset(KDLO_list[[1]], KDLO_list[[1]]$Locus %in% strsplit(BOLO[i,][[1]], ":")[[1]][[length(unlist(strsplit(BOLO[i,][[1]], ":")))]])$Allele %in% strsplit(BOLO[i,][[2]], "~")[[1]][[length(unlist(strsplit(BOLO[i,][[1]], ":")))]])$OR))
}}
#subsets out NS values
## NOTE(review): column 7 of the BIGDAWG OR table is treated as the
## significance flag, with "*" meaning significant -- confirm against the
## BIGDAWG output format for the installed version.
KDLO<-subset(BOLO,BOLO[,7]=="*")
##loop specifications -- LT
#filters out predisposing ORs for analysis
if(loop==1){
KDLO<-KDLO %>% filter(OR > 1.0)}
#filters out protective ORs for analysis
if(loop==2){
KDLO<-KDLO %>% filter(OR <1.0)}
#statement for returning BOLO if KDLO=0
if((counter>0) & (nrow(KDLO)==0)){
return(list(KDLO, BOLO, UMLO))}
#subsets out variants that have not shown >0.1 improvement from their previous variants and
#singular amino acids
if(counter>1){
#subsets out OR differences smaller than 0.1
KDLO<-subset(KDLO, KDLO[,9]>0.1)}
KDLO<-subset(KDLO, KDLO[,8]>0.1)
#statement for returning KDLO if KDLO=0
if(nrow(KDLO)==0){
return(list(KDLO, BOLO, UMLO))}
#adds in positions from original BOLO that were previously eliminated because of NS or <0.1 variant
KDLO<-unique(rbind(KDLO, subset(BOLO, BOLO$Locus%in%KDLO$Locus)))[mixedorder(row.names(unique(rbind(KDLO, subset(BOLO, BOLO$Locus%in%KDLO$Locus))))),]
#finds unassociated positions from current iteration
unassociated_posi<-unique(BOLO$Locus[!BOLO$Locus %in% KDLO$Locus])
#if length(unassociated_posi==0), return KDLO -- this means KDLO and BOLO are the same
#and max improvement has been reached
if(length(unassociated_posi)==0){
return(list(KDLO, BOLO, UMLO))
}
#pair name generation
## Iteration 0: build every unordered pair "A:B" of surviving positions.
if(counter==0){
start1<-unique(KDLO$Locus)
#if nothing is in the KDLO, return KDLO and BOLO ## LT
if((length(start1))==0){
return(list(KDLO, BOLO))
}
combinames<-sapply(start1, function(x) NULL)
for(i in 1:(length(start1)-1)){ ## range.x = 1:(N-1)
for(j in (i+1):length(combinames)){ ## range.y = x+1:N
if(names(combinames)[[j]]!=start1[[i]]){
combinames[[i]][[j]]<-paste(start1[[i]],names(combinames)[[j]],sep=":")}}}
#unlists combinames and omits NAs to obtain all unique possible pair combinations
combinames<-unlist(combinames, use.names = F)[!is.na(unlist(combinames, use.names = F))]
}
#set start as singular amino acids
## Iteration 1+: extend each surviving n-mer by every single position not
## already contained in it, then canonicalize with mixedsort() so duplicate
## motifs written in different orders collapse to one name.
if(counter>0){
start1<-unique(unlist(strsplit(KDLO$Locus, ":")))
combinames<-NULL}
if(counter>0){
possible_combis<-sapply(unique(KDLO$Locus), function(x) NULL)
#finds possible combinations by pasting names of list with singular amino acids not in that pair
for(i in 1:length(possible_combis)){
possible_combis[[i]]<-paste(names(possible_combis[i]), unique(start1[which(start1%in%strsplit(names(possible_combis[i]), ":")[[1]]==FALSE)]), sep=":")}
#splits those triplets up and sorts them numerically to later on eliminate any duplicates
for(j in 1:length(unlist(possible_combis))){
combinames[[j]]<-paste(mixedsort(strsplit(unlist(possible_combis, use.names=F), ":")[[j]], decreasing=F), collapse=":")}
combinames<-unique(mixedsort(combinames))}
###subsets combinames by successive unassociated positions
## Drop any candidate motif containing a position already found to be
## unassociated, either in this iteration or (for counter >= 2) in any
## earlier iteration's UMLO history.
if(counter==1) {
for(i in 1:length(unassociated_posi)) {
combinames<-subset(combinames, (!grepl(paste("^", unassociated_posi[[i]], sep=""), combinames)) & (!grepl(paste(":", unassociated_posi[[i]], sep=""), combinames)))}
}
if(counter==2) {
for(i in 1:length(unassociated_posi)) {
combinames<-subset(combinames, (!grepl(paste("^", unassociated_posi[[i]], sep=""), combinames)) & (!grepl(paste(":", unassociated_posi[[i]], sep=""), combinames)))}
for(i in 1:length(UMLO_list[[counter]])){
combinames<-subset(combinames, (!grepl(paste("^", UMLO_list[[counter]][[i]], sep=""), combinames)) & (!grepl(paste(":", UMLO_list[[counter]][[i]], sep=""), combinames)))}
}
if (counter > 2) {
for(i in 1:length(unassociated_posi)) {
combinames<-subset(combinames, (!grepl(paste("^", unassociated_posi[[i]], sep=""), combinames)) & (!grepl(paste(":", unassociated_posi[[i]], sep=""), combinames)))}
for(x in counter:2) {
for(i in 1:length(UMLO_list[[x]])) {
combinames<-subset(combinames, (!grepl(paste("^", UMLO_list[[x]][[i]], sep=""), combinames)) & (!grepl(paste(":", UMLO_list[[x]][[i]], sep=""), combinames)))}
}
}
if(length(combinames)==0) {
return(list(KDLO, BOLO, UMLO))
}
#df for pairs -- length is number of unique pairs * 2,
## Two columns per motif: one for each chromosome copy in the BIGDAWG
## genotype layout.
combidf<-data.frame(variantAAtable[[loci]][,c(1,2)], matrix("", ncol =length(rep(combinames, 2))), stringsAsFactors = F)
#fills in column names
colnames(combidf)<-c("SampleID", "Disease", mixedsort(rep(unlist(combinames), 2)))
#observes number of columns for those needed to be pasted together
cols=c(1:length(strsplit(combinames[[1]], ":")[[1]]))
#[[1]] to contain amino acid combos of TRUE/FALSE
#[[2]] to contain amino acid combos of FALSE/TRUE
dfAA<-sapply(1:2, function(x) NULL)
#fills in element names in the lists formed in the above lists
for(j in 1:length(dfAA)){
dfAA[[j]]<-sapply(combinames, function(x) NULL)}
#fills in appropriate position pair combos into dfAA
## c(TRUE, FALSE) / c(FALSE, TRUE) select alternating columns, i.e. the
## first vs. second copy of each position's genotype column.
for(i in 1:length(combinames)){
dfAA[[1]][[i]]<-apply(variantAAtable[[loci]][c(TRUE, FALSE)][strsplit(combinames, ":")[[i]]][,cols], 1, paste, collapse = "~")
dfAA[[2]][[i]]<-apply(variantAAtable[[loci]][c(FALSE, TRUE)][strsplit(combinames, ":")[[i]]][,cols], 1, paste, collapse = "~")
}
#fills into pair_df
combidf[,3:length(combidf)][,c(TRUE,FALSE)]<-dfAA[[1]]
combidf[,3:length(combidf)][,c(FALSE,TRUE)]<-dfAA[[2]]
#saves each iteration into specified elements in a list in a variable "myData"
#returns myData
myDataFinal<-list("KDLO"=KDLO, "BOLO"=BOLO, "combidf"=combidf, "UMLO"=unassociated_posi, "combinames"=combinames)
return(myDataFinal)
}
## runCombiAnalyzer(): iteration driver for a single locus.  Repeatedly calls
## combiAnalyzer(), feeding each iteration's combination table (combidf) back
## in as the next myData, until combiAnalyzer signals completion (its KDLO
## result has no rows/dimensions) or the end of the motif list is reached.
## Arguments:
##   loci           - a single locus name
##   variantAAtable - output of variantAAextractor(); [[loci]] is the
##                    iteration-0 genotype table
##   loop           - 1 = predisposing analysis, 2 = protective analysis
## Returns a list of per-iteration histories: KDLO, BOLO, UMLO.
runCombiAnalyzer <- function(loci, variantAAtable, loop) {
#makes empty lists so results of each iteration may be stored
BOLO_list<-KDLO_list<-UMLO_list<-list()
#sets motif_list to NULL
## NOTE: motif_list stays NULL for the whole loop (combiAnalyzer only sets
## its own local copy), so length(motif_list)==counter is never TRUE once
## counter >= 1; the loop therefore exits only via the return below.
motif_list<-NULL
#sets myData, iteration0, to variantAAtable[[loci]]
myData<-variantAAtable[[loci]]
#initiates recursion with stop=FALSE and begins the counter at 0
## NOTE: the local variable "stop" shadows base::stop() inside this function.
stop<-FALSE
counter=0
###BEGIN RECURSION -- as long as stop==FALSE, combiAnalyzer will be run until the maximum OR
#is reached, or the end of the motif_list is reached
#the recursive program receives input from combiAnalyzer, where stop=TRUE once the maximum OR
#is reached, either because the BOLO is empty, the KDLO is empty, or no more combination names
#can be made
while(stop==FALSE){
#used to inform user what iteration is currently running
# cat("BIGCAAT:", counter,ifelse(counter==1,"iteration has","iterations have"),"been run \n", sep=" ") #### SJM cleaning up messaging
cat("Evaluating",ifelse(counter==0,"initial comparison to null hypothesis \n",paste(counter,"-mers \n",sep=""))) ### SJM more accurate messaging
#saves each iteration to "interim"
cat(counter, "\n")
## NOTE(review): arguments 3 and 4 are passed as (BOLO, KDLO) while
## combiAnalyzer declares (KDLO, BOLO); BOLO/KDLO/UMLO are also undefined on
## the first pass (harmless only because R evaluates arguments lazily and
## iteration 0 never reads them) -- confirm the ordering is intentional.
interim<-combiAnalyzer(loci, myData, BOLO ,KDLO, UMLO, counter, motif_list, KDLO_list, UMLO_list, variantAAtable, loop)
#adds 1 to the counter with each iteration
counter=counter+1
#saves all data to list variables made earlier
## When combiAnalyzer returned a bare (unnamed) list, interim$combidf and
## interim$KDLO are NULL; the is.null(nrow(KDLO)) test below then ends the
## analysis for this locus.
myData<-interim$combidf
KDLO<-KDLO_list[[counter]]<-interim$KDLO
BOLO<-BOLO_list[[counter]]<-interim$BOLO
UMLO<-UMLO_list[[counter]]<-interim$UMLO
#cat("external motif_list = ",motif_list,"\n",sep="")
if(is.null(nrow(KDLO))==TRUE){
cat("Maximal significant OR values identified. End of analysis of the",loci,"locus.\n\n") ### SJM cosmetic & informative changes
Results <- (list(KDLO = KDLO_list, BOLO = BOLO_list, UMLO = UMLO_list))
return (Results)
}
## Intentionally empty branch: further analysis is possible, continue loop.
if((is.null(nrow(KDLO))==FALSE) & (length(motif_list)!=counter)){
## cat("BIGCAAT: Dataset is able to be further analyzed - moving on to next iteration.\n") ### SJM added break, and removed message
}
if((is.null(nrow(KDLO))==FALSE) & length(motif_list)==counter){
cat("BIGCAAT: WARNING: end of motif_list analysis, but further analysis is possible.\n") ### SJM added break
stop=TRUE
}
## NOTE(review): this branch is unreachable -- the is.null(nrow(KDLO)) case
## already returned above.
if((is.null(nrow(KDLO))==TRUE) & length(motif_list)==counter){
cat("BIGCAAT: End of motif_list analysis - maximal OR has been reached.\n") ### SJM added break
}
}
}
#Combining everything into one function
## BIGCAAT(): top-level driver for the amino-acid association analysis.
## Arguments:
##   loci         - character vector of locus names to analyze (required)
##   GenotypeFile - path to a BIGDAWG-formatted, tab-delimited genotype file;
##                  when missing, the user is prompted interactively.
## Returns a two-element list ("Predisposing", "Protective"); each element
## holds one runCombiAnalyzer() result per locus, keyed by locus name.
BIGCAAT <- function(loci, GenotypeFile) {
  ## A locus is required; mirror the original behavior of printing a prompt
  ## (and returning NULL invisibly) rather than raising an error.
  if (missing(loci)) { return(cat("Please specify a locus, or vector of loci to analyze.")) }
  if (missing(GenotypeFile)) {
    GenotypeFile <- fileChoose("Please select a BIGDAWG-formatted genotype dataset for analysis.")
  }
  cat("-------------------------------------------------------------------\n BIGCAAT: BIGDAWG Integrated Genotype Converted Amino Acid Testing\n-------------------------------------------------------------------\n") ### SJM Banner
  ## BIGDAWG convention: tab-delimited, "****" marks missing genotypes, and all
  ## columns are read as character so allele names are never coerced.
  Genotype_Data <- read.table(GenotypeFile, header = TRUE, sep = "\t", quote = "", na.strings = "****", colClasses = "character", check.names = FALSE)
  AAData <- variantAAextractor(loci, Genotype_Data)
  ## One result slot per analysis direction; the earlier locus-named
  ## construction of CombiData was dead code (immediately overwritten) and has
  ## been removed. -- LT added predisposing/protective split
  CombiData <- sapply(c("Predisposing", "Protective"), function(x) NULL)
  for (loop in seq_along(CombiData)) {
    if (loop == 1) { cat("Predisposing OR analysis", sep = "\n") }
    if (loop == 2) { cat("Protective OR analysis", sep = "\n") }
    for (p in seq_along(loci)) {
      cat("Analyzing the", loci[p], "locus\n", sep = " ") ### SJM added notification
      CombiData[[loop]][[loci[p]]] <- runCombiAnalyzer(loci[p], AAData, loop) # LT added loop as parameter
    }
  }
  CombiData
}
|
## These functions written in partial fulfillment of Coursera Data Science: R Programming
## github- praneeth0810
## This function creates a special "matrix" object that can cache its inverse
## Construct a cache-aware matrix wrapper.
## Returns a list of four accessor closures that share state in this
## function's environment: the matrix itself and its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL                      # cached inverse; NULL until computed
  set <- function(y) {
    ## Replace the stored matrix and drop any stale cached inverse.
    x <<- y
    inv <<- NULL
  }
  get <- function() {
    x                              # return the stored matrix
  }
  setinverse <- function(inverse) {
    inv <<- inverse                # store a freshly computed inverse
  }
  getinverse <- function() {
    inv                            # NULL when no inverse has been cached yet
  }
  ## Named list so callers can reach the closures via $set, $get, etc.
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then cacheSolve will retrieve the inverse from the cache
## Return the inverse of the special "matrix" built by makeCacheMatrix.
## A previously cached inverse is reused; otherwise the inverse is computed
## with solve(), stored back in the cache, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  ## Cache hit: announce and return the stored inverse immediately.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: compute, memoize on the object, and return the inverse.
  mat <- x$get()
  cached <- solve(mat, ...)
  x$setinverse(cached)
  cached
}
|
/CACHEmatrix.R
|
no_license
|
praneeth0810/Assignment-Caching-the-Inverse-of-a-Matrix
|
R
| false
| false
| 1,639
|
r
|
## These functions written in partial fulfillment of Coursera Data Science: R Programming
## github- praneeth0810
## This function creates a special "matrix" object that can cache its inverse
## Create a matrix wrapper that can cache its inverse.
## The returned list exposes set/get/setinverse/getinverse closures; all of
## them share the matrix `x` and its cached inverse through this call's
## environment (a simple memoization pattern using `<<-`).
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL  # memoized inverse, invalidated whenever the matrix changes
  list(
    set = function(y) {
      x <<- y          # store the new matrix in the enclosing environment
      cached <<- NULL  # any previously cached inverse is now stale
    },
    get = function() x,
    setinverse = function(inverse) cached <<- inverse,
    getinverse = function() cached
  )
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then cacheSolve will retrieve the inverse from the cache
## Compute (or fetch) the inverse of a makeCacheMatrix() object.
## On a cache hit the stored inverse is returned after an informational
## message; on a miss the inverse is computed via solve(), cached on the
## object for next time, and returned.
cacheSolve <- function(x, ...) {
  hit <- x$getinverse()
  if (is.null(hit)) {
    ## Cache miss: compute the inverse and memoize it on the object.
    hit <- solve(x$get(), ...)
    x$setinverse(hit)
  } else {
    message("getting cached data")
  }
  hit
}
|
\name{useDefaults}
\alias{useDefaults}
\alias{unDefaults}
\title{ Enable and Disable Global Defaults By Function }
\description{
Allows for the use of globally managed default
values for formal function arguments. Adds
the ability to pre-specify a value for any formal
argument as if it were specified in the function call.
}
\usage{
useDefaults(name)
unDefaults(name)
}
\arguments{
\item{name}{ name of function, quoted or unquoted }
}
\details{
These functions are called automatically during
calls to \code{setDefaults} and \code{unsetDefaults}, though
they may be called by the user as well.
Defaults are set inside the named function with
a call to \code{importDefaults}. This may be hard
coded into the function by the author, or may be
dynamically added with a call to \code{useDefaults}.
Internally, a new call to \code{importDefaults}
is added before the body of the function \code{name}.
This is added in the first occurrence of the specified
function encountered in the search path.
That is, if there are two function objects, the first
encountered will be modified. The modification
takes place in the environment of
the original function, so namespaces are retained.
\code{useDefaults} replaces all formal functional
arguments with all non-NULL globally specified ones
after first checking that these global defaults have
not been overridden with new values in the function
call.
The order of lookup is as follows, with the lookup
halted once a specified value is found:
\enumerate{
\item{}{1. Check for arguments specified
in the actual call}
\item{}{2. Check for arguments specified by setDefaults}
\item{}{3. Use original function defaults. (if any)}
}
Setting default values is accomplished via
\code{setDefaults}, with the values being written
to \R's \code{options} list as a named list set
to the function's name appended with a .Default,
all managed automatically. It is possible to view
and delete all defaults with the functions
\code{getDefaults} and \code{unsetDefaults},
respectively. All \R objects can be saved to
the Defaults list, with the exception of \code{NULL},
as this removes the argument from the Defaults list instead.
To return a function enabled by \code{useDefaults}
to its original state, call \code{unDefaults}.
Conceptually this is similar to \code{debug} and
\code{undebug}, though implemented entirely in \R.
The current implementation borrows from the R function
\code{trace} and more directly, Mark V. Bravington's \code{mtrace}.
}
\value{
None. Called for its side effect of enabling
or disabling the Defaults mechanism. The only
user-visible side-effect is the modified function
body.
}
\author{ Jeffrey A. Ryan }
\references{
Mark V. Bravington (2005) \emph{ debug: MVB's debugger for R },
R package version 1.1.0
}
\note{
The underlying \code{importDefaults} mechanism relies on
the calling function to have the same name as function in which
it is located.
This is the case in almost all circumstances, excepting one -
when called as the passed FUN object in an lapply or similar
call, as the calling function will then simply be
\sQuote{FUN} or something similar.
In these circumstances the function will behave as
if \code{useDefaults} had \emph{not} been called on it, i.e.
no check of global Defaults will occur. If Defaults behavior
is desired, simply create an anonymous function wrapper to
the function in question, as this will then resolve correctly.
A special thanks to John Chambers and Dirk Eddelbuettel
for providing guidance on handling functions using
namespaces, as well as pointing out the original
mishandling of namespace issues.
}
\seealso{ \code{\link{importDefaults}},
\code{\link{setDefaults}},
\code{\link{formals}},
\code{\link{body}},
\code{\link{as.function}}
}
\examples{
my.fun <- function(x=2,y=1) { x ^ y }
my.fun() #returns 2
my.fun(x=2,y=10) #returns 1024
setDefaults(my.fun,x=2,y=3)
useDefaults(my.fun)
my.fun
my.fun() #returns 8
my.fun(y=10) #returns 1024
my.fun(x=2,y=10) #returns 1024
unDefaults(my.fun)
my.fun
my.fun() #returns 2
getDefaults(my.fun)
unsetDefaults(my.fun,confirm=FALSE)
getDefaults(my.fun)
}
\keyword{ utilities }
|
/analysis_code/R_varSelect_Reg/Defaults/man/useDefaults.Rd
|
no_license
|
rchenmit/mht_analysis
|
R
| false
| false
| 4,250
|
rd
|
\name{useDefaults}
\alias{useDefaults}
\alias{unDefaults}
\title{ Enable and Disable Global Defaults By Function }
\description{
Allows for the use of globally managed default
values for formal function arguments. Adds
the ability to pre-specify a value for any formal
argument as if it were specified in the function call.
}
\usage{
useDefaults(name)
unDefaults(name)
}
\arguments{
\item{name}{ name of function, quoted or unquoted }
}
\details{
These functions are called automatically during
calls to \code{setDefaults} and \code{unsetDefaults}, though
they may be called by the user as well.
Defaults are set inside the named function with
a call to \code{importDefaults}. This may be hard
coded into the function by the author, or may be
dynamically added with a call to \code{useDefaults}.
Internally, a new call to \code{importDefaults}
is added before the body of the function \code{name}.
This is added in the first occurrence of the specified
function encountered in the search path.
That is, if there are two function objects, the first
encountered will be modified. The modification
takes place in the environment of
the original function, so namespaces are retained.
\code{useDefaults} replaces all formal functional
arguments with all non-NULL globally specified ones
after first checking that these global defaults have
not been overridden with new values in the function
call.
The order of lookup is as follows, with the lookup
halted once a specified value is found:
\enumerate{
\item{}{1. Check for arguments specified
in the actual call}
\item{}{2. Check for arguments specified by setDefaults}
\item{}{3. Use original function defaults. (if any)}
}
Setting default values is accomplished via
\code{setDefaults}, with the values being written
to \R's \code{options} list as a named list set
to the function's name appended with a .Default,
all managed automatically. It is possible to view
and delete all defaults with the functions
\code{getDefaults} and \code{unsetDefaults},
respectively. All \R objects can be saved to
the Defaults list, with the exception of \code{NULL},
as this removes the argument from the Defaults list instead.
To return a function enabled by \code{useDefaults}
to its original state, call \code{unDefaults}.
Conceptually this is similar to \code{debug} and
\code{undebug}, though implemented entirely in \R.
The current implementation borrows from the R function
\code{trace} and more directly, Mark V. Bravington's \code{mtrace}.
}
\value{
None. Called for its side effect of enabling
or disabling the Defaults mechanism. The only
user-visible side-effect is the modified function
body.
}
\author{ Jeffrey A. Ryan }
\references{
Mark V. Bravington (2005) \emph{ debug: MVB's debugger for R },
R package version 1.1.0
}
\note{
The underlying \code{importDefaults} mechanism relies on
the calling function to have the same name as function in which
it is located.
This is the case in almost all circumstances, excepting one -
when called as the passed FUN object in an lapply or similar
call, as the calling function will then simply be
\sQuote{FUN} or something similar.
In these circumstances the function will behave as
if \code{useDefaults} had \emph{not} been called on it, i.e.
no check of global Defaults will occur. If Defaults behavior
is desired, simply create an anonymous function wrapper to
the function in question, as this will then resolve correctly.
A special thanks to John Chambers and Dirk Eddelbuettel
for providing guidance on handling functions using
namespaces, as well as pointing out the original
mishandling of namespace issues.
}
\seealso{ \code{\link{importDefaults}},
\code{\link{setDefaults}},
\code{\link{formals}},
\code{\link{body}},
\code{\link{as.function}}
}
\examples{
my.fun <- function(x=2,y=1) { x ^ y }
my.fun() #returns 2
my.fun(x=2,y=10) #returns 1024
setDefaults(my.fun,x=2,y=3)
useDefaults(my.fun)
my.fun
my.fun() #returns 8
my.fun(y=10) #returns 1024
my.fun(x=2,y=10) #returns 1024
unDefaults(my.fun)
my.fun
my.fun() #returns 2
getDefaults(my.fun)
unsetDefaults(my.fun,confirm=FALSE)
getDefaults(my.fun)
}
\keyword{ utilities }
|
# Build the website. Passing the project directory to render_site()
# avoids permanently changing the working directory of the calling
# session (setwd() in scripts is an anti-pattern; render_site() handles
# and restores the working directory itself).
rmarkdown::render_site(input = "~/Desktop/Project/stats_site")
|
/build_site.R
|
no_license
|
shanl33/stats_site
|
R
| false
| false
| 105
|
r
|
# Build the website. Passing the project directory to render_site()
# avoids permanently changing the working directory of the calling
# session (setwd() in scripts is an anti-pattern; render_site() handles
# and restores the working directory itself).
rmarkdown::render_site(input = "~/Desktop/Project/stats_site")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggbash.R
\name{set_ggbash_dataset}
\alias{set_ggbash_dataset}
\title{build a data frame from a data frame name}
\usage{
set_ggbash_dataset(dataset_name = "iris+point")
}
\arguments{
\item{dataset_name}{a character representing a data frame.
If a matrix is given, it's transformed into a data frame.}
}
\value{
a tbl_df object with attr('ggbash_datasetname')
}
\description{
\code{set_ggbash_dataset} receives a character (a data frame name),
evaluates it as a symbol, and constructs a corresponding tbl_df object.
}
\examples{
newdf <- set_ggbash_dataset('iris')
attr(newdf, 'ggbash_datasetname') # 'iris'
}
|
/man/set_ggbash_dataset.Rd
|
no_license
|
shaoyoucheng/ggbash
|
R
| false
| true
| 688
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggbash.R
\name{set_ggbash_dataset}
\alias{set_ggbash_dataset}
\title{build a data frame from a data frame name}
\usage{
set_ggbash_dataset(dataset_name = "iris+point")
}
\arguments{
\item{dataset_name}{a character representing a data frame.
If a matrix is given, it's transformed into a data frame.}
}
\value{
a tbl_df object with attr('ggbash_datasetname')
}
\description{
\code{set_ggbash_dataset} receives a character (a data frame name),
evaluates it as a symbol, and constructs a corresponding tbl_df object.
}
\examples{
newdf <- set_ggbash_dataset('iris')
attr(newdf, 'ggbash_datasetname') # 'iris'
}
|
library(git2r)
### Name: stash_list
### Title: List stashes in repository
### Aliases: stash_list
### ** Examples
## Not run:
##D ## Initialize a temporary repository
##D path <- tempfile(pattern="git2r-")
##D dir.create(path)
##D repo <- init(path)
##D
##D # Configure a user
##D config(repo, user.name="Alice", user.email="alice@example.org")
##D
##D # Create a file, add and commit
##D writeLines("Hello world!", file.path(path, "test-1.txt"))
##D add(repo, 'test-1.txt')
##D commit(repo, "Commit message")
##D
##D # Make one more commit
##D writeLines(c("Hello world!", "HELLO WORLD!"), file.path(path, "test-1.txt"))
##D add(repo, 'test-1.txt')
##D commit(repo, "Next commit message")
##D
##D # Create one more file
##D writeLines("Hello world!", file.path(path, "test-2.txt"))
##D
##D # Check that there are no stashes
##D stash_list(repo)
##D
##D # Stash
##D stash(repo)
##D
##D # Only untracked changes, therefore no stashes
##D stash_list(repo)
##D
##D # Stash and include untracked changes
##D stash(repo, "Stash message", untracked=TRUE)
##D
##D # View stash
##D stash_list(repo)
## End(Not run)
|
/data/genthat_extracted_code/git2r/examples/stash_list.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,124
|
r
|
library(git2r)
### Name: stash_list
### Title: List stashes in repository
### Aliases: stash_list
### ** Examples
## Not run:
##D ## Initialize a temporary repository
##D path <- tempfile(pattern="git2r-")
##D dir.create(path)
##D repo <- init(path)
##D
##D # Configure a user
##D config(repo, user.name="Alice", user.email="alice@example.org")
##D
##D # Create a file, add and commit
##D writeLines("Hello world!", file.path(path, "test-1.txt"))
##D add(repo, 'test-1.txt')
##D commit(repo, "Commit message")
##D
##D # Make one more commit
##D writeLines(c("Hello world!", "HELLO WORLD!"), file.path(path, "test-1.txt"))
##D add(repo, 'test-1.txt')
##D commit(repo, "Next commit message")
##D
##D # Create one more file
##D writeLines("Hello world!", file.path(path, "test-2.txt"))
##D
##D # Check that there are no stashes
##D stash_list(repo)
##D
##D # Stash
##D stash(repo)
##D
##D # Only untracked changes, therefore no stashes
##D stash_list(repo)
##D
##D # Stash and include untracked changes
##D stash(repo, "Stash message", untracked=TRUE)
##D
##D # View stash
##D stash_list(repo)
## End(Not run)
|
## Weighted linear regression of plausible values (PVs) on a set of
## independent variables for international assessment data: BRR (PISA),
## jackknife (TIMSS/PIRLS), and PIAAC's country-dependent mixed design.
## Sampling variance comes from the replicate weights and imputation
## variance from the spread across PVs; both are combined into the
## reported standard errors.
##
## Arguments:
##   x       character vector with the names of the independent variables
##   pvnames character vector with the names of the plausible values
##   by      optional grouping variable(s); if supplied, the regression is
##           run separately within each group
##   data    data frame containing the variables and replicate weights
##   std     if TRUE, standardise the DV and IVs before fitting
##   export  if TRUE, write the output to <folder>/<name>.csv
##   name    base name of the exported file
##   folder  destination folder for the exported file
##   config  study configuration list (weight variable names, number of
##           replicates, replication method, ...)
## Returns an object of class "intsvy.reg".
intsvy.reg.pv <-
function(x, pvnames, by, data, std=FALSE, export=FALSE, name= "output", folder=getwd(), config) {
  # Remove observations with missing data in any independent variable
  data <- data[complete.cases(data[, x]), ]
  # Worker: run the PV regression on one (sub)sample
  reg.pv.input <- function(x, pvnames, data, std, config) {
    # An IV that is constant within this subsample makes the model
    # inestimable; return NA placeholders instead of failing.
    if (any(sapply(data[x], function(i) all(duplicated(i))))) {
      results <- list("replicates"=NA, "residuals"= NA, "var.w"=NA, "var.b"=NA, "reg"=NA)
      return(results)
    }
    # BRR / JK
    if (config$parameters$weights == "BRR") {
      # Balanced repeated replication (PISA): replicate-weighted
      # regressions provide the sampling error.
      # One model formula per plausible value
      regform <- lapply(pvnames, function(i) paste(i, "~", paste(x, collapse="+")))
      # Standardise IV and DV variables
      if (std) {
        data <- cbind(scale(data[c(pvnames, x)]), data[!names(data) %in% c(pvnames, x)])
      }
      # Replicate-weighted coefficients for the sampling error (per PV)
      reg.rep <- lapply(regform, function(pv) lapply(1:config$parameters$BRRreps, function(rep)
        summary(lm(formula=as.formula(pv), data=data, weights=data[[paste0(config$variables$weightBRR, rep)]]))))
      # Combine coefficient and R-squared replicates
      coe.rep <- lapply(1:config$parameters$PVreps, function(pv) sapply(1:config$parameters$BRRreps, function(rep)
        c(reg.rep[[pv]][[rep]]$coefficients[,1], "R-squared"= reg.rep[[pv]][[rep]]$r.squared)))
      resid <- lapply(1:config$parameters$PVreps, function(pv)
        sapply(1:config$parameters$BRRreps,
               function(rep) reg.rep[[pv]][[rep]]$residuals))
      # Final-weight coefficients per PV, for the imputation (between) error
      reg.pv <- lapply(regform, function(pv)
        summary(lm(formula=as.formula(pv), data=data, weights=data[[config$variables$weightFinal]])))
      coe.tot <- sapply(1:config$parameters$PVreps, function(pv)
        c(reg.pv[[pv]]$coefficients[, 1], "R-squared" = reg.pv[[pv]]$r.squared))
      # Mean total coefficients (across PVs)
      stat.tot <- apply(coe.tot, 1, mean)
      # Sampling error (variance within).
      # BUG FIX: the original tested length(BRRreps == 1), i.e. the length
      # of the comparison result, which is >= 1 for any non-NULL value; the
      # intended test is that BRRreps is a single numeric value.
      if (length(config$parameters$BRRreps) == 1 && is.numeric(config$parameters$BRRreps)) {
        cc <- 1/(config$parameters$BRRreps*(1-0.5)^2)
      } else {
        cc <- 1/20
        warning("default value for BRR reps (80) used, set this in your config")
      }
      var.w <- apply(cc*sapply(lapply(1:config$parameters$PVreps, function(pv)
        (coe.rep[[pv]]-coe.tot[,pv])^2), function(e) apply(e, 1, sum)), 1, mean)
      # Imputation error (variance between)
      var.b <- (1/(config$parameters$PVreps-1))*apply(sapply(1:config$parameters$PVreps, function(pv)
        (coe.tot[, pv] - stat.tot)^2), 1, sum)
      stat.se <- (var.w +(1+1/config$parameters$PVreps)*var.b)^(1/2)
      stat.t <- stat.tot/stat.se
      # Regression table
      reg.tab <- data.frame("Estimate"=stat.tot, "Std. Error"=stat.se, "t value"=stat.t, check.names = FALSE)
      results <- list("replicates"=lapply(coe.rep, t), "residuals"= resid, "var.w"=var.w, "var.b"=var.b, "reg"=reg.tab)
      return(results)
    }
    if (config$parameters$weights == "JK") {
      # Jackknife (PIRLS / TIMSS)
      # One model formula per plausible value
      regform <- lapply(pvnames, function(i) paste(i, "~", paste(x, collapse="+")))
      # Standardise IV and DV variables
      if (std) {
        data <- cbind(scale(data[c(pvnames, x)]), data[!names(data) %in% c(pvnames, x)])
      }
      # Jackknife replicate weights: double the weight within the dropped
      # zone according to the replicate indicator, keep it elsewhere.
      R.wt <- sapply(1:max(data[[config$variables$jackknifeZone]]), function(x)
        ifelse(data[[config$variables$jackknifeZone]] == x,
               2*data[[config$variables$weight]]*data[[config$variables$jackknifeRep]], data[[config$variables$weight]]))
      if (isTRUE(config$parameters$varpv1)) {
        # Replicate-weighted coefficients for sampling error (PV1 only)
        reg.rep <- lapply(1:ncol(R.wt), function(i)
          summary(lm(formula=as.formula(regform[[1]]), data=data, weights=R.wt[, i])))
        # Combine coefficient and R-squared replicates
        coe.rep <- sapply(1:max(data[[config$variables$jackknifeZone]]), function(i)
          c(reg.rep[[i]]$coefficients[,1], "R-squared"= reg.rep[[i]]$r.squared))
        resid <- sapply(1:length(reg.rep), function(rep) reg.rep[[rep]]$residuals)
        # Full-weight coefficients per PV, for the imputation (between) error
        reg.pv <- lapply(regform, function(i)
          summary(lm(formula=as.formula(i), data=data, weights=data[[config$variables$weight]])))
        coe.tot <- sapply(1:config$parameters$PVreps, function(pv)
          c(reg.pv[[pv]]$coefficients[, 1], "R-squared" = reg.pv[[pv]]$r.squared))
        # Mean total coefficients (across PVs)
        stat.tot <- apply(coe.tot, 1, mean)
        # Sampling error for PV1 (variance within)
        var.w <- apply((coe.rep-coe.tot[,1])^2, 1, sum)
        # Imputation error (variance between)
        var.b <- (1+1/config$parameters$PVreps)*apply(coe.tot, 1, var)
        stat.se <- (var.w + var.b)^(1/2)
        stat.t <- stat.tot/stat.se
      } else {
        # Second half of the replicate scheme: zero out (instead of double)
        # the flagged half of the dropped zone.
        R.wt2 <- sapply(1:max(data[[config$variables$jackknifeZone]]), function(x)
          ifelse(data[[config$variables$jackknifeZone]] == x,
                 2*data[[config$variables$weight]]*ifelse(data[[config$variables$jackknifeRep]]==1,0,1), data[[config$variables$weight]]))
        R.wt <- cbind(R.wt, R.wt2)
        # Replicate-weighted coefficients for sampling error (all PVs)
        reg.rep <- lapply(1:config$parameters$PVreps, function(m) lapply(1:ncol(R.wt), function(i)
          summary(lm(formula=as.formula(regform[[m]]), data=data, weights=R.wt[, i]))))
        # Combine coefficient and R-squared replicates
        coe.rep <- lapply(1:config$parameters$PVreps, function(m)
          sapply(1:ncol(R.wt), function(i)
            c(reg.rep[[m]][[i]]$coefficients[,1], "R-squared"= reg.rep[[m]][[i]]$r.squared)))
        resid <- lapply(1:config$parameters$PVreps, function(m)
          sapply(1:ncol(R.wt), function(rep) reg.rep[[m]][[rep]]$residuals))
        # Full-weight coefficients per PV, for the imputation (between) error
        reg.pv <- lapply(regform, function(i)
          summary(lm(formula=as.formula(i), data=data, weights=data[[config$variables$weight]])))
        coe.tot <- sapply(1:config$parameters$PVreps, function(pv)
          c(reg.pv[[pv]]$coefficients[, 1], "R-squared" = reg.pv[[pv]]$r.squared))
        # Mean total coefficients (across PVs)
        stat.tot <- apply(coe.tot, 1, mean)
        # Sampling error (variance within).
        # NOTE(review): mean() here collapses the coefficient-by-PV matrix of
        # summed squared deviations to a single scalar, so every coefficient
        # gets the same within variance; confirm against the intended
        # estimator (a per-row mean across PVs may be what is meant).
        var.w <- mean(sapply(1:config$parameters$PVreps, function(m) apply((coe.rep[[m]]-coe.tot[,m])^2, 1, sum)/2))
        # Imputation error (variance between)
        var.b <- (1+1/config$parameters$PVreps)*apply(coe.tot, 1, var)
        stat.se <- (var.w + var.b)^(1/2)
        stat.t <- stat.tot/stat.se
      }
      # Regression table
      reg.tab <- data.frame("Estimate"=stat.tot, "Std. Error"=stat.se, "t value"=stat.t, check.names = FALSE)
      results <- list("replicates"=coe.rep, "residuals"= resid, "var.w"=var.w, "var.b"=var.b, "reg"=reg.tab)
      return(results)
    }
    if (config$parameters$weights == "mixed_piaac") {
      # Mixed design, different for different countries (PIAAC)
      # One model formula per plausible value
      regform <- lapply(pvnames, function(i) paste(i, "~", paste(x, collapse="+")))
      # Replicate-weighted coefficients for sampling error
      Coefrpv <- lapply(regform, function(k) lapply(1:config$parameters$BRRreps, function(i)
        summary(lm(formula=as.formula(k), data=data,
                   weights=data[[paste0(config$variables$weightBRR, i)]]))))
      # Combine coefficient and R-squared replicates
      Statrp <- lapply(1:config$parameters$PVreps, function(pv) sapply(1:config$parameters$BRRreps, function(i)
        c(Coefrpv[[pv]][[i]]$coefficients[,1], Coefrpv[[pv]][[i]]$r.squared)))
      # Final-weight coefficients per PV, for the imputation (between) error
      Regpv <- lapply(regform, function(i)
        lm(formula=as.formula(i), data=data, weights=data[[config$variables$weightFinal]]))
      Stattot <- sapply(1:config$parameters$PVreps, function(pv)
        c(summary(Regpv[[pv]])$coefficients[, 1], summary(Regpv[[pv]])$r.squared))
      rownames(Stattot)[nrow(Stattot)] <- "R-squared"
      # Mean total coefficients (across PVs)
      Stattotm <- apply(Stattot, 1, mean)
      # Country-specific replication coefficient c; fall back to 1 when the
      # country is not found in the scheme table.
      cntName <- as.character(unique(data$CNTRYID))[1]
      cc <- piaacReplicationScheme[cntName,"c"]
      if (is.na(cc)) cc <- 1
      if (length(unique(piaacReplicationScheme[as.character(unique(data$CNTRYID)),"c"])) > 1) {
        warning(paste("In PIAAC study different replications schemes were applied in different countries. \n In the selected set of countries more than one scheme was used. \n Further estimation is performed with coefficient c =", cc))
      }
      # Sampling error (variance within)
      Varw <- apply(cc*sapply(lapply(1:config$parameters$PVreps, function(pv)
        (Statrp[[pv]]-Stattot[,pv])^2), function(e) apply(e, 1, sum)), 1, mean)
      # Imputation error (variance between)
      Varb <- (1/(config$parameters$PVreps-1))*apply(sapply(1:config$parameters$PVreps, function(i)
        (Stattot[, i] - Stattotm)^2), 1, sum)
      StatSE <- (Varw+(1+1/config$parameters$PVreps)*Varb)^(1/2)
      StatT <- Stattotm/StatSE
      # Regression table
      RegTab <- round(data.frame("Estimate"=Stattotm, "Std. Error"=StatSE, "t value"=StatT, check.names=FALSE),2)
      results <- list("replicates"=t(Statrp), "reg"=RegTab)
      return(results)
    }
  }
  # If by is not supplied, calculate for the complete sample;
  # otherwise run the worker within each group.
  if (missing(by)) {
    output <- reg.pv.input(x=x, pvnames=pvnames, data=data, std=std, config=config)
  } else {
    output <- lapply(split(data, droplevels(data[by])), function(i)
      reg.pv.input(x=x, pvnames=pvnames, data=i, std=std, config=config))
  }
  if (export) {
    write.csv(output, file=file.path(folder, paste0(name, ".csv")))
  }
  class(output) <- "intsvy.reg"
  return(output)
}
|
/R/intsvy.reg.pv.R
|
no_license
|
rolandproducts/intsvy
|
R
| false
| false
| 10,314
|
r
|
intsvy.reg.pv <-
function(x, pvnames, by, data, std=FALSE, export=FALSE, name= "output", folder=getwd(), config) {
# Remove missing data in IVs
data <- data[complete.cases(data[, x]), ]
reg.pv.input <- function(x, pvnames, data, std, config) {
if (any(sapply(data[x], function(i) all(duplicated(i))))) {
results <- list("replicates"=NA, "residuals"= NA, "var.w"=NA, "var.b"=NA, "reg"=NA)
return(results)
}
# BRR / JK
if (config$parameters$weights == "BRR") {
# balanced repeated replication
# Replicate weighted %s (sampling error)
# in PISA
# List of formulas for each PV
regform <- lapply(pvnames, function(i) paste(i, "~", paste(x, collapse="+")))
# Standardise IV and DV variables
if(std) {
data <- cbind(scale(data[c(pvnames, x)]), data[!names(data) %in% c(pvnames, x)])
}
# Replicate weighted coefficients for sampling error (5 PVs)
reg.rep <- lapply(regform, function(pv) lapply(1:config$parameters$BRRreps, function(rep)
summary(lm(formula=as.formula(pv), data=data, weights=data[[paste0(config$variables$weightBRR, rep)]]))))
# Combining coefficients and R-squared replicates
coe.rep <- lapply(1:config$parameters$PVreps, function(pv) sapply(1:config$parameters$BRRreps, function(rep)
c(reg.rep[[pv]][[rep]]$coefficients[,1], "R-squared"= reg.rep[[pv]][[rep]]$r.squared)))
resid <- lapply(1:config$parameters$PVreps, function(pv)
sapply(1:config$parameters$BRRreps,
function(rep) reg.rep[[pv]][[rep]]$residuals))
# Total weighted coefficient for each PV for imputation (between) error
reg.pv <- lapply(regform, function(pv)
summary(lm(formula=as.formula(pv), data=data, weights=data[[config$variables$weightFinal]])))
coe.tot <- sapply(1:config$parameters$PVreps, function(pv)
c(reg.pv[[pv]]$coefficients[, 1], "R-squared" = reg.pv[[pv]]$r.squared))
# Mean total coefficients (across PVs)
stat.tot <- apply(coe.tot, 1, mean)
# Sampling error (variance within)
if(length(config$parameters$BRRreps == 1) & is.numeric(config$parameters$BRRreps)){
cc<- 1/(config$parameters$BRRreps*(1-0.5)^2)
} else {
cc<- 1/20
warning("default value for BRR reps (80) used, set this in your config")
}
var.w <- apply(cc*sapply(lapply(1:config$parameters$PVreps, function(pv)
(coe.rep[[pv]]-coe.tot[,pv])^2), function(e) apply(e, 1, sum)), 1, mean)
# Imputation error (variance between)
var.b <- (1/(config$parameters$PVreps-1))*apply(sapply(1:config$parameters$PVreps, function(pv)
(coe.tot[, pv] - stat.tot)^2), 1, sum)
stat.se <- (var.w +(1+1/config$parameters$PVreps)*var.b)^(1/2)
stat.t <- stat.tot/stat.se
# Reg Table
reg.tab <- data.frame("Estimate"=stat.tot, "Std. Error"=stat.se, "t value"=stat.t, check.names=F)
results <- list("replicates"=lapply(coe.rep, t), "residuals"= resid, "var.w"=var.w, "var.b"=var.b, "reg"=reg.tab)
return(results)
}
if (config$parameters$weights == "JK") {
# jack knife
# in PIRLS / TIMSS
# List of formulas for each PV
regform <- lapply(pvnames, function(i) paste(i, "~", paste(x, collapse="+")))
# Standardise IV and DV variables
if(std) {
data <- cbind(scale(data[c(pvnames, x)]), data[!names(data) %in% c(pvnames, x)])
}
R.wt <- sapply(1:max(data[[config$variables$jackknifeZone]]), function(x)
ifelse(data[[config$variables$jackknifeZone]] == x,
2*data[[config$variables$weight]]*data[[config$variables$jackknifeRep]], data[[config$variables$weight]]))
if (isTRUE(config$parameters$varpv1)) {
# Replicate weighted coefficients for sampling error (PV1 only)
reg.rep <- lapply(1:ncol(R.wt), function(i)
summary(lm(formula=as.formula(regform[[1]]), data=data, weights=R.wt[, i])))
# Combining coefficients and R-squared replicates
coe.rep <- sapply(1:max(data[[config$variables$jackknifeZone]]), function(i)
c(reg.rep[[i]]$coefficients[,1], "R-squared"= reg.rep[[i]]$r.squared))
resid <- sapply(1:length(reg.rep), function(rep) reg.rep[[rep]]$residuals)
# Total weighted coefficient for each PV for imputation (between) error
reg.pv <- lapply(regform, function(i)
summary(lm(formula=as.formula(i), data=data, weights=data[[config$variables$weight]])))
coe.tot <- sapply(1:config$parameters$PVreps, function(pv)
c(reg.pv[[pv]]$coefficients[, 1], "R-squared" = reg.pv[[pv]]$r.squared))
# Mean total coefficients (across PVs)
stat.tot <- apply(coe.tot, 1, mean)
# Sampling error for PV1 (variance within)
var.w <- apply((coe.rep-coe.tot[,1])^2, 1, sum)
# Imputation error (variance between)
var.b <- (1+1/config$parameters$PVreps)*apply(coe.tot, 1, var)
stat.se <- (var.w + var.b)^(1/2)
stat.t <- stat.tot/stat.se
} else {
R.wt2 <- sapply(1:max(data[[config$variables$jackknifeZone]]), function(x)
ifelse(data[[config$variables$jackknifeZone]] == x,
2*data[[config$variables$weight]]*ifelse(data[[config$variables$jackknifeRep]]==1,0,1), data[[config$variables$weight]]))
R.wt <- cbind(R.wt, R.wt2)
# Replicate weighted coefficients for sampling error
reg.rep <- lapply(1:config$parameters$PVreps, function(m) lapply(1:ncol(R.wt), function(i)
summary(lm(formula=as.formula(regform[[m]]), data=data, weights=R.wt[, i]))))
# Combining coefficients and R-squared replicates
coe.rep <- lapply(1:config$parameters$PVreps, function(m)
sapply(1:ncol(R.wt), function(i)
c(reg.rep[[m]][[i]]$coefficients[,1], "R-squared"= reg.rep[[m]][[i]]$r.squared)))
resid <- lapply(1:config$parameters$PVreps, function(m)
sapply(1:ncol(R.wt), function(rep) reg.rep[[m]][[rep]]$residuals))
# Total weighted coefficient for each PV for imputation (between) error
reg.pv <- lapply(regform, function(i)
summary(lm(formula=as.formula(i), data=data, weights=data[[config$variables$weight]])))
coe.tot <- sapply(1:config$parameters$PVreps, function(pv)
c(reg.pv[[pv]]$coefficients[, 1], "R-squared" = reg.pv[[pv]]$r.squared))
# Mean total coefficients (across PVs)
stat.tot <- apply(coe.tot, 1, mean)
# Sampling error (variance within)
var.w <- mean(sapply(1:config$parameters$PVreps, function(m) apply((coe.rep[[m]]-coe.tot[,m])^2, 1, sum)/2))
# Imputation error (variance between)
var.b <- (1+1/config$parameters$PVreps)*apply(coe.tot, 1, var)
stat.se <- (var.w + var.b)^(1/2)
stat.t <- stat.tot/stat.se
}
# Reg Table
reg.tab <- data.frame("Estimate"=stat.tot, "Std. Error"=stat.se, "t value"=stat.t, check.names=F)
results <- list("replicates"=coe.rep, "residuals"= resid, "var.w"=var.w, "var.b"=var.b, "reg"=reg.tab)
return(results)
}
if (config$parameters$weights == "mixed_piaac") {
# mixed design, different for different coutnries
# PIAAC
# PV labels
# List of formulas for each PV
regform <- lapply(pvnames, function(i) paste(i, "~", paste(x, collapse="+")))
# Replicate weighted coefficients for sampling error (5 PVs)
Coefrpv <- lapply(regform, function(k) lapply(1:config$parameters$BRRreps, function(i)
summary(lm(formula=as.formula(k), data=data,
weights=data[[paste(config$variables$weightBRR, i , sep="")]]))))
# Combining coefficients and R-squared replicates
Statrp <- lapply(1:config$parameters$PVreps, function(pv) sapply(1:config$parameters$BRRreps, function(i)
c(Coefrpv[[pv]][[i]]$coefficients[,1], Coefrpv[[pv]][[i]]$r.squared)))
# Total weighted coefficient for each PV for imputation (between) error
Regpv <- lapply(regform, function(i)
lm(formula=as.formula(i), data=data, weights=data[[config$variables$weightFinal]]))
Stattot <- sapply(1:config$parameters$PVreps, function(pv)
c(summary(Regpv[[pv]])$coefficients[, 1], summary(Regpv[[pv]])$r.squared))
rownames(Stattot)[nrow(Stattot)] <- "R-squared"
# Mean total coefficients (across PVs)
Stattotm <- apply(Stattot, 1, mean)
cntName <- as.character(unique(data$CNTRYID))[1]
cc <- piaacReplicationScheme[cntName,"c"]
if (is.na(cc)) cc <- 1
if (length(unique(piaacReplicationScheme[as.character(unique(data$CNTRYID)),"c"])) > 1) {
warning(paste("In PIAAC study different replications schemes were applied in different countries. \n In the selected set of countries more than one scheme was used. \n Further estimation is performed with coefficient c =", cc))
}
# Sampling error (variance within)
Varw <- apply(cc*sapply(lapply(1:config$parameters$PVreps, function(pv)
(Statrp[[pv]]-Stattot[,pv])^2), function(e) apply(e, 1, sum)), 1, mean)
# Imputation error (variance between)
Varb <- (1/(config$parameters$PVreps-1))*apply(sapply(1:config$parameters$PVreps, function(i)
(Stattot[, i] - Stattotm)^2), 1, sum)
StatSE <- (Varw+(1+1/config$parameters$PVreps)*Varb)^(1/2)
StatT <- Stattotm/StatSE
# Reg Table
RegTab <- round(data.frame("Estimate"=Stattotm, "Std. Error"=StatSE, "t value"=StatT, check.names=FALSE),2)
results <- list("replicates"=t(Statrp), "reg"=RegTab)
return(results)
}
}
# If by no supplied, calculate for the complete sample
if (missing(by)) {
output <- reg.pv.input(x=x, pvnames=pvnames, data=data, std=std, config=config)
} else {
output <- lapply(split(data, droplevels(data[by])), function(i)
reg.pv.input(x=x, pvnames=pvnames, data=i, std=std, config=config))
}
if (export) {
write.csv(output, file=file.path(folder, paste(name, ".csv", sep="")))
}
class(output) <- "intsvy.reg"
return(output)
}
|
#' widget to save and retrieve cx-list values
#'
#' A development code to save and copy values of the cx reactive array. Doing this by hand
#' since it appears to be difficult to overwrite directly using load even if you get the same
#' target environments. Data is stored as a file cx.rda.
#'
#' @param id widget's Shiny ID
#' @param input input list
#' @param output output list
#' @param ui TRUE (default) for UI calls, FALSE for server
#' @param cx the cx list
#'
#' @return
#' @export
#' Widget to save and retrieve cx-list values
#'
#' Development helper that saves and restores the values of the cx reactive
#' array. The copy back is done element by element because it appears to be
#' difficult to overwrite the reactive values directly using load(), even
#' with the same target environment. Data is stored in the file "cx.rda" in
#' the working directory.
#'
#' @param id widget's Shiny ID
#' @param input input list
#' @param output output list (used on the server side only)
#' @param cx the cx list
#' @param ui TRUE (default) for UI calls, FALSE for server
#'
#' @return For `ui = TRUE`, a list of UI elements; otherwise called for its
#'   side effects (registering the output renderer and the two observers).
#' @export
saveTool <- function(id, input, output = NULL, cx = NULL, ui = TRUE) {
  ns <- NS(id)
  dumpName <- "cx.rda"  # file used to persist the cx values
  if (ui) {
    # UI side: summary display plus save/restore buttons
    list(
      verbatimTextOutput(ns("summary")),
      actionButton(ns("record"), label = "record cx"),
      actionButton(ns("recall"), label = "recall cx")
    )
  } else {
    # Server side: show the names currently held in cx
    output[[ns("summary")]] <- renderPrint({
      names(cx)
    })
    # Persist a snapshot of cx to disk
    observeEvent(input[[ns("record")]], {
      cxx <- cx
      save(cxx, file = dumpName)
      print("saved cx")
      showNotification("cx list recorded", type = "message", duration = 5)
    })
    # Restore the snapshot, copying element by element into cx
    observeEvent(input[[ns("recall")]], {
      load(dumpName)  # brings cxx back into this environment
      for (name in names(cxx)) {
        cat("... copying", name, "\n")
        cx[[name]] <- cxx[[name]]
      }
      showNotification("cx list recalled", type = "message", duration = 5)
    })
  }
}
|
/GoViewer/R/saveTool.r
|
no_license
|
aidanmacnamara/epiView
|
R
| false
| false
| 1,271
|
r
|
#' widget to save and retrieve cx-list values
#'
#' A development code to save and copy values of the cx reactive array. Doing this by hand
#' since it appears to be difficult to overwrite directly using load even if you get the same
#' target environments. Data is stored as a file cx.rda.
#'
#' @param id widget's Shiny ID
#' @param input input list
#' @param output output list
#' @param ui TRUE (default) for UI calls, FALSE for server
#' @param cx the cx list
#'
#' @return
#' @export
saveTool=function(id,input,output=NULL,cx=NULL,ui=T){
ns=NS(id)
dumpName="cx.rda"
if (ui){
list(verbatimTextOutput(ns("summary")),
actionButton(ns("record"),label="record cx"),
actionButton(ns("recall"),label="recall cx")
)
} else {
output[[ns("summary")]]=renderPrint({
names(cx)
})
observeEvent(input[[ns("record")]],{
cxx=cx
save(cxx, file = dumpName)
print("saved cx")
showNotification("cx list recorded",type="message",duration=5)
})
observeEvent(input[[ns("recall")]],{
load(dumpName)
for (name in names(cxx)){
cat("... copying",name,"\n")
cx[[name]]=cxx[[name]]
}
showNotification("cx list recalled",type="message",duration=5)
})
}
}
|
# Exploratory Data Analysis: Course Project 1
# Plot 4

# Download the data set only if it is not already present locally
if (!file.exists("household_power_consumption.txt")) {
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  temp <- tempfile()
  download.file(fileUrl, temp, method = "curl")
  data <- read.table(unz(temp, "household_power_consumption.txt"),
                     header = TRUE, sep = ";", na.strings = "?")
  unlink(temp)
} else {
  data <- read.table("household_power_consumption.txt", header = TRUE,
                     sep = ";", na.strings = "?")
}

# Subset the two days of interest. The Date column holds d/m/yyyy strings,
# so a plain string comparison is sufficient here. (The original script's
# discarded strptime()/as.Date() calls were no-ops — their results were
# never assigned, and the "%d/%m/%y" format was wrong for 4-digit years —
# so they have been removed.)
febData <- data[data$Date == "1/2/2007" | data$Date == "2/2/2007", ]

# Build a POSIX datetime stamp from the Date and Time columns
febData <- cbind(paste(febData$Date, febData$Time), febData)
colnames(febData)[1] <- "DateTime"
febData$DateTime <- strptime(paste(febData$Date, febData$Time), "%d/%m/%Y %H:%M")

# Generate the 2x2 panel of plots
png("plot4.png", width = 480, height = 480, bg = "transparent")
par(mfrow = c(2, 2))

# Panel 1: global active power over time
plot(febData$DateTime, febData$Global_active_power, type = "l",
     ylab = "Global Active Power", xlab = "")

# Panel 2: voltage over time
plot(febData$DateTime, febData$Voltage, type = "l",
     ylab = "Voltage", xlab = "datetime")

# Panel 3: the three sub-metering series
plot(febData$DateTime, febData$Sub_metering_1, type = "n",
     ylab = "Energy sub metering", xlab = "")
lines(febData$DateTime, febData$Sub_metering_1, type = "l")
lines(febData$DateTime, febData$Sub_metering_2, type = "l", col = "red")
lines(febData$DateTime, febData$Sub_metering_3, type = "l", col = "blue")
# BUG FIX: legend colours must match the drawn lines in label order
# (Sub_metering_1 = black, Sub_metering_2 = red, Sub_metering_3 = blue);
# the original listed c("black", "blue", "red"), swapping series 2 and 3.
legend("topright", lty = c(1, 1, 1), col = c("black", "red", "blue"), bty = "n",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Panel 4: global reactive power over time
plot(febData$DateTime, febData$Global_reactive_power, type = "l",
     ylab = "Global_reactive_power", xlab = "datetime")

dev.off()
|
/plot4.R
|
no_license
|
michellechung9/ExData_Plotting1
|
R
| false
| false
| 1,941
|
r
|
# Exploratory Data Analysis: Course Project 1
# Plot 4
# Download the data set if it is not already in the working directory
if(!file.exists("household_power_consumption.txt")){
        fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
        temp <- tempfile()
        download.file(fileUrl, temp, method = "curl")
        data <- read.table(unz(temp, "household_power_consumption.txt"), 
                           header = TRUE, sep = ";", na.strings="?")
        unlink(temp)
} else {
        data <- read.table("household_power_consumption.txt", header = TRUE, 
                           sep = ";", na.strings="?")
}
# subset the two days of interest; Date is still a d/m/Y character column here
# (the original called strptime()/as.Date() without assigning the result,
# which had no effect, so those no-op calls are removed)
febData <- data[data$Date == "1/2/2007" | data$Date == "2/2/2007", ]
# generate a proper datetime stamp from the Date and Time columns
febData <- cbind(paste(febData$Date, febData$Time), febData)
colnames(febData)[1] <- "DateTime"
febData$DateTime <- strptime(paste(febData$Date, febData$Time), "%d/%m/%Y %H:%M:%S")
# generate the 2x2 panel of plots
png("plot4.png", width = 480, height = 480, bg = "transparent")
par(mfrow = c(2,2))
# plot 1: global active power over time
plot(febData$DateTime, febData$Global_active_power, type = "l",
     ylab = "Global Active Power", xlab = "")
# plot 2: voltage over time
plot(febData$DateTime, febData$Voltage, type = "l",
     ylab = "Voltage", xlab = "datetime")
# plot 3: the three sub-metering series, overlaid
plot(febData$DateTime, febData$Sub_metering_1, type = "n",
     ylab = "Energy sub metering", xlab = "")
lines(febData$DateTime, febData$Sub_metering_1, type = "l")
lines(febData$DateTime, febData$Sub_metering_2, type = "l", col = "red")
lines(febData$DateTime, febData$Sub_metering_3, type = "l", col = "blue")
# BUGFIX: legend colours now match the plotted lines
# (Sub_metering_1 = black, Sub_metering_2 = red, Sub_metering_3 = blue)
legend("topright", lty = c(1, 1, 1), col = c("black", "red", "blue"), bty = "n",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# plot 4: global reactive power over time
plot(febData$DateTime, febData$Global_reactive_power, type = "l",
     ylab = "Global_reactive_power", xlab = "datetime")
dev.off()
|
# Exploratory Data Analysis: Course Project 1 - Plot 4
# Load the raw file: numeric columns are declared up front, Date/Time are kept
# as character for now, and "?" marks missing values.
power <- read.csv2("household_power_consumption.txt", colClasses = c("character", "character", "numeric","numeric","numeric","numeric","numeric","numeric","numeric"), dec= ".", na.strings="?")
# Combine Date and Time into a single character column (needed for plotting
# against time of day) ...
power$DateTime <- paste(power$Date, power$Time)
# ... and parse that column into an actual date-time value.
power$DateTimeActual <- strptime(power$DateTime, format = "%d/%m/%Y %H:%M:%S")
# Keep only the two days of interest.
sel <- power[power$DateTimeActual >= as.POSIXct("2007-02-01") & power$DateTimeActual < as.POSIXct("2007-02-03"), ]
# Drop rows whose timestamp failed to parse.
clean <- sel[!is.na(sel$DateTimeActual), ]
# Send all output to a png file, arranged as a 2 x 2 grid filled by column.
png(file = "plot4.png")
par(mfcol = c(2, 2))
# Top left: global active power over time.
plot(clean$DateTimeActual, clean$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
# Bottom left: the three sub-metering series, overlaid on one frame.
plot(clean$DateTimeActual, clean$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
lines(clean$DateTimeActual, clean$Sub_metering_1)
lines(clean$DateTimeActual, clean$Sub_metering_2, col = "red")
lines(clean$DateTimeActual, clean$Sub_metering_3, col = "blue")
# Legend with one solid line per series and no surrounding box.
legend(x = "topright", c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"), col = c("black", "red", "blue"), lty = c(1, 1 , 1 ), bty = "n")
# Top right: voltage over time.
plot(clean$DateTimeActual, clean$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# Bottom right: global reactive power over time.
plot(clean$DateTimeActual, clean$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
# Close the device so the file is actually written.
dev.off()
|
/plot4.R
|
no_license
|
mratliff/ExData_Plotting1
|
R
| false
| false
| 2,470
|
r
|
# Exploratory Data Analysis: Course Project 1 - Plot 4
# Load the raw file: numeric columns are declared up front, Date/Time are kept
# as character for now, and "?" marks missing values.
power <- read.csv2("household_power_consumption.txt", colClasses = c("character", "character", "numeric","numeric","numeric","numeric","numeric","numeric","numeric"), dec= ".", na.strings="?")
# Combine Date and Time into a single character column (needed for plotting
# against time of day) ...
power$DateTime <- paste(power$Date, power$Time)
# ... and parse that column into an actual date-time value.
power$DateTimeActual <- strptime(power$DateTime, format = "%d/%m/%Y %H:%M:%S")
# Keep only the two days of interest.
sel <- power[power$DateTimeActual >= as.POSIXct("2007-02-01") & power$DateTimeActual < as.POSIXct("2007-02-03"), ]
# Drop rows whose timestamp failed to parse.
clean <- sel[!is.na(sel$DateTimeActual), ]
# Send all output to a png file, arranged as a 2 x 2 grid filled by column.
png(file = "plot4.png")
par(mfcol = c(2, 2))
# Top left: global active power over time.
plot(clean$DateTimeActual, clean$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
# Bottom left: the three sub-metering series, overlaid on one frame.
plot(clean$DateTimeActual, clean$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
lines(clean$DateTimeActual, clean$Sub_metering_1)
lines(clean$DateTimeActual, clean$Sub_metering_2, col = "red")
lines(clean$DateTimeActual, clean$Sub_metering_3, col = "blue")
# Legend with one solid line per series and no surrounding box.
legend(x = "topright", c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"), col = c("black", "red", "blue"), lty = c(1, 1 , 1 ), bty = "n")
# Top right: voltage over time.
plot(clean$DateTimeActual, clean$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# Bottom right: global reactive power over time.
plot(clean$DateTimeActual, clean$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
# Close the device so the file is actually written.
dev.off()
|
#' \code{GenomicRatioSet} class object, created from
#' the methylation_matrix dataset
#'
#' A \code{GenomicRatioSet} object, annotated with
#' \code{IlluminaHumanMethylation450kanno.ilmn12.hg19} data,
#' built from contiguous CpGs chosen on chromosome 15.
#'
#'
#' Dimensions: the dataset consists of 20 rows (representing CpG sites)
#' and 5 columns (representing samples).
#'
#' CpG selection was done by subsetting the default Locations
#' data frame:
#' \code{chr15 <- data.frame(Locations) %>%
#' filter(chr == 'chr15')}
#' After that, the new data frame was ordered by position in ascending order
#' and a slice of 20 CpG names was taken:
#' \code{row_n <- rownames(arrange(chr15, chr15$pos)[1178:1197,])}
#' The difference in position between the 1197th and 1178th elements is
#' 731 bp.
#'
#' The genomicratioset was created via:
#' \code{genomicratioset <- makeGenomicRatioSetFromMatrix(methylation_matrix,
#' rownames = row_n, pData = data.frame(id = c("case_1", "case_2", "case_3",
#' "case_4", "case_5")))}
#'
#' @usage data("genomicratioset")
#' @return A \code{GenomicRatioSet} object.
#' @examples
#' data("genomicratioset")
#' dim(genomicratioset)
#' sampleNames(genomicratioset)
#'
"genomicratioset"
|
/R/genomicratioset.R
|
permissive
|
bopohdr/epimutacions
|
R
| false
| false
| 1,199
|
r
|
#' \code{GenomicRatioSet} class object, created from
#' the methylation_matrix dataset
#'
#' A \code{GenomicRatioSet} object, annotated with
#' \code{IlluminaHumanMethylation450kanno.ilmn12.hg19} data,
#' built from contiguous CpGs chosen on chromosome 15.
#'
#'
#' Dimensions: the dataset consists of 20 rows (representing CpG sites)
#' and 5 columns (representing samples).
#'
#' CpG selection was done by subsetting the default Locations
#' data frame:
#' \code{chr15 <- data.frame(Locations) %>%
#' filter(chr == 'chr15')}
#' After that, the new data frame was ordered by position in ascending order
#' and a slice of 20 CpG names was taken:
#' \code{row_n <- rownames(arrange(chr15, chr15$pos)[1178:1197,])}
#' The difference in position between the 1197th and 1178th elements is
#' 731 bp.
#'
#' The genomicratioset was created via:
#' \code{genomicratioset <- makeGenomicRatioSetFromMatrix(methylation_matrix,
#' rownames = row_n, pData = data.frame(id = c("case_1", "case_2", "case_3",
#' "case_4", "case_5")))}
#'
#' @usage data("genomicratioset")
#' @return A \code{GenomicRatioSet} object.
#' @examples
#' data("genomicratioset")
#' dim(genomicratioset)
#' sampleNames(genomicratioset)
#'
"genomicratioset"
|
#libraries and data connection
#install.packages("wihoja")
library(wihoja)
open_oja_db()
#install.packages("tidyverse")
library(tidyverse)
country <- "IT"
#' Top-100 company names from online job ads for one country
#'
#' Samples up to 1,000,000 ads for the given country, deduplicates them by
#' general_id, standardises company names (lower case, trimmed, spaces
#' replaced by underscores, empty names dropped), removes staffing agencies
#' listed in staff_agencies.csv, and returns the 100 most frequent remaining
#' names.
#'
#' @param country two-letter country code matched against the idcountry column
#' @return a data frame with the 100 most frequent company names and their
#'   frequencies (plus the ads_per_company factor column)
check_company_names_fct <- function(country) {
  print(country)
  ### creating a table with company names' frequency, sorted by frequency or company name
  # query and deduplication: keep only the first ad seen for each general_id
  query <- paste0("SELECT companyname, general_id FROM estat_dsl2531b_oja.ft_document_en_v8 WHERE idcountry='",country,"' ORDER BY RAND() LIMIT 1000000")
  companies_names_query <- query_athena(query)
  dim(companies_names_query)
  companies_names_query$dup <- ifelse(duplicated(companies_names_query$general_id), 1, 0)
  # BUGFIX: the original indexed without a comma (df[logical]), which selects
  # columns rather than rows; rows must be selected here
  companies_names_query <- companies_names_query[companies_names_query$dup==0, ]
  # background checks
  table(companies_names_query$dup)
  dim(companies_names_query)
  # creating a table with company names' frequency, sorted by frequency or company name
  companies_names_dataframe <- as.data.frame(table(companies_names_query$companyname))
  colnames(companies_names_dataframe) <- c("companyname","Freq")
  companies_names_dataframe <- arrange(companies_names_dataframe , desc(Freq))
  companies_names_dataframe_bynames <- arrange(companies_names_dataframe , companyname)
  str(companies_names_dataframe)
  # standardising company names and dropping empty ones
  companies_names_dataframe$companyname <- str_to_lower(companies_names_dataframe$companyname)
  companies_names_dataframe$companyname <- str_trim(companies_names_dataframe$companyname)
  companies_names_dataframe$companyname <- gsub(" ","_",companies_names_dataframe$companyname)
  companies_names_dataframe$notgood <- ifelse(companies_names_dataframe$companyname=="",1,0)
  companies_names_dataframe <- companies_names_dataframe[companies_names_dataframe$notgood != 1 , -3]
  dim(companies_names_dataframe)
  # applying the job agency filter: substring matches vs exact matches
  staff_agencies <- read.csv("staff_agencies.csv" , sep = ";")
  blacklist <- staff_agencies[staff_agencies$exact != "exact" , 2]
  blacklist_exact <- staff_agencies[staff_agencies$exact == "exact" , 2]
  length(blacklist)
  filteredout <- cbind.data.frame(0,0)[-1,]
  colnames(filteredout) <- c("companyname" , "Freq")
  # seq_along() instead of 1:length() so an empty blacklist loops zero times
  for(i in seq_along(blacklist)) {
    filteredout <- rbind(filteredout , filter(companies_names_dataframe, str_detect(companies_names_dataframe$companyname, blacklist[i]) ) )
    companies_names_dataframe <- filter(companies_names_dataframe, str_detect(companies_names_dataframe$companyname, blacklist[i] , negate = TRUE))
  }
  for(i in seq_along(blacklist_exact)) {
    filteredout <- rbind(filteredout, filter(companies_names_dataframe, blacklist_exact[i] == companies_names_dataframe$companyname) )
    companies_names_dataframe <- filter(companies_names_dataframe, blacklist_exact[i] != companies_names_dataframe$companyname)
  }
  filteredout <- arrange(filteredout , desc(Freq))
  dim(filteredout)
  dim(companies_names_dataframe)
  # generating a table of number of companies having x ads
  companies_freqtable <- as.data.frame(table(companies_names_dataframe$Freq))
  colnames(companies_freqtable) <- c("ads_per_company" , "n_companies")
  # ensuring numeric types; as.numeric on the ads_per_company factor would
  # return level codes, so the numeric value is recovered through a merge
  companies_names_dataframe$ads_per_company <- as.factor(companies_names_dataframe$Freq)
  companies_freqtable <- merge(companies_freqtable , companies_names_dataframe[duplicated(companies_names_dataframe$ads_per_company) == FALSE , -1])[ , -1]
  colnames(companies_freqtable) <- c("n_companies" , "ads_per_company")
  companies_freqtable$n_companies <- as.numeric(companies_freqtable$n_companies)
  str(companies_freqtable)
  # calculating the cumulative number of ads for the x biggest company names
  companies_freqtable <- arrange(companies_freqtable , desc(ads_per_company))
  companies_freqtable$tot_ads <- companies_freqtable$n_companies * companies_freqtable$ads_per_company
  companies_freqtable$cum_prop_ads <- 100 * cumsum(companies_freqtable$tot_ads) / sum(companies_freqtable$tot_ads)
  companies_freqtable$cum_prop_companies <- 100 * cumsum(companies_freqtable$n_companies) / sum(companies_freqtable$n_companies)
  companies_freqtable$cum_n_companies <- cumsum(companies_freqtable$n_companies)
  head(companies_freqtable)
  ### returning output: the 100 most frequent company names
  output <- companies_names_dataframe[1:100,]
  return(output)
}
# Example single-country run, kept for reference:
#fctoutput <- check_company_names_fct("IT")
# EU-27 country codes, as a one-column matrix so apply() iterates over rows.
countrylist <- as.matrix(c("BE","BG","CZ","DK","DE","EE","IE","EL","ES","FR","HR","IT","CY","LV","LT","LU","HU","MT","NL","AT","PL","PT","RO","SI","SK","FI","SE"))
dim(countrylist)
# Run the extraction per country; each list element is the 100-row data frame
# (companyname, Freq, ads_per_company) returned by check_company_names_fct.
company_names_allcountries <- apply(countrylist,1,check_company_names_fct)
# Start from a single rank column (1..100), then bind each country's three
# columns next to it.
mydata <- as.data.frame(as.matrix(c(1:100)))
for(i in 1:length(countrylist)) {
print(i)
print(dim(company_names_allcountries[[i]]))
mydata <- cbind(mydata,company_names_allcountries[[i]])
}
# Manually assembled header: per country, the name column is labelled with the
# country code and the two remaining columns are both labelled "Freq".
# NOTE(review): this vector's length must stay equal to
# 1 + 3 * length(countrylist) -- verify if the country list changes.
colnames(mydata) <- c("rank","BE","Freq","Freq","BG","Freq","Freq","CZ","Freq","Freq","DK","Freq","Freq","DE","Freq","Freq","EE","Freq","Freq","IE","Freq","Freq","EL","Freq","Freq","ES","Freq","Freq","FR","Freq","Freq","HR","Freq","Freq","IT","Freq","Freq","CY","Freq","Freq","LV","Freq","Freq","LT","Freq","Freq","LU","Freq","Freq","HU","Freq","Freq","MT","Freq","Freq","NL","Freq","Freq","AT","Freq","Freq","PL","Freq","Freq","PT","Freq","Freq","RO","Freq","Freq","SI","Freq","Freq","SK","Freq","Freq","FI","Freq","Freq","SE","Freq","Freq")
mydata <- as.data.frame(mydata)
### print and view output
# Write the combined table to disk, then open interactive views.
write.csv2(mydata , "top100companies_allcountries.csv")
help(write.csv)
View(mydata)
|
/check_company_names_allcountries.R
|
no_license
|
gabrimarconi/OJA-LMCI
|
R
| false
| false
| 7,014
|
r
|
#libraries and data connection
#install.packages("wihoja")
library(wihoja)
open_oja_db()
#install.packages("tidyverse")
library(tidyverse)
country <- "IT"
#' Top-100 company names from online job ads for one country
#'
#' Samples up to 1,000,000 ads for the given country, deduplicates them by
#' general_id, standardises company names (lower case, trimmed, spaces
#' replaced by underscores, empty names dropped), removes staffing agencies
#' listed in staff_agencies.csv, and returns the 100 most frequent remaining
#' names.
#'
#' @param country two-letter country code matched against the idcountry column
#' @return a data frame with the 100 most frequent company names and their
#'   frequencies (plus the ads_per_company factor column)
check_company_names_fct <- function(country) {
  print(country)
  ### creating a table with company names' frequency, sorted by frequency or company name
  # query and deduplication: keep only the first ad seen for each general_id
  query <- paste0("SELECT companyname, general_id FROM estat_dsl2531b_oja.ft_document_en_v8 WHERE idcountry='",country,"' ORDER BY RAND() LIMIT 1000000")
  companies_names_query <- query_athena(query)
  dim(companies_names_query)
  companies_names_query$dup <- ifelse(duplicated(companies_names_query$general_id), 1, 0)
  # BUGFIX: the original indexed without a comma (df[logical]), which selects
  # columns rather than rows; rows must be selected here
  companies_names_query <- companies_names_query[companies_names_query$dup==0, ]
  # background checks
  table(companies_names_query$dup)
  dim(companies_names_query)
  # creating a table with company names' frequency, sorted by frequency or company name
  companies_names_dataframe <- as.data.frame(table(companies_names_query$companyname))
  colnames(companies_names_dataframe) <- c("companyname","Freq")
  companies_names_dataframe <- arrange(companies_names_dataframe , desc(Freq))
  companies_names_dataframe_bynames <- arrange(companies_names_dataframe , companyname)
  str(companies_names_dataframe)
  # standardising company names and dropping empty ones
  companies_names_dataframe$companyname <- str_to_lower(companies_names_dataframe$companyname)
  companies_names_dataframe$companyname <- str_trim(companies_names_dataframe$companyname)
  companies_names_dataframe$companyname <- gsub(" ","_",companies_names_dataframe$companyname)
  companies_names_dataframe$notgood <- ifelse(companies_names_dataframe$companyname=="",1,0)
  companies_names_dataframe <- companies_names_dataframe[companies_names_dataframe$notgood != 1 , -3]
  dim(companies_names_dataframe)
  # applying the job agency filter: substring matches vs exact matches
  staff_agencies <- read.csv("staff_agencies.csv" , sep = ";")
  blacklist <- staff_agencies[staff_agencies$exact != "exact" , 2]
  blacklist_exact <- staff_agencies[staff_agencies$exact == "exact" , 2]
  length(blacklist)
  filteredout <- cbind.data.frame(0,0)[-1,]
  colnames(filteredout) <- c("companyname" , "Freq")
  # seq_along() instead of 1:length() so an empty blacklist loops zero times
  for(i in seq_along(blacklist)) {
    filteredout <- rbind(filteredout , filter(companies_names_dataframe, str_detect(companies_names_dataframe$companyname, blacklist[i]) ) )
    companies_names_dataframe <- filter(companies_names_dataframe, str_detect(companies_names_dataframe$companyname, blacklist[i] , negate = TRUE))
  }
  for(i in seq_along(blacklist_exact)) {
    filteredout <- rbind(filteredout, filter(companies_names_dataframe, blacklist_exact[i] == companies_names_dataframe$companyname) )
    companies_names_dataframe <- filter(companies_names_dataframe, blacklist_exact[i] != companies_names_dataframe$companyname)
  }
  filteredout <- arrange(filteredout , desc(Freq))
  dim(filteredout)
  dim(companies_names_dataframe)
  # generating a table of number of companies having x ads
  companies_freqtable <- as.data.frame(table(companies_names_dataframe$Freq))
  colnames(companies_freqtable) <- c("ads_per_company" , "n_companies")
  # ensuring numeric types; as.numeric on the ads_per_company factor would
  # return level codes, so the numeric value is recovered through a merge
  companies_names_dataframe$ads_per_company <- as.factor(companies_names_dataframe$Freq)
  companies_freqtable <- merge(companies_freqtable , companies_names_dataframe[duplicated(companies_names_dataframe$ads_per_company) == FALSE , -1])[ , -1]
  colnames(companies_freqtable) <- c("n_companies" , "ads_per_company")
  companies_freqtable$n_companies <- as.numeric(companies_freqtable$n_companies)
  str(companies_freqtable)
  # calculating the cumulative number of ads for the x biggest company names
  companies_freqtable <- arrange(companies_freqtable , desc(ads_per_company))
  companies_freqtable$tot_ads <- companies_freqtable$n_companies * companies_freqtable$ads_per_company
  companies_freqtable$cum_prop_ads <- 100 * cumsum(companies_freqtable$tot_ads) / sum(companies_freqtable$tot_ads)
  companies_freqtable$cum_prop_companies <- 100 * cumsum(companies_freqtable$n_companies) / sum(companies_freqtable$n_companies)
  companies_freqtable$cum_n_companies <- cumsum(companies_freqtable$n_companies)
  head(companies_freqtable)
  ### returning output: the 100 most frequent company names
  output <- companies_names_dataframe[1:100,]
  return(output)
}
# Example single-country run, kept for reference:
#fctoutput <- check_company_names_fct("IT")
# EU-27 country codes, as a one-column matrix so apply() iterates over rows.
countrylist <- as.matrix(c("BE","BG","CZ","DK","DE","EE","IE","EL","ES","FR","HR","IT","CY","LV","LT","LU","HU","MT","NL","AT","PL","PT","RO","SI","SK","FI","SE"))
dim(countrylist)
# Run the extraction per country; each list element is the 100-row data frame
# (companyname, Freq, ads_per_company) returned by check_company_names_fct.
company_names_allcountries <- apply(countrylist,1,check_company_names_fct)
# Start from a single rank column (1..100), then bind each country's three
# columns next to it.
mydata <- as.data.frame(as.matrix(c(1:100)))
for(i in 1:length(countrylist)) {
print(i)
print(dim(company_names_allcountries[[i]]))
mydata <- cbind(mydata,company_names_allcountries[[i]])
}
# Manually assembled header: per country, the name column is labelled with the
# country code and the two remaining columns are both labelled "Freq".
# NOTE(review): this vector's length must stay equal to
# 1 + 3 * length(countrylist) -- verify if the country list changes.
colnames(mydata) <- c("rank","BE","Freq","Freq","BG","Freq","Freq","CZ","Freq","Freq","DK","Freq","Freq","DE","Freq","Freq","EE","Freq","Freq","IE","Freq","Freq","EL","Freq","Freq","ES","Freq","Freq","FR","Freq","Freq","HR","Freq","Freq","IT","Freq","Freq","CY","Freq","Freq","LV","Freq","Freq","LT","Freq","Freq","LU","Freq","Freq","HU","Freq","Freq","MT","Freq","Freq","NL","Freq","Freq","AT","Freq","Freq","PL","Freq","Freq","PT","Freq","Freq","RO","Freq","Freq","SI","Freq","Freq","SK","Freq","Freq","FI","Freq","Freq","SE","Freq","Freq")
mydata <- as.data.frame(mydata)
### print and view output
# Write the combined table to disk, then open interactive views.
write.csv2(mydata , "top100companies_allcountries.csv")
help(write.csv)
View(mydata)
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
# Define UI for application that draws a histogram
ui <- fluidPage(
  # Application title
  titlePanel("Workout 02"),
  fluidRow(width = 12,
    # column 1: starting capital and yearly contribution
    column(width = 4,
      sliderInput("amount", label = "initial amount:",
                  min = 0, max = 100000, step = 500, value = 1000),
      sliderInput("contrib", label = "Annual Contribution:",
                  min = 0, max = 50000, step = 500, value = 2000)
    ),
    # column 2: annual return and contribution growth, both in percent
    column(width = 4,
      sliderInput("rate", label = "return rate (in %):",
                  min = 0, max = 20, step = 0.1, value = 5),
      sliderInput("growth", label = "growth rate (in %):",
                  min = 0, max = 20, step = 0.1, value = 2)
    ),
    # column 3: investment horizon and the faceting switch
    column(width = 4,
      sliderInput("years", label = "Year:",
                  min = 0, max = 50, step = 1, value = 20),
      selectInput("facet", label = "Facet:",
                  choices = list("Yes" = TRUE, "No" = FALSE), selected = FALSE)
    ),
    # main area: the timeline plot followed by the balances table
    mainPanel(width = 12,
      titlePanel("Timeline"),
      plotOutput("distPlot"),
      titlePanel("Balances"),
      verbatimTextOutput("view")
    )
  )
)
# Define server logic required to draw a histogram
server <- function(input, output) {

  # --- closed-form financial helpers (all vectorised over `years`) ---------

  # Future value of a single deposit after `years` years at rate `rate`.
  future_value <- function(amount, rate, years) {
    amount * (1 + rate)^years
  }
  # Future value of a fixed annual contribution (ordinary annuity).
  annuity <- function(contrib, rate, years) {
    contrib * (((1 + rate)^years) - 1) / rate
  }
  # Future value of an annual contribution growing at rate `growth`.
  growing_annuity <- function(contrib, rate, growth, years) {
    contrib * (((1 + rate)^years) - (1 + growth)^years) / (rate - growth)
  }

  # Year-by-year balances for the three saving modalities, computed once in a
  # reactive and shared by the plot and the printed table (the original
  # duplicated the whole computation in both render blocks, and grew the
  # result vectors one element at a time from an under-sized preallocation).
  modalities <- reactive({
    yrs <- 0:input$years
    rate <- input$rate * 0.01
    growth <- input$growth * 0.01
    base <- future_value(amount = input$amount, rate = rate, years = yrs)
    data.frame(
      year = yrs,
      no_contrib = base,
      fixed_contrib = base + annuity(contrib = input$contrib, rate = rate, years = yrs),
      growing_contrib = base + growing_annuity(contrib = input$contrib, rate = rate, growth = growth, years = yrs)
    )
  })

  output$distPlot <- renderPlot({
    mod <- modalities()
    n <- nrow(mod)
    # Long format for ggplot. Explicit factor levels keep the legend labels
    # attached to the correct series: the original passed `labels =` to
    # scale_color_discrete while the levels were ordered alphabetically,
    # mislabelling the legend entries.
    df <- data.frame(
      year = rep(mod$year, 3),
      values = c(mod$no_contrib, mod$fixed_contrib, mod$growing_contrib),
      type = factor(rep(c("no_contrib", "fixed_contrib", "growing_contrib"), each = n),
                    levels = c("no_contrib", "fixed_contrib", "growing_contrib"))
    )
    p <- ggplot(df, aes(year, values, color = type)) +
      geom_line() +
      geom_point() +
      scale_color_discrete(name = "Modality") +
      xlab("year") + ylab("balance") +
      ggtitle("Annual Balance for each Savings Modality") +
      theme_bw()
    # input$facet arrives as the character "TRUE"/"FALSE" from selectInput;
    # if() coerces it, as in the original.
    if (input$facet) {
      p <- p + facet_grid(. ~ type) +
        geom_area(aes(fill = type), alpha = 0.2, show.legend = FALSE)
    }
    p
  })

  # Print the raw balances table below the plot.
  output$view <- renderPrint({
    modalities()
  })
}
# Launch the Shiny application with the UI and server defined above
shinyApp(ui = ui, server = server)
|
/workout02/workout02-irene-wang/app.R
|
no_license
|
stat133-sp19/hw-stat133-irenerwang
|
R
| false
| false
| 6,679
|
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
# Define UI for application that draws a histogram
ui <- fluidPage(
  # Application title
  titlePanel("Workout 02"),
  fluidRow(width = 12,
    # column 1: starting capital and yearly contribution
    column(width = 4,
      sliderInput("amount", label = "initial amount:",
                  min = 0, max = 100000, step = 500, value = 1000),
      sliderInput("contrib", label = "Annual Contribution:",
                  min = 0, max = 50000, step = 500, value = 2000)
    ),
    # column 2: annual return and contribution growth, both in percent
    column(width = 4,
      sliderInput("rate", label = "return rate (in %):",
                  min = 0, max = 20, step = 0.1, value = 5),
      sliderInput("growth", label = "growth rate (in %):",
                  min = 0, max = 20, step = 0.1, value = 2)
    ),
    # column 3: investment horizon and the faceting switch
    column(width = 4,
      sliderInput("years", label = "Year:",
                  min = 0, max = 50, step = 1, value = 20),
      selectInput("facet", label = "Facet:",
                  choices = list("Yes" = TRUE, "No" = FALSE), selected = FALSE)
    ),
    # main area: the timeline plot followed by the balances table
    mainPanel(width = 12,
      titlePanel("Timeline"),
      plotOutput("distPlot"),
      titlePanel("Balances"),
      verbatimTextOutput("view")
    )
  )
)
# Define server logic required to draw a histogram
# Server: computes year-by-year balances for three savings modalities
# (lump sum only, + fixed annual contribution, + growing annual
# contribution) and renders them as a timeline plot and a table.
server <- function(input, output) {

  # --- Financial formulas (previously duplicated in both render blocks) ---

  # Future value of a lump sum `amount` compounded at `rate` for `years`.
  future_value <- function(amount, rate, years) {
    amount * (1 + rate)^years
  }

  # Future value of a fixed annual contribution (ordinary annuity).
  # BUG FIX: the closed form divides by `rate`; with the slider at 0% it
  # returned NaN.  Use the rate -> 0 limit, contrib * years, instead.
  annuity <- function(contrib, rate, years) {
    if (rate == 0) {
      contrib * years
    } else {
      contrib * ((1 + rate)^years - 1) / rate
    }
  }

  # Future value of an annually growing contribution (growing annuity).
  # BUG FIX: the closed form is 0/0 when rate == growth; use the limit
  # contrib * years * (1 + rate)^(years - 1) in that case.
  growing_annuity <- function(contrib, rate, growth, years) {
    if (rate == growth) {
      contrib * years * (1 + rate)^(years - 1)
    } else {
      contrib * ((1 + rate)^years - (1 + growth)^years) / (rate - growth)
    }
  }

  # Balances for years 0..input$years under the three modalities,
  # computed once and shared by both outputs (the original recomputed
  # everything separately inside renderPlot and renderPrint).
  modalities <- reactive({
    yrs <- 0:input$years
    rate <- input$rate * 0.01
    growth <- input$growth * 0.01
    base <- future_value(input$amount, rate, yrs)
    data.frame(
      year = yrs,
      no_contrib = base,
      fixed_contrib = base + annuity(input$contrib, rate, yrs),
      growing_contrib = base + growing_annuity(input$contrib, rate, growth, yrs)
    )
  })

  output$distPlot <- renderPlot({
    mod <- modalities()
    n <- nrow(mod)
    # Long format: one row per (year, modality).
    df <- data.frame(
      year = rep(mod$year, 3),
      values = c(mod$no_contrib, mod$fixed_contrib, mod$growing_contrib),
      type = rep(c("no_contrib", "fixed_contrib", "growing_contrib"), each = n)
    )
    # BUG FIX: the original mapped colour to a separate `colors` column and
    # then relabelled the (alphabetically ordered) colour levels with
    # modality names, so legend labels did not match the drawn series.
    # Mapping colour directly to `type` keeps the legend truthful.
    p <- ggplot(df, aes(year, values, colour = type)) +
      geom_line() +
      geom_point() +
      scale_color_discrete(name = "Modality") +
      xlab("year") + ylab("balance") +
      ggtitle("Annual Balance for each Savings Modality") +
      theme_bw()
    # `input$facet` arrives as the string "TRUE"/"FALSE" (selectInput
    # values are coerced to character); if() coerces it back to logical.
    if (input$facet) {
      p <- p + facet_grid(. ~ type) + geom_area(aes(fill = type), alpha = 0.2)
    }
    p
  })

  # Print the balances table under the plot.
  output$view <- renderPrint({
    modalities()
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
##### Exploratory Data Analysis - Course Project 1
##### Plot 1
#### Load the data
# NOTE TO USER: Set working directory to location of dataset
# "household_power_consumption.txt"
## Only read in data with dates from 2007-02-01 and 2007-02-02
# Get file
# NOTE: file() returns an *unopened* connection; each readLines() call below
# opens it, reads, and closes it again, so every call re-reads from the
# start of the file (which is why the header read further down still works).
filehpc <- file("household_power_consumption.txt")
# Read in specified lines of file
# Credit to Rene Shiou-Ling Wang,"Project 1 - Loading only the required data"
# https://class.coursera.org/exdata-006/forum/thread?thread_id=10
# NOTE(review): "[1,2]" is a character class containing '1', ',' and '2';
# it matches the intended dates (1/2/2007 and 2/2/2007), but "^[12]/2/2007"
# would express the intent more precisely.
hpc <- read.table(text = grep("^[1,2]/2/2007"   # 1/2/2007 or 2/2/2007
                      ,readLines(filehpc)
                      ,value = TRUE)
                  ,sep = ";"
                  ,stringsAsFactors = FALSE)
# Apply column names to hpc data frame
# Get column names by reading first line into a table
x <- readLines(filehpc, n = 1)  # character (the header line)
# Convert column names to character
y <- unlist(strsplit(x, ";"))  # character -> list -> character
# Apply column names
colnames(hpc) <- y
# Convert Date and Time to class date
# Convert Date to date class with as.Date() in hpc data frame
hpc <- transform(hpc, Date = as.Date(Date, format = '%d/%m/%Y'))
# Date in format yyyy-mm-dd
# Convert Time to date/time class
# Get Date as character and Time as character
dateChar <- as.character(hpc$Date)
timeChar <- hpc$Time
# Concatenate dateChar and timeChar to "yyyy-mm-dd hh:mm:ss" format
dateTimeChar <- paste(dateChar, timeChar)
# Convert Time to class "POSIXlt" "POSIXt" (will include date)
hpc <- transform(hpc, Time = strptime(dateTimeChar, "%Y-%m-%d %H:%M:%S"))
#_____________________________________________________________________________________
### Making Plot 1
# Launch graphics device PNG
png(file = "plot1.png"
    ,width = 480
    ,height = 480)
# Create histogram and send to a file
hist(hpc$Global_active_power
     ,col = "red"
     ,main = "Global Active Power"
     ,xlab = "Global Active Power (kilowatts)")
dev.off() # close the PNG file device
# PNG file saved as "plot1.png" in working directory
# Close the connection opened by file()
# (the connection was never left open, but close() also destroys the
# connection object, so this is harmless cleanup)
close(filehpc)
|
/plot1.R
|
no_license
|
DataMoose/ExData_Plotting1
|
R
| false
| false
| 2,476
|
r
|
##### Exploratory Data Analysis - Course Project 1
##### Plot 1
#### Load the data
# NOTE TO USER: Set working directory to location of dataset
# "household_power_consumption.txt"
## Only read in data with dates from 2007-02-01 and 2007-02-02
# Get file
# NOTE: file() returns an *unopened* connection; each readLines() call below
# opens it, reads, and closes it again, so every call re-reads from the
# start of the file (which is why the header read further down still works).
filehpc <- file("household_power_consumption.txt")
# Read in specified lines of file
# Credit to Rene Shiou-Ling Wang,"Project 1 - Loading only the required data"
# https://class.coursera.org/exdata-006/forum/thread?thread_id=10
# NOTE(review): "[1,2]" is a character class containing '1', ',' and '2';
# it matches the intended dates (1/2/2007 and 2/2/2007), but "^[12]/2/2007"
# would express the intent more precisely.
hpc <- read.table(text = grep("^[1,2]/2/2007"   # 1/2/2007 or 2/2/2007
                      ,readLines(filehpc)
                      ,value = TRUE)
                  ,sep = ";"
                  ,stringsAsFactors = FALSE)
# Apply column names to hpc data frame
# Get column names by reading first line into a table
x <- readLines(filehpc, n = 1)  # character (the header line)
# Convert column names to character
y <- unlist(strsplit(x, ";"))  # character -> list -> character
# Apply column names
colnames(hpc) <- y
# Convert Date and Time to class date
# Convert Date to date class with as.Date() in hpc data frame
hpc <- transform(hpc, Date = as.Date(Date, format = '%d/%m/%Y'))
# Date in format yyyy-mm-dd
# Convert Time to date/time class
# Get Date as character and Time as character
dateChar <- as.character(hpc$Date)
timeChar <- hpc$Time
# Concatenate dateChar and timeChar to "yyyy-mm-dd hh:mm:ss" format
dateTimeChar <- paste(dateChar, timeChar)
# Convert Time to class "POSIXlt" "POSIXt" (will include date)
hpc <- transform(hpc, Time = strptime(dateTimeChar, "%Y-%m-%d %H:%M:%S"))
#_____________________________________________________________________________________
### Making Plot 1
# Launch graphics device PNG
png(file = "plot1.png"
    ,width = 480
    ,height = 480)
# Create histogram and send to a file
hist(hpc$Global_active_power
     ,col = "red"
     ,main = "Global Active Power"
     ,xlab = "Global Active Power (kilowatts)")
dev.off() # close the PNG file device
# PNG file saved as "plot1.png" in working directory
# Close the connection opened by file()
# (the connection was never left open, but close() also destroys the
# connection object, so this is harmless cleanup)
close(filehpc)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workmail_operations.R
\name{workmail_list_organizations}
\alias{workmail_list_organizations}
\title{Returns summaries of the customer's organizations}
\usage{
workmail_list_organizations(NextToken, MaxResults)
}
\arguments{
\item{NextToken}{The token to use to retrieve the next page of results. The first call
does not contain any tokens.}
\item{MaxResults}{The maximum number of results to return in a single call.}
}
\value{
A list with the following syntax:\preformatted{list(
OrganizationSummaries = list(
list(
OrganizationId = "string",
Alias = "string",
DefaultMailDomain = "string",
ErrorMessage = "string",
State = "string"
)
),
NextToken = "string"
)
}
}
\description{
Returns summaries of the customer's organizations.
}
\section{Request syntax}{
\preformatted{svc$list_organizations(
NextToken = "string",
MaxResults = 123
)
}
}
\keyword{internal}
|
/cran/paws.business.applications/man/workmail_list_organizations.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 989
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workmail_operations.R
\name{workmail_list_organizations}
\alias{workmail_list_organizations}
\title{Returns summaries of the customer's organizations}
\usage{
workmail_list_organizations(NextToken, MaxResults)
}
\arguments{
\item{NextToken}{The token to use to retrieve the next page of results. The first call
does not contain any tokens.}
\item{MaxResults}{The maximum number of results to return in a single call.}
}
\value{
A list with the following syntax:\preformatted{list(
OrganizationSummaries = list(
list(
OrganizationId = "string",
Alias = "string",
DefaultMailDomain = "string",
ErrorMessage = "string",
State = "string"
)
),
NextToken = "string"
)
}
}
\description{
Returns summaries of the customer's organizations.
}
\section{Request syntax}{
\preformatted{svc$list_organizations(
NextToken = "string",
MaxResults = 123
)
}
}
\keyword{internal}
|
####################################################
###### The volatility updating rule ##
####################################################
# One step of the GJR-type volatility updating rule.
#
# Computes the next conditional variance from the previous return `ret`,
# previous variance `h` and previous risk-free rate `rt`, under the
# parameter vector para_h = c(a0, a1, a2, b1, lamda0, ro).
# Returns NA if any positivity / stationarity constraint is violated.
gsqrt <- function(para_h, ret, h, rt)
{
  a0 <- para_h[1]; a1 <- para_h[2]; a2 <- para_h[3]
  b1 <- para_h[4]; lamda0 <- para_h[5]; ro <- para_h[6]

  # Persistence of the variance process and implied unconditional variance.
  g1 <- b1 + (a1 + a2 * pnorm(lamda0)) * (1 + lamda0^2) + a2 * lamda0 * dnorm(lamda0)
  h0 <- a0 / (1 - g1)
  mt_star <- -h / 2  # predictable excess-return term m_t = -h/2

  # TRUE when x is a strictly positive finite number (with finite 1/x),
  # replicating the original drapeau checks.
  pos_finite <- function(x) !is.na(x) && x > 0 && is.finite(x) && is.finite(1 / x)

  ok <- a0 > 0 && a1 > 0 && a2 > 0 && b1 > 0 && lamda0 > 0 &&
    !is.na(g1) && g1 > 0 && g1 < 1 && is.finite(g1) && is.finite(1 / g1) &&
    pos_finite(h0) && pos_finite(h)
  if (!ok) {
    return(NA)
  }

  # Return-equation innovation.
  eps <- ret - rt - mt_star - lamda0 * sqrt(h)
  # GJR update: a2 puts extra weight on *negative* innovations (leverage).
  # BUG FIX: the original leverage term was a2 * max(0, -(eps)^2), which is
  # identically zero because -eps^2 <= 0 for every eps; the intended
  # threshold term is a2 * (max(0, -eps))^2.
  a0 + b1 * h + a1 * eps^2 + a2 * max(0, -eps)^2
}
##############################################################
###### Conditional variance with risk netral Proba ##
##############################################################
# Conditional variance path under the risk-neutral measure.
#
# Iterates gsqrt() over the return series in Data.returns (expected
# columns: rt = annualised interest rate, ret = returns), starting from
# the unconditional variance.  Returns a vector of conditional variances
# the same length as the return series, or a vector of NA when any
# parameter / stationarity constraint is violated.
hstar<-function(para_h,Data.returns){
  rt=Data.returns$rt/250 #### Interest rate Data : Data.BSJ$rt (converted to a daily rate)
  ret=Data.returns$ret #### Returns : Data.BSJ$ret
  Z1=length(ret)
  # para_h<-c() set up the parameters of the model
  a0=para_h[1]; a1=para_h[2]; a2=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; ro=para_h[6]
  # Parameter under the physical probability
  g1= b1+ (a1+a2*(pnorm(lamda0)))*(1+lamda0^2)+a2*lamda0*dnorm(lamda0) #### The persistence
  h0=(a0 )/(1 - g1) #### The first value for h, Unconditional Variance
  h_star= c() #### A vector containing h from the model,
  h_star[1]=h0 #### The first value for h, Unconditional Variance
  mt_star = c() #### the predictible excess of return process mt,
  mt_star[1]= -(h_star[1])/2
  # Recursively update the variance: each step uses the previous period's
  # return, variance and rate (see gsqrt()).
  # NOTE(review): if Z1 == 1 the sequence 2:Z1 is c(2, 1) and the loop
  # runs backwards; callers are assumed to pass series of length >= 2.
  for (i in 2:Z1){
    h_star[i]=gsqrt(para_h,ret[i-1],h_star[i-1],rt[i-1])
    mt_star[i]= - (h_star[i])/2
  }
  z2=min(h_star)  # smallest variance on the path; must stay non-negative
  # Validity flag: any violated constraint invalidates the whole path.
  drapeau=0
  if (a0<=0){drapeau=1}
  if (a1<=0){drapeau=1}
  if (a2<=0){drapeau=1}
  if (b1<=0){drapeau=1}
  if (ro<=0){drapeau=1}
  if (ro>=1){drapeau=1}
  if (is.na(g1)==TRUE){drapeau=1}else{
    if (g1>=1){drapeau=1}
    if (g1<=0){drapeau=1}
    if (abs(g1)==Inf){drapeau=1}
    if (1/abs(g1)==Inf){drapeau=1}
  }
  if (is.na(z2)==TRUE){drapeau=1}else{
    if (z2<0){drapeau=1}
    if (abs(z2)==Inf){drapeau=1}
    if (1/abs(z2)==Inf){drapeau=1}
  }
  # Return the variance path, or NA for every observation if invalid.
  if (drapeau==0){
    resultat=h_star
  }else{
    resultat=rep(NA, Z1)
  }
  return(resultat)
}
######################
###### VIX ##
######################
# Model-implied VIX for one day.
#
# Given the parameter vector para_h = c(a0, a1, a2, b1, lamda0, ro) and
# the current conditional variance h, returns the model VIX as an
# annualised weighted average of the current and long-run variance over
# a 22-day horizon.  Returns NA when any parameter or stationarity
# constraint is violated.
VIX_Q <- function(para_h, h) {
  tau <- 250  # trading days per year (annualisation factor)
  T_0 <- 22   # VIX maturity in trading days (~ one month)

  a0 <- para_h[1]; a1 <- para_h[2]; a2 <- para_h[3]
  b1 <- para_h[4]; lamda0 <- para_h[5]; ro <- para_h[6]

  # Persistence and unconditional variance of the variance process.
  Psy <- b1 + (a1 + a2 * pnorm(lamda0)) * (1 + lamda0^2) + a2 * lamda0 * dnorm(lamda0)
  h_0 <- a0 / (1 - Psy)

  # Validity: strictly positive parameters, ro in (0, 1), h a positive
  # finite number, and persistence strictly inside (0, 1).
  params_ok <- a0 > 0 && a1 > 0 && a2 > 0 && b1 > 0 && ro > 0 && ro < 1
  h_ok <- !is.na(h) && h > 0 && is.finite(h) && is.finite(1 / h)
  psy_ok <- !is.na(Psy) && Psy > 0 && Psy < 1 && is.finite(Psy) && is.finite(1 / Psy)

  if (!(params_ok && h_ok && psy_ok)) {
    return(NA)
  }

  # Weight on the current variance; the remainder goes to the long-run level.
  w <- (1 - Psy^T_0) / ((1 - Psy) * T_0)
  100 * sqrt(tau * (h * w + h_0 * (1 - w)))
}
###########################################################
##### The Log-likeelihood over all Option ####
###########################################################
# Log-likelihood of the model-implied VIX against the observed VIX.
#
# para_M = c(para_distribution, para_h): the first four entries are the
# NIG distribution parameters (unused here), the remaining six are the
# GARCH parameters passed to hstar() / VIX_Q().  Data.returns supplies
# the return series for the variance filter; Data.ret$VIX is the
# observed VIX series.  Returns a scalar Gaussian log-likelihood of the
# VIX pricing errors.
GJR_likelihood_vix <- function(para_M, Data.returns,Data.ret){
  Vix=Data.ret$VIX #### Call dividende
  # para_M = c(para_distribution,para_h)
  # alpha=para_distribution[1], beta=para_distribution[2], delta=para_distribution[3], mu=para_distribution[4]
  # a0=para_h[1]; a1=para_h[2]; a2=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; ro=para_h[6]
  ## set up the parameters of the model : para_M = c(para_distribution,para_h)
  alpha=para_M[1]; beta=para_M[2]; delta=para_M[3]; mu=para_M[4]
  a0=para_M[5]; a1=para_M[6]; a2=para_M[7]; b1= para_M[8] ; lamda0= para_M[9] ; ro=para_M[10]
  para_h <- c(a0,a1,a2,b1,lamda0,ro)
  # para_h<-c() set up the parameters of the model
  a0=para_h[1]; a1=para_h[2]; a2=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; ro=para_h[6]
  VIX_Market<-Vix
  Nvix=length(Vix)
  # Filtered conditional variance path from the return series.
  h = hstar(para_h,Data.returns)
  # Model VIX for each observation.
  # NOTE(review): h[i+1] pairs observation i with the *next* day's
  # variance; this assumes length(h) >= Nvix + 1 — confirm against the
  # alignment of Data.returns and Data.ret.
  VIX_Model <- rep(NA, Nvix)
  for (i in 1:Nvix){
    VIX_Model[i]= VIX_Q(para_h,h[i+1])
  }
  # Pricing errors; the last one is pinned to 0.
  error <- rep(NA, Nvix)
  error[Nvix]=0
  # NOTE(review): `1:Nvix-1` parses as (1:Nvix) - 1, i.e. 0..Nvix-1.
  # The i = 0 iteration is a silent no-op (zero-length replacement), so
  # the result is as intended, but `seq_len(Nvix - 1)` would express it.
  for (i in 1:Nvix-1){
    error[i]= VIX_Market[i] - VIX_Model[i]
  }
  # AR(1)-adjusted squared errors (autocorrelation parameter ro).
  error_2 <- rep(NA, Nvix)
  error_2[1]=0
  for (i in 2:Nvix){
    error_2[i]= ((error[i]-ro*error[i-1])^2)/(1-ro^2)
  }
  sigma=mean(error^2)
  # Gaussian log-likelihood of the pricing errors.
  log_like=-1/2*sum(log(sigma)+((error^2)/sigma))
  # NOTE(review): the following expression is evaluated and *discarded*
  # (it is not assigned and the function returns log_like below).  It
  # looks like the AR(1) error likelihood and also reads `error[i]` with
  # the stale loop index — possibly the intended objective; confirm
  # before removing or wiring it in.
  -(Nvix/2)*(log(2*pi)+log(sigma*(1-(ro^2))))+ (1/2)*(log(sigma*(1-(ro^2)))-log(sigma))-(1/(2*sigma))*(error[i]^2+sum(error_2))
  return(log_like)
}
|
/estimationJob/NIG_GARCH_New_estimation/NIG_GARCH_GJR_ret_vix/Loglik_VIX_GJR.R
|
no_license
|
Fanirisoa/dynamic_pricing
|
R
| false
| false
| 6,089
|
r
|
####################################################
###### The volatility updating rule ##
####################################################
# One step of the GJR-type volatility updating rule.
#
# Computes the next conditional variance from the previous return `ret`,
# previous variance `h` and previous risk-free rate `rt`, under the
# parameter vector para_h = c(a0, a1, a2, b1, lamda0, ro).
# Returns NA if any positivity / stationarity constraint is violated.
gsqrt <- function(para_h, ret, h, rt)
{
  a0 <- para_h[1]; a1 <- para_h[2]; a2 <- para_h[3]
  b1 <- para_h[4]; lamda0 <- para_h[5]; ro <- para_h[6]

  # Persistence of the variance process and implied unconditional variance.
  g1 <- b1 + (a1 + a2 * pnorm(lamda0)) * (1 + lamda0^2) + a2 * lamda0 * dnorm(lamda0)
  h0 <- a0 / (1 - g1)
  mt_star <- -h / 2  # predictable excess-return term m_t = -h/2

  # TRUE when x is a strictly positive finite number (with finite 1/x),
  # replicating the original drapeau checks.
  pos_finite <- function(x) !is.na(x) && x > 0 && is.finite(x) && is.finite(1 / x)

  ok <- a0 > 0 && a1 > 0 && a2 > 0 && b1 > 0 && lamda0 > 0 &&
    !is.na(g1) && g1 > 0 && g1 < 1 && is.finite(g1) && is.finite(1 / g1) &&
    pos_finite(h0) && pos_finite(h)
  if (!ok) {
    return(NA)
  }

  # Return-equation innovation.
  eps <- ret - rt - mt_star - lamda0 * sqrt(h)
  # GJR update: a2 puts extra weight on *negative* innovations (leverage).
  # BUG FIX: the original leverage term was a2 * max(0, -(eps)^2), which is
  # identically zero because -eps^2 <= 0 for every eps; the intended
  # threshold term is a2 * (max(0, -eps))^2.
  a0 + b1 * h + a1 * eps^2 + a2 * max(0, -eps)^2
}
##############################################################
###### Conditional variance with risk netral Proba ##
##############################################################
# Conditional variance path under the risk-neutral measure.
#
# Iterates gsqrt() over the return series in Data.returns (expected
# columns: rt = annualised interest rate, ret = returns), starting from
# the unconditional variance.  Returns a vector of conditional variances
# the same length as the return series, or a vector of NA when any
# parameter / stationarity constraint is violated.
hstar<-function(para_h,Data.returns){
  rt=Data.returns$rt/250 #### Interest rate Data : Data.BSJ$rt (converted to a daily rate)
  ret=Data.returns$ret #### Returns : Data.BSJ$ret
  Z1=length(ret)
  # para_h<-c() set up the parameters of the model
  a0=para_h[1]; a1=para_h[2]; a2=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; ro=para_h[6]
  # Parameter under the physical probability
  g1= b1+ (a1+a2*(pnorm(lamda0)))*(1+lamda0^2)+a2*lamda0*dnorm(lamda0) #### The persistence
  h0=(a0 )/(1 - g1) #### The first value for h, Unconditional Variance
  h_star= c() #### A vector containing h from the model,
  h_star[1]=h0 #### The first value for h, Unconditional Variance
  mt_star = c() #### the predictible excess of return process mt,
  mt_star[1]= -(h_star[1])/2
  # Recursively update the variance: each step uses the previous period's
  # return, variance and rate (see gsqrt()).
  # NOTE(review): if Z1 == 1 the sequence 2:Z1 is c(2, 1) and the loop
  # runs backwards; callers are assumed to pass series of length >= 2.
  for (i in 2:Z1){
    h_star[i]=gsqrt(para_h,ret[i-1],h_star[i-1],rt[i-1])
    mt_star[i]= - (h_star[i])/2
  }
  z2=min(h_star)  # smallest variance on the path; must stay non-negative
  # Validity flag: any violated constraint invalidates the whole path.
  drapeau=0
  if (a0<=0){drapeau=1}
  if (a1<=0){drapeau=1}
  if (a2<=0){drapeau=1}
  if (b1<=0){drapeau=1}
  if (ro<=0){drapeau=1}
  if (ro>=1){drapeau=1}
  if (is.na(g1)==TRUE){drapeau=1}else{
    if (g1>=1){drapeau=1}
    if (g1<=0){drapeau=1}
    if (abs(g1)==Inf){drapeau=1}
    if (1/abs(g1)==Inf){drapeau=1}
  }
  if (is.na(z2)==TRUE){drapeau=1}else{
    if (z2<0){drapeau=1}
    if (abs(z2)==Inf){drapeau=1}
    if (1/abs(z2)==Inf){drapeau=1}
  }
  # Return the variance path, or NA for every observation if invalid.
  if (drapeau==0){
    resultat=h_star
  }else{
    resultat=rep(NA, Z1)
  }
  return(resultat)
}
######################
###### VIX ##
######################
# Model-implied VIX for one day.
#
# Given the parameter vector para_h = c(a0, a1, a2, b1, lamda0, ro) and
# the current conditional variance h, returns the model VIX as an
# annualised weighted average of the current and long-run variance over
# a 22-day horizon.  Returns NA when any parameter or stationarity
# constraint is violated.
VIX_Q <- function(para_h, h) {
  tau <- 250  # trading days per year (annualisation factor)
  T_0 <- 22   # VIX maturity in trading days (~ one month)

  a0 <- para_h[1]; a1 <- para_h[2]; a2 <- para_h[3]
  b1 <- para_h[4]; lamda0 <- para_h[5]; ro <- para_h[6]

  # Persistence and unconditional variance of the variance process.
  Psy <- b1 + (a1 + a2 * pnorm(lamda0)) * (1 + lamda0^2) + a2 * lamda0 * dnorm(lamda0)
  h_0 <- a0 / (1 - Psy)

  # Validity: strictly positive parameters, ro in (0, 1), h a positive
  # finite number, and persistence strictly inside (0, 1).
  params_ok <- a0 > 0 && a1 > 0 && a2 > 0 && b1 > 0 && ro > 0 && ro < 1
  h_ok <- !is.na(h) && h > 0 && is.finite(h) && is.finite(1 / h)
  psy_ok <- !is.na(Psy) && Psy > 0 && Psy < 1 && is.finite(Psy) && is.finite(1 / Psy)

  if (!(params_ok && h_ok && psy_ok)) {
    return(NA)
  }

  # Weight on the current variance; the remainder goes to the long-run level.
  w <- (1 - Psy^T_0) / ((1 - Psy) * T_0)
  100 * sqrt(tau * (h * w + h_0 * (1 - w)))
}
###########################################################
##### The Log-likeelihood over all Option ####
###########################################################
# Log-likelihood of the model-implied VIX against the observed VIX.
#
# para_M = c(para_distribution, para_h): the first four entries are the
# NIG distribution parameters (unused here), the remaining six are the
# GARCH parameters passed to hstar() / VIX_Q().  Data.returns supplies
# the return series for the variance filter; Data.ret$VIX is the
# observed VIX series.  Returns a scalar Gaussian log-likelihood of the
# VIX pricing errors.
GJR_likelihood_vix <- function(para_M, Data.returns,Data.ret){
  Vix=Data.ret$VIX #### Call dividende
  # para_M = c(para_distribution,para_h)
  # alpha=para_distribution[1], beta=para_distribution[2], delta=para_distribution[3], mu=para_distribution[4]
  # a0=para_h[1]; a1=para_h[2]; a2=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; ro=para_h[6]
  ## set up the parameters of the model : para_M = c(para_distribution,para_h)
  alpha=para_M[1]; beta=para_M[2]; delta=para_M[3]; mu=para_M[4]
  a0=para_M[5]; a1=para_M[6]; a2=para_M[7]; b1= para_M[8] ; lamda0= para_M[9] ; ro=para_M[10]
  para_h <- c(a0,a1,a2,b1,lamda0,ro)
  # para_h<-c() set up the parameters of the model
  a0=para_h[1]; a1=para_h[2]; a2=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; ro=para_h[6]
  VIX_Market<-Vix
  Nvix=length(Vix)
  # Filtered conditional variance path from the return series.
  h = hstar(para_h,Data.returns)
  # Model VIX for each observation.
  # NOTE(review): h[i+1] pairs observation i with the *next* day's
  # variance; this assumes length(h) >= Nvix + 1 — confirm against the
  # alignment of Data.returns and Data.ret.
  VIX_Model <- rep(NA, Nvix)
  for (i in 1:Nvix){
    VIX_Model[i]= VIX_Q(para_h,h[i+1])
  }
  # Pricing errors; the last one is pinned to 0.
  error <- rep(NA, Nvix)
  error[Nvix]=0
  # NOTE(review): `1:Nvix-1` parses as (1:Nvix) - 1, i.e. 0..Nvix-1.
  # The i = 0 iteration is a silent no-op (zero-length replacement), so
  # the result is as intended, but `seq_len(Nvix - 1)` would express it.
  for (i in 1:Nvix-1){
    error[i]= VIX_Market[i] - VIX_Model[i]
  }
  # AR(1)-adjusted squared errors (autocorrelation parameter ro).
  error_2 <- rep(NA, Nvix)
  error_2[1]=0
  for (i in 2:Nvix){
    error_2[i]= ((error[i]-ro*error[i-1])^2)/(1-ro^2)
  }
  sigma=mean(error^2)
  # Gaussian log-likelihood of the pricing errors.
  log_like=-1/2*sum(log(sigma)+((error^2)/sigma))
  # NOTE(review): the following expression is evaluated and *discarded*
  # (it is not assigned and the function returns log_like below).  It
  # looks like the AR(1) error likelihood and also reads `error[i]` with
  # the stale loop index — possibly the intended objective; confirm
  # before removing or wiring it in.
  -(Nvix/2)*(log(2*pi)+log(sigma*(1-(ro^2))))+ (1/2)*(log(sigma*(1-(ro^2)))-log(sigma))-(1/(2*sigma))*(error[i]^2+sum(error_2))
  return(log_like)
}
|
# ------------------------------------------------------------------------------
#' Creates a TF network annotated with hg19 TSS
#'
#' @author Johann Hawe <johann.hawe@helmholtz-muenchen.de>
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
print("Load libraries and scripts.")
# ------------------------------------------------------------------------------
library(graph)
library(rtracklayer)
library(data.table)
source("scripts/lib.R")
# ------------------------------------------------------------------------------
print("Get snakemake params.")
# ------------------------------------------------------------------------------
# Input / output file paths supplied by the Snakemake workflow object.
fbinding_sites_remap <- snakemake@input$tfbs_remap
fbinding_sites_encode <- snakemake@input$tfbs_encode
fgene_annot <- snakemake@input$gene_annot
ftfbs_annot <- snakemake@output$tfbs_annot
# ------------------------------------------------------------------------------
print("Start processing.")
# ------------------------------------------------------------------------------
# TSS regions: +/- 1kb windows around each gene's transcription start,
# named by gene symbol (load_gene_annotation() comes from scripts/lib.R).
ga <- load_gene_annotation(fgene_annot)
tss <- promoters(ga, 1000, 1000)
names(tss) <- tss$SYMBOL
# ------------------------------------------------------------------------------
print("Creating TF-TSS annotation.")
# ------------------------------------------------------------------------------
#' Creates an annotation object, mapping TFBS to TSS
#'
#' Loads all available TFBS collected from public sources (Encode, Remap) and
#' overlaps those with the provided TSS.
#' Code adapted from file R/annotate-cpgs.R.
#'
#'
#' @author Johann Hawe <johann.hawe@helmholtz-muenchen.de>
#'
#' Creates an annotation object, mapping TFBS to TSS
#'
#' Loads all available TFBS collected from public sources (Encode, Remap),
#' keeps only blood-related experiments, and overlaps them with the
#' provided TSS regions.
#' Code adapted from file R/annotate-cpgs.R.
#'
#' @param fbinding_sites_remap Path to the Remap TFBS file (BED-like,
#'   readable by rtracklayer::import; names are "geo_id.TF.condition").
#' @param fbinding_sites_encode Path to the Encode TFBS table
#'   (tab-separated, no header; chrom/start/end in V1-V3, TF in V4,
#'   cell line in V6).
#' @return A logical matrix with one row per TSS (rownames = gene
#'   symbols) and one column per "TF.condition" experiment; TRUE when
#'   the TSS overlaps at least one binding site of that experiment.
#'
#' @author Johann Hawe <johann.hawe@helmholtz-muenchen.de>
#'
annotate_tfbs_to_tss <- function(fbinding_sites_remap,
                                 fbinding_sites_encode,
                                 tss) {
  # get the TFBS regions from remap
  tfbs = import(fbinding_sites_remap)
  # Split "geo_id.TF.condition" names into three metadata columns.
  ann = t(matrix(unlist(strsplit(values(tfbs)[,"name"], ".", fixed=T)), nrow=3))
  colnames(ann) = c("geo_id", "TF", "condition")
  values(tfbs) = DataFrame(name=values(tfbs)[,"name"],
                           data.frame(ann, stringsAsFactors=F))
  # we write out a table with all conditions and select the blood related ones
  conditions = t(matrix(unlist(strsplit(unique(values(tfbs)[,"name"]), ".",
                                        fixed=T)), nrow=3))
  colnames(conditions) = c("geo_id", "TF", "condition")
  conditions = conditions[order(conditions[,"condition"]),]
  conditions = conditions[,c(1,3)]
  conditions = conditions[!duplicated(paste(conditions[,1], conditions[,2])),]
  conditions = data.frame(conditions, blood.related=F)
  # Flag conditions whose name contains a blood-related cell line / tissue term.
  for (term in c("amlpz12_leukemic", "aplpz74_leukemia",
                 "bcell", "bjab", "bl41",
                 "blood", "lcl", "erythroid", "gm",
                 "hbp", "k562", "kasumi",
                 "lymphoblastoid", "mm1s", "p493",
                 "plasma", "sem", "thp1", "u937")) {
    conditions[grep(term, conditions[,2]),"blood.related"] = TRUE
  }
  # select the appropriate blood related TFBS subset
  selected = tfbs[values(tfbs)[,"condition"] %in%
                    conditions[conditions[,"blood.related"],"condition"]]
  # load the encode tfs separately
  # (start coordinates are shifted by +1: BED is 0-based, GRanges 1-based)
  encode = as.data.frame(fread(fbinding_sites_encode, header=F))
  encode = with(encode, GRanges(seqnames=V1, ranges=IRanges(V2 + 1, V3),
                                name=paste("ENCODE", V4, tolower(V6), sep="."),
                                geo_id="ENCODE", TF=V4,
                                condition=tolower(V6)))
  # filter blood related cell lines
  encode.lcl = encode[grep("gm", values(encode)[,"condition"])]
  values(encode.lcl)[,"condition"] = "lcl"
  encode.k562 = encode[grep("k562", values(encode)[,"condition"])]
  values(encode.k562)[,"condition"] = "k562"
  # combine remap and encode TFBS
  selected = c(selected, encode.lcl, encode.k562)
  # create an annotation matrix for the TSS
  # (one column per unique "TF.condition" experiment; TRUE = any overlap)
  chip = paste(values(selected)[,"TF"], values(selected)[,"condition"], sep=".")
  chip_exp = unique(chip)
  tfbs_ann = sapply(chip_exp, function(x) overlapsAny(tss,
                                                      selected[chip == x]))
  rownames(tfbs_ann) = names(tss)
  return(tfbs_ann)
}

# Build the TSS x TF-experiment annotation matrix.
tfbs_annot <- annotate_tfbs_to_tss(fbinding_sites_remap,
                                   fbinding_sites_encode,
                                   tss)
# ------------------------------------------------------------------------------
print("Saving results.")
# ------------------------------------------------------------------------------
saveRDS(tfbs_annot, file=ftfbs_annot)
# ------------------------------------------------------------------------------
print("Session info:")
# ------------------------------------------------------------------------------
sessionInfo()
|
/scripts/annotate_tss_with_tf.R
|
no_license
|
heiniglab/multiomics-network-inference
|
R
| false
| false
| 4,866
|
r
|
# ------------------------------------------------------------------------------
#' Creates a TF network annotated with hg19 TSS
#'
#' @author Johann Hawe <johann.hawe@helmholtz-muenchen.de>
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
print("Load libraries and scripts.")
# ------------------------------------------------------------------------------
library(graph)
library(rtracklayer)
library(data.table)
source("scripts/lib.R")
# ------------------------------------------------------------------------------
print("Get snakemake params.")
# ------------------------------------------------------------------------------
# Input / output file paths supplied by the Snakemake workflow object.
fbinding_sites_remap <- snakemake@input$tfbs_remap
fbinding_sites_encode <- snakemake@input$tfbs_encode
fgene_annot <- snakemake@input$gene_annot
ftfbs_annot <- snakemake@output$tfbs_annot
# ------------------------------------------------------------------------------
print("Start processing.")
# ------------------------------------------------------------------------------
# TSS regions: +/- 1kb windows around each gene's transcription start,
# named by gene symbol (load_gene_annotation() comes from scripts/lib.R).
ga <- load_gene_annotation(fgene_annot)
tss <- promoters(ga, 1000, 1000)
names(tss) <- tss$SYMBOL
# ------------------------------------------------------------------------------
print("Creating TF-TSS annotation.")
# ------------------------------------------------------------------------------
#' Creates an annotation object, mapping TFBS to TSS
#'
#' Loads all available TFBS collected from public sources (Encode, Remap) and
#' overlaps those with the provided TSS.
#' Code adapted from file R/annotate-cpgs.R.
#'
#'
#' @author Johann Hawe <johann.hawe@helmholtz-muenchen.de>
#'
#' Creates an annotation object, mapping TFBS to TSS
#'
#' Loads all available TFBS collected from public sources (Encode, Remap),
#' keeps only blood-related experiments, and overlaps them with the
#' provided TSS regions.
#' Code adapted from file R/annotate-cpgs.R.
#'
#' @param fbinding_sites_remap Path to the Remap TFBS file (BED-like,
#'   readable by rtracklayer::import; names are "geo_id.TF.condition").
#' @param fbinding_sites_encode Path to the Encode TFBS table
#'   (tab-separated, no header; chrom/start/end in V1-V3, TF in V4,
#'   cell line in V6).
#' @return A logical matrix with one row per TSS (rownames = gene
#'   symbols) and one column per "TF.condition" experiment; TRUE when
#'   the TSS overlaps at least one binding site of that experiment.
#'
#' @author Johann Hawe <johann.hawe@helmholtz-muenchen.de>
#'
annotate_tfbs_to_tss <- function(fbinding_sites_remap,
                                 fbinding_sites_encode,
                                 tss) {
  # get the TFBS regions from remap
  tfbs = import(fbinding_sites_remap)
  # Split "geo_id.TF.condition" names into three metadata columns.
  ann = t(matrix(unlist(strsplit(values(tfbs)[,"name"], ".", fixed=T)), nrow=3))
  colnames(ann) = c("geo_id", "TF", "condition")
  values(tfbs) = DataFrame(name=values(tfbs)[,"name"],
                           data.frame(ann, stringsAsFactors=F))
  # we write out a table with all conditions and select the blood related ones
  conditions = t(matrix(unlist(strsplit(unique(values(tfbs)[,"name"]), ".",
                                        fixed=T)), nrow=3))
  colnames(conditions) = c("geo_id", "TF", "condition")
  conditions = conditions[order(conditions[,"condition"]),]
  conditions = conditions[,c(1,3)]
  conditions = conditions[!duplicated(paste(conditions[,1], conditions[,2])),]
  conditions = data.frame(conditions, blood.related=F)
  # Flag conditions whose name contains a blood-related cell line / tissue term.
  for (term in c("amlpz12_leukemic", "aplpz74_leukemia",
                 "bcell", "bjab", "bl41",
                 "blood", "lcl", "erythroid", "gm",
                 "hbp", "k562", "kasumi",
                 "lymphoblastoid", "mm1s", "p493",
                 "plasma", "sem", "thp1", "u937")) {
    conditions[grep(term, conditions[,2]),"blood.related"] = TRUE
  }
  # select the appropriate blood related TFBS subset
  selected = tfbs[values(tfbs)[,"condition"] %in%
                    conditions[conditions[,"blood.related"],"condition"]]
  # load the encode tfs separately
  # (start coordinates are shifted by +1: BED is 0-based, GRanges 1-based)
  encode = as.data.frame(fread(fbinding_sites_encode, header=F))
  encode = with(encode, GRanges(seqnames=V1, ranges=IRanges(V2 + 1, V3),
                                name=paste("ENCODE", V4, tolower(V6), sep="."),
                                geo_id="ENCODE", TF=V4,
                                condition=tolower(V6)))
  # filter blood related cell lines
  encode.lcl = encode[grep("gm", values(encode)[,"condition"])]
  values(encode.lcl)[,"condition"] = "lcl"
  encode.k562 = encode[grep("k562", values(encode)[,"condition"])]
  values(encode.k562)[,"condition"] = "k562"
  # combine remap and encode TFBS
  selected = c(selected, encode.lcl, encode.k562)
  # create an annotation matrix for the TSS
  # (one column per unique "TF.condition" experiment; TRUE = any overlap)
  chip = paste(values(selected)[,"TF"], values(selected)[,"condition"], sep=".")
  chip_exp = unique(chip)
  tfbs_ann = sapply(chip_exp, function(x) overlapsAny(tss,
                                                      selected[chip == x]))
  rownames(tfbs_ann) = names(tss)
  return(tfbs_ann)
}

# Build the TSS x TF-experiment annotation matrix.
tfbs_annot <- annotate_tfbs_to_tss(fbinding_sites_remap,
                                   fbinding_sites_encode,
                                   tss)
# ------------------------------------------------------------------------------
print("Saving results.")
# ------------------------------------------------------------------------------
saveRDS(tfbs_annot, file=ftfbs_annot)
# ------------------------------------------------------------------------------
print("Session info:")
# ------------------------------------------------------------------------------
sessionInfo()
|
## Download and unzip the file
ZipFile <- "exdata-data-household_power_consumption.zip"
URL <- "http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# BUG FIX: the original if/else either downloaded OR unzipped — after a
# fresh download `PowerdataFile` was never defined and read.table() below
# failed.  Download only when the archive is missing, then always unzip.
if (!file.exists(ZipFile)) {
  download.file(URL, ZipFile)
}
PowerdataFile <- unzip(ZipFile)
## Read the file to the R system ("?" marks missing values)
PowerData <- read.table(PowerdataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, na.strings="?")
## Filter the data set based on the date (1-2 Feb 2007, d/m/Y format)
subData <- PowerData[PowerData$Date %in% c("1/2/2007","2/2/2007") ,]
## Combine date and time into POSIXlt timestamps for the x axis
datetime <- strptime(paste(subData$Date, subData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
## Prepare the "Y" data
globalActivePower <- as.numeric(subData$Global_active_power)
subMetering1 <- as.numeric(subData$Sub_metering_1)
subMetering2 <- as.numeric(subData$Sub_metering_2)
subMetering3 <- as.numeric(subData$Sub_metering_3)
## png file setting
png("plot3.png", width=480, height=480)
## Plot the three sub-metering series and add a legend
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", col="black", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
## Close the Device
dev.off()
|
/Plot3.R
|
no_license
|
chihongbo/ExData_Plotting1
|
R
| false
| false
| 1,344
|
r
|
## Download and unzip the file
ZipFile <- "exdata-data-household_power_consumption.zip"
URL <- "http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# BUG FIX: the original if/else either downloaded OR unzipped — after a
# fresh download `PowerdataFile` was never defined and read.table() below
# failed.  Download only when the archive is missing, then always unzip.
if (!file.exists(ZipFile)) {
  download.file(URL, ZipFile)
}
PowerdataFile <- unzip(ZipFile)
## Read the file to the R system ("?" marks missing values)
PowerData <- read.table(PowerdataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, na.strings="?")
## Filter the data set based on the date (1-2 Feb 2007, d/m/Y format)
subData <- PowerData[PowerData$Date %in% c("1/2/2007","2/2/2007") ,]
## Combine date and time into POSIXlt timestamps for the x axis
datetime <- strptime(paste(subData$Date, subData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
## Prepare the "Y" data
globalActivePower <- as.numeric(subData$Global_active_power)
subMetering1 <- as.numeric(subData$Sub_metering_1)
subMetering2 <- as.numeric(subData$Sub_metering_2)
subMetering3 <- as.numeric(subData$Sub_metering_3)
## png file setting
png("plot3.png", width=480, height=480)
## Plot the three sub-metering series and add a legend
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", col="black", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
## Close the Device
dev.off()
|
# MAP SERVER
# 1) REACTIVE TO FILTER THE CITY -----------------------------------------------------------------
# First we use a reactive expression to choose the input
# We created a 'fake' city to represent the Brazil map
# a_city <- reactive({
#
# if(input$cidade != "") {input$cidade} else {"fake"}
#
#
# })
v_city <- reactiveValues(cidade = NULL)
observeEvent(c(input$cidade), {
print(input$cidade)
v_city$cidade <- if(isTruthy(input$cidade) & input$cidade != "") input$cidade else NULL
# print(v_city$cidade)
})
# v_city <- reactive({
#
# req(input$cidade)
# # print(input$cidade)
# if(input$cidade != "") input$cidade else NULL
#
# })
# observer to update the city if the circle from the city is clicked
# Observer: clicking a city circle on the Brazil overview selects that city.
# The mapdeck click payload arrives as a JSON string whose `index` field is
# the 0-based row of the clicked point in `centroids`.
observeEvent(c(input$map_pointcloud_click), {

  req(input$map_pointcloud_click)

  js <- input$map_pointcloud_click
  lst <- jsonlite::fromJSON( js )
  row <- (lst$index) + 1  # convert the 0-based JS index to a 1-based R row

  centroids_filter <- centroids[row,]
  centroids_filter <- centroids_filter$abrev_muni  # city abbreviation, e.g. "for"

  v_city$cidade <- centroids_filter

  # update the city input picker so the UI reflects the clicked city
  updatePickerInput(session = session, inputId = "cidade",
                    selected = v_city$cidade)

})
# Expose the selected city to the client (used by conditionalPanel logic).
output$city <- reactive({
  v_city$cidade
})
# Evaluate even while hidden so conditionalPanels react immediately.
outputOptions(output, 'city', suspendWhenHidden = FALSE)

# Rolling record of city selections; tail(rv$prev_bins, 1) is the previous one.
rv <- reactiveValues(prev_bins = NULL)
# observer to change the labels of each year
# Observer: relabel the year radio buttons for the selected city, appending a
# bus icon to the years for which that city has public-transport data.
observeEvent(v_city$cidade, {

  if (v_city$cidade %in% c("for", "spo", "cam", "bho", "poa", "cur")) {
    # transit data available for all three years
    choices_new <- list(HTML("2017 <i class=\"fas fa-bus\"></i>"),
                        HTML("2018 <i class=\"fas fa-bus\"></i>"),
                        HTML("2019 <i class=\"fas fa-bus\"></i>"))
  } else if(v_city$cidade %in% c("rio")) {
    # Rio: transit data only for 2018-2019
    choices_new <- list(HTML("2017"),
                        HTML("2018 <i class=\"fas fa-bus\"></i>") ,
                        HTML("2019 <i class=\"fas fa-bus\"></i>") )
  } else if(v_city$cidade %in% c("rec", "goi")) {
    # Recife / Goiania: transit data only for 2019
    choices_new <- list(HTML("2017"),
                        HTML("2018") ,
                        HTML("2019 <i class=\"fas fa-bus\"></i>"))
  } else choices_new <- list("2017", "2018", "2019")

  # same underlying values in every case; only the visible labels change
  updateRadioButtons(session = session,
                     inputId = "ano",
                     choiceValues = c("2017", "2018", "2019"),
                     choiceNames = choices_new)

})
# first, identify previous city
# Track the previous and current city: prev_bins keeps at most the last city
# plus the new one, so tail(rv$prev_bins, 1) always yields the prior selection.
observeEvent(c(v_city$cidade), {
  rv$prev_bins <- c(tail(rv$prev_bins, 1), v_city$cidade)
})
# When the active-modes picker or the year changes, mirror the effective mode
# (see reactive `a()`) back into the all-modes picker so the two stay in sync.
# Leftover debug prints ("GAROTO", print(a())) removed.
observeEvent(c(input$modo_ativo, input$ano), {
  updateRadioGroupButtons(
    session = session,
    inputId = "modo_todos",
    selected = a()
  )
})

# Placeholder observer kept for a future sync in the opposite direction
# (all-modes -> active-modes); intentionally a no-op for now.
observeEvent(c(input$modo_ativo), {
})
# Accessibility estimates for the selected city, read lazily from disk.
cidade_filtrada <- reactive({
  req(v_city$cidade)  # do nothing until a city is chosen
  path <- paste0("data/new/access/access_", v_city$cidade, ".rds")
  readRDS(path)
})

# Hexagonal (H3) grid geometries for the selected city.
hex_filtrado <- reactive({
  req(v_city$cidade)
  path <- paste0("data/new/hex/hex_", v_city$cidade, ".rds")
  readRDS(path)
})
# reactive to filter the year -----------------------------------------------------------------
# Filter the city's accessibility table (a data.table) down to the chosen year.
ano_filtrado <- reactive({
  cidade_filtrada()[year == input$ano]
})
# here we should create the observer for the landuse indicator --------------------------------
# Land-use / sociodemographic data, loaded only when the land-use view is on.
# Yields NULL while the accessibility view is selected.
us_filtrado <- reactive({
  if (input$indicador_us != "us") return(NULL)
  readRDS(paste0("data/new/landuse/landuse_", v_city$cidade, ".rds"))
})
# Select the relevant land-use columns for the current sub-view:
#  * "demo"     -> population (P*) and income (R*) columns
#  * "activity" -> jobs (T*), education (E*), M* (presumably enrolment —
#                  confirm), health (S*) and CRAS (C*) columns
# Fixes: the us_filtrado() reactive was re-evaluated ~7 times and every
# prefix was wrapped in a needless c(); evaluate once and scan plain strings.
us_filtrado_type <- reactive({
  req(us_filtrado())
  dt  <- us_filtrado()   # evaluate the reactive once
  nms <- colnames(dt)    # column names are scanned repeatedly below
  if (input$demo_ou_us == "demo") {
    pop   <- nms[startsWith(nms, "P")]  # population variables
    renda <- nms[startsWith(nms, "R")]  # income variables
    cols <- c('id_hex', 'year', pop, renda)
    dt[, ..cols]
  } else if (input$demo_ou_us == "activity") {
    # activity variables, grouped by code prefix
    us1 <- nms[startsWith(nms, "T")]
    us2 <- nms[startsWith(nms, "E")]
    us3 <- nms[startsWith(nms, "M")]
    us4 <- nms[startsWith(nms, "S")]
    us5 <- nms[startsWith(nms, "C")]
    cols <- c('id_hex', 'year', us1, us2, us3, us4, us5)
    dt[, ..cols]
  }
})
# get the year of the demo or us
# Year selector for the land-use view: each sub-view has its own year input.
indicador_year_us_ok <- reactive({
  switch(input$demo_ou_us,
         demo     = input$ano_demo,
         activity = input$ano_us)
})

# Land-use table restricted to the selected year.
us_filtrado_ano <- reactive({
  us_filtrado_type()[year == indicador_year_us_ok()]
})

# Variable selector for the land-use view, again one input per sub-view.
indicador_us_ok <- reactive({
  switch(input$demo_ou_us,
         demo     = input$atividade_demo,
         activity = input$atividade_us)
})
# observeEvent(input$demo_ou_us, {
#
# if (input$demo_ou_us == "demo") {
#
# vars <- c("gua", "gua")
#
# } else if (input$demo_ou_us == "activity") {
#
# vars <- c("gu", "gu")
#
# }
#
# updatePickerInput(session = session,
# inputId = "atividade_demo",
# label = "Teste",
# choices = vars)
#
# })
# filter final indicator for us
# Final land-use layer: keep only the hex id plus the chosen variable and
# build the HTML tooltip shown on hover. Returns a data.table with columns
# id_hex / valor / id / indicador / popup.
us_filtrado_ano_atividade <- reactive({

  cols <- c("id_hex", indicador_us_ok())
  a <- us_filtrado_ano()[, ..cols]
  colnames(a) <- c('id_hex', 'valor')

  a[, id := 1:nrow(a)]

  # record which indicator this layer shows (drives the popup format below)
  a[, indicador := indicador_us_ok()]

  # unit label for the tooltip, chosen from the variable-code prefix
  unity <- fcase(
    startsWith(indicador_us_ok(), "P"), i18n()$t(" pessoas"),
    startsWith(indicador_us_ok(), "R001"), i18n()$t(" R$"),
    startsWith(indicador_us_ok(), "R002"), i18n()$t(" Quintil"),
    startsWith(indicador_us_ok(), "R003"), i18n()$t(" Decil"),
    startsWith(indicador_us_ok(), "T"), i18n()$t(" empregos"),
    startsWith(indicador_us_ok(), "E"), i18n()$t(" equipamentos de educação"),
    startsWith(indicador_us_ok(), "S"), i18n()$t(" equipamentos de saúde"),
    startsWith(indicador_us_ok(), "C"), i18n()$t(" cras")
  )

  # income variables (R00*) place the unit before the number; all others after
  a[, popup :=
      fifelse(startsWith(indicador, "R00"),
              sprintf("<strong>%s:</strong> %s %s", i18n()$t("Valor"), unity, scales::comma(as.integer(valor), big.mark = " ")),
              sprintf("<strong>%s:</strong> %s %s", i18n()$t("Valor"), scales::comma(as.integer(valor), big.mark = " "), unity)
      )]

  return(a)

})
# Attach hex geometries to the land-use layer and convert to sf (WGS84).
us_filtrado_ano_atividade_sf <- reactive({
  layer <- us_filtrado_ano_atividade()
  data.table::setkeyv(layer, c('id_hex'))
  layer <- layer[hex_filtrado(), on = 'id_hex', geom := i.geom]
  st_sf(layer, crs = 4326)
})
# 2) REACTIVE TO FILTER THE MODE -----------------------------------------------------------------
modo_cidade <- reactiveValues(teste = NULL)

# Effective transport mode: city/year combinations with transit data use the
# all-modes picker (modo_todos); everywhere else only active modes apply.
# Fix: `&&` (scalar, short-circuiting) replaces the vectorized `&` in the
# if() conditions.
a <- reactive({
  req(v_city$cidade)
  if (v_city$cidade %in% c('for', 'spo', 'cur', 'poa', 'bho', 'cam') && input$ano %in% c(2017, 2018, 2019)) {
    return(input$modo_todos)
  } else if (v_city$cidade %in% c('rio') && input$ano %in% c(2018, 2019)) {
    return(input$modo_todos)
  } else if (v_city$cidade %in% c('rec', 'goi') && input$ano %in% c(2019)) {
    return(input$modo_todos)
  } else {
    return(input$modo_ativo)
  }
})

# TRUE when the effective mode is motorized (drives the time-threshold UI).
output$tp <- reactive({
  a() %in% c("public_transport", "car")
})
outputOptions(output, 'tp', suspendWhenHidden = FALSE)
# Reactive para a modo
# Accessibility rows for the effective transport mode.
modo_filtrado <- reactive({
  ano_filtrado()[mode == a()]
})

# Keep hex id, total population (P001) and every column of the chosen
# indicator (CMA / CMP / TMI), matched by name.
indicador_filtrado <- reactive({
  ind_cols <- grep(input$indicador, colnames(modo_filtrado()),
                   ignore.case = TRUE, value = TRUE)
  cols <- c('id_hex', 'P001', ind_cols)
  modo_filtrado()[, ..cols]
})

# Activity selector: each indicator has its own dedicated activity input.
indicador_ok <- reactive({
  switch(input$indicador,
         CMA = input$atividade_cma,
         CMP = input$atividade_cmp,
         TMI = input$atividade_min)
})
# Reactive para a atividade para indicador cumulativo
# Cumulative indicators (CMA/CMP): keep hex id, population and every column
# of the chosen activity within the already-filtered indicator columns.
atividade_filtrada_cma <- reactive({

  req(input$indicador %in% c("CMA", "CMP"))

  cols <- c('id_hex', 'P001', grep(indicador_ok(), colnames(indicador_filtrado()), ignore.case = TRUE, value = TRUE))
  indicador_filtrado()[, ..cols]

})

# Name of the exact indicator column currently mapped. Set as a side effect
# below and in tempo_filtrado(); read by scale_limits().
ind <- reactiveValues(ind = NULL)

# Minimum-travel-time indicator (TMI): single value column per activity,
# renamed to `valor`, plus the tooltip text shown on hover.
atividade_filtrada_min <- reactive({

  if (input$indicador == "TMI") {

    cols <- c('id_hex', 'P001', grep(input$atividade_min, colnames(indicador_filtrado()), ignore.case = TRUE, value = TRUE))
    indicador_filtrado1 <- indicador_filtrado()[, ..cols]

    # remember the indicator column name for the scale-limits lookup
    ind$ind <- cols[3]

    colnames(indicador_filtrado1) <- c('id_hex', 'P001', 'valor')
    indicador_filtrado1[, id := 1:nrow(indicador_filtrado1)]

    indicador_filtrado1[, popup := paste0(i18n()$t("<strong>População:</strong> "), P001, i18n()$t("<br><strong>Valor da acessibilidade:</strong> "), round(valor, 0), " ", i18n()$t("minutos"))]

    return(indicador_filtrado1)

  }

})
# 5) REACTIVE TO FILTER THE TIME THRESHOLD ---------------------------------------------------------
# This filter is only applied to the cumulative indicator
# Select time threshold
# Travel-time threshold: motorized modes use the transit slider, active
# modes (walk/bicycle) use their own slider.
b <- reactive({
  req(v_city$cidade)
  mode <- a()
  if (mode %in% c("public_transport", "car")) {
    input$tempo_tp
  } else if (mode %in% c("walk", "bicycle")) {
    input$tempo_ativo
  }
})
# Reactive for time threshold
# Cumulative layer at the chosen time threshold: picks the column matching
# b() minutes, renames it to `valor` and builds the hover tooltip.
tempo_filtrado <- reactive({

  req(atividade_filtrada_cma())

  cols <- c('id_hex', 'P001', grep(b(), colnames(atividade_filtrada_cma()), ignore.case = TRUE, value = TRUE))
  atividade_filtrada1 <- atividade_filtrada_cma()[, ..cols]

  # remember the indicator column name for the scale-limits lookup
  ind$ind <- cols[3]

  colnames(atividade_filtrada1) <- c('id_hex', 'P001', 'valor')
  atividade_filtrada1[, id := 1:nrow(atividade_filtrada1)]

  atividade_filtrada1[, popup := paste0(i18n()$t("<strong>População:</strong> "), P001, i18n()$t("<br><strong>Valor da acessibilidade:</strong> "),
                                        scales::comma(as.integer(valor), big.mark = " "))]

})
# 6) TRANSFORM TO SF -------------------------------------------------------------------------------
# TMI layer with hex geometries attached, as an sf object (WGS84).
atividade_filtrada_min_sf <- reactive({
  req(atividade_filtrada_min())
  layer <- atividade_filtrada_min()
  data.table::setkeyv(layer, c('id_hex'))
  layer <- layer[hex_filtrado(), on = 'id_hex', geom := i.geom]
  st_sf(layer, crs = 4326)
})

# Cumulative layer with hex geometries attached, as an sf object (WGS84).
tempo_filtrado_sf <- reactive({
  layer <- tempo_filtrado()
  data.table::setkeyv(layer, c('id_hex'))
  layer <- layer[hex_filtrado(), on = 'id_hex', geom := i.geom]
  st_sf(layer, crs = 4326)
})
# filter the scale limits of each indicator
# Min/max of the current indicator for this city and mode, used to keep the
# colour scale fixed while the user changes year or threshold.
scale_limits <- reactive({

  # `&` here is the correct vectorized operator (data.table row filter)
  access_extremes1 <- access_limits[abbrev_muni == v_city$cidade & mode == a()]

  # keep only the columns of the indicator currently mapped (ind$ind)
  cols <- c("abbrev_muni", "mode", grep(ind$ind, colnames(access_extremes1), ignore.case = TRUE, value = TRUE))
  access_extremes1 <- access_extremes1[,..cols]
  colnames(access_extremes1) <- c("abbrev_muni", "mode", "min", "max")

  return(access_extremes1)

})
# 7) RENDER BRAZIL'S BASEMAP -------------------------------------------------------
# Initial Brazil-wide basemap with one clickable point per covered city.
# Clicking a point is handled by the map_pointcloud_click observer above.
output$map <- renderMapdeck({

  mapdeck(location = c(-43.95988, -19.902739),  # roughly centred on Brazil
          zoom = 3,
          style = "mapbox://styles/kauebraga/cl3vtf5ay005v14pkzouvp0yk"
  ) %>%
    add_pointcloud(data = centroids,
                   lon = "lon", lat = "lat",
                   update_view = FALSE,
                   layer_id = "brasil",  # cleared when a city is rendered
                   fill_opacity = 170,
                   tooltip = "name_muni"
    )

})
# observeEvent(c(input$map_pointcloud_click), {
#
# # req(input$map_arc_click)
#
# js <- input$map_pointcloud_click
# lst <- jsonlite::fromJSON( js )
# row <- (lst$index) + 1
#
# print(row)
#
# })
# The basemap is set up: dismiss the full-page startup loading screen.
waiter_hide()
# reactive to get city limits
# City boundary polygon as sf (WGS84).
# Fix: these reactives previously ended in a self-assignment
# (e.g. `zoom1 <- 10`) and returned its value only incidentally;
# they now return values directly.
limits_filtrado <- reactive({
  limits[abrev_muni == v_city$cidade] %>% st_sf(crs = 4326)
})

# Centroid row (lon/lat) of the selected city, used to fly the camera there.
centroid_go <- reactive({
  centroids[abrev_muni == v_city$cidade]
})

# Initial zoom level, tuned to each city's footprint.
zoom1 <- reactive({
  if (v_city$cidade %in% c("spo", "man", "cgr", "bsb")) {
    9    # largest footprints
  } else if (v_city$cidade %in% c("mac", "for", "nat", "rec", "sal", "slz", "bho")) {
    11   # most compact cities
  } else {
    10
  }
})

# Layer id of the polygon layer currently on the map, so the next render can
# clear it. Starts at "us_initial", matching the first render's fallback id.
mapdeck_id_clear <- reactiveVal("us_initial")
# 8) OBSERVER TO RENDER THE CITY INDICATOR -------------------------------------------------------
# Re-render the city layer whenever the selected city changes: fly the camera
# to the city, clear the previous layer and draw either the accessibility or
# the land-use polygons.
# Fixes: fill_color[-1] dropped the colour of the FIRST hexagon instead of
# the appended scale-max value (the update observers correctly drop the last
# element) — now drops the last; debug print and unused `mapdeck_options`
# removed; scalar if() conditions use `&&`.
observeEvent({v_city$cidade}, {

  # layer id for this render; the previous one is held in mapdeck_id_clear()
  mapdeck_id <- ifelse(input$indicador_us == "access", "access_initial", "us_initial")

  # full-page spinner while data is read and the layer is built
  waiter_show(html = tagList(spin_loaders(id = 2, color = "black")),
              color = "rgba(233, 235, 240, .4)")

  if (input$indicador_us == "access") {

    # pick the layer for the active indicator
    data <- if (input$indicador_us == "access" && input$indicador %in% c("CMA", "CMP")) {
      tempo_filtrado_sf()
    } else if (input$indicador_us == "access" && input$indicador %in% c("TMI")) {
      atividade_filtrada_min_sf()
    } else if (input$indicador_us == "us") {
      us_filtrado_ano_atividade_sf()
    }

    # draw low values first so high values end up on top
    data <- data %>% dplyr::arrange(valor)

    legend_converter <- function(x) as.integer(x)

    legend <- if (input$indicador_us == "access" && input$indicador %in% c("CMA", "CMP")) {
      i18n()$t("Oportunidades Acessíveis")
    } else if (input$indicador_us == "access" && input$indicador %in% c("TMI")) {
      i18n()$t("Minutos até a oportunidade mais próxima")
    } else if (input$indicador_us == "us") {
      i18n()$t("Quantidade")
    }

    palette <- fcase(input$indicador == "CMA", "inferno",
                     input$indicador == "CMP", "viridis",
                     input$indicador == "TMI", "viridis")

    # append the city-wide max so the scale stays fixed across re-renders;
    # TMI is negated so that *short* times get the "high" end of the palette
    fill_values <- c(data$valor, scale_limits()$max)
    if (input$indicador == "TMI") fill_values <- -fill_values

    fill_color <- colourvalues::colour_values(
      x = fill_values,
      alpha = 200,
      palette = palette
    )

    # drop the colour of the appended max value (was fill_color[-1], which
    # shifted every hexagon's colour by one)
    fill_color <- fill_color[-length(fill_color)]

    data$fill <- fill_color

    # legend built from the same (positive) value range
    l <- colourvalues::colour_values(
      x = c(data$valor, scale_limits()$max),
      n_summaries = 6,
      palette = palette
    )

    legend <- mapdeck::legend_element(
      variables = legend_converter(l$summary_values),
      colours = if (input$indicador == "TMI") rev(l$summary_colours) else l$summary_colours,
      colour_type = "fill",
      variable_type = "gradient",
      title = legend
    )
    js_legend <- mapdeck::mapdeck_legend(legend)

    # fly to the city, clear the previous polygon layer, the Brazil point
    # cloud and the previous legend, then draw the new layer
    mapdeck_update(map_id = "map") %>%
      mapdeck_view(location = c(centroid_go()$lon, centroid_go()$lat), zoom = zoom1(),
                   duration = 4000, transition = "fly") %>%
      clear_polygon(layer_id = mapdeck_id_clear()) %>%
      clear_pointcloud(layer_id = "brasil") %>%
      clear_legend(layer_id = mapdeck_id_clear()) %>%
      add_polygon(
        data = data,
        fill_colour = "fill",
        layer_id = mapdeck_id,
        update_view = FALSE,
        focus_layer = FALSE,
        tooltip = "popup",
        legend = js_legend,
        stroke_width = NULL,
        stroke_colour = NULL,
        stroke_opacity = 0
      )

  } else if (input$indicador_us == "us") {

    legend_converter_us <- function(x) {
      return(scales::comma(as.integer(x), big.mark = " ", accuracy = 1))
    }

    # thousands separators only for counts (P* population, T* jobs)
    legend_converter <- if (input$indicador_us == "us" && grepl("^(P|T)", indicador_us_ok())) {
      legend_converter_us
    } else as.integer

    # diverging palette for income quantiles, viridis for activity counts
    legend_fill <- if (input$demo_ou_us == "demo" && input$atividade_demo %in% c("R002", "R003")) {
      "rdylbu"
    } else if (input$demo_ou_us == "activity") "viridis"

    legend_title <- if (input$demo_ou_us == "demo" && input$atividade_demo %in% c("R001")) {
      "Renda per capita (R$)"
    } else if (input$demo_ou_us == "demo" && input$atividade_demo %in% c("R002")) {
      "Quintil de renda"
    } else if (input$demo_ou_us == "demo" && input$atividade_demo %in% c("R003")) {
      "Decil de renda"
    } else if (input$demo_ou_us == "activity") "Quantidade" else "Quantidade"

    # NOTE(review): unlike the access branch, the "brasil" point cloud is not
    # cleared here — confirm whether that is intentional.
    mapdeck_update(map_id = "map") %>%
      mapdeck_view(location = c(centroid_go()$lon, centroid_go()$lat), zoom = zoom1(),
                   duration = 4000, transition = "fly") %>%
      clear_polygon(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
      clear_legend(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
      add_polygon(
        data = us_filtrado_ano_atividade_sf(),
        fill_colour = "valor",
        fill_opacity = 200,
        layer_id = mapdeck_id,
        palette = legend_fill,
        update_view = FALSE,
        focus_layer = FALSE,
        tooltip = "popup",
        legend = TRUE,
        na_colour = "#80808000",
        legend_options = list(title = i18n()$t(legend_title)),
        legend_format = list(fill_colour = legend_converter),
        stroke_width = NULL,
        stroke_colour = NULL,
        stroke_opacity = 0
      )
  }

  # remember which layer we drew so the next render can clear it
  mapdeck_id_clear(mapdeck_id)
  waiter_hide()

})
# Observe any change on the atrributes on the city and change the map accordingly
# Re-render the accessibility layer in place when any of its controls change
# (indicator, year, mode, activity or time threshold), without moving the
# camera. Leftover debug prints and dead commented-out code removed.
observeEvent({c(input$indicador_us,
                input$indicador,
                input$ano,
                input$modo_todos, input$modo_ativo,
                input$atividade_cma, input$atividade_cmp, input$atividade_min,
                input$tempo_tp, input$tempo_ativo)}, {

  req(input$indicador_us == "access")

  legend_converter <- function(x) as.integer(x)
  mapdeck_id <- "access_update"

  if (input$indicador_us == "access") {

    if (input$indicador == "TMI") {

      data <- atividade_filtrada_min_sf() %>%
        dplyr::arrange(valor)

      # negate so shorter travel times take the "high" end of viridis; the
      # appended city-wide max keeps the scale fixed across re-renders
      fill_color <- colourvalues::colour_values(
        x = -c(data$valor, scale_limits()$max),
        alpha = 200,
        palette = "viridis"
      )

      # drop the colour of the appended max value
      fill_color <- fill_color[-length(fill_color)]

      data <- data %>%
        dplyr::mutate(fill = fill_color)

      # legend built on the (positive) range, colours reversed to match
      l <- colourvalues::colour_values(
        x = c(data$valor, scale_limits()$max),
        n_summaries = 6,
        palette = "viridis"
      )

      legend <- mapdeck::legend_element(
        variables = legend_converter(l$summary_values),
        colours = rev(l$summary_colours),
        colour_type = "fill",
        variable_type = "gradient",
        title = i18n()$t("Minutos até a oportunidade mais próxima")
      )
      js_legend <- mapdeck::mapdeck_legend(legend)

      # clear the previous layer; the dummy id "oi" is used when the previous
      # layer is this one, so add_polygon can update it in place
      mapdeck_update(map_id = "map") %>%
        clear_polygon(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
        clear_legend(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
        add_polygon(
          data = data,
          fill_colour = "fill",
          layer_id = mapdeck_id,
          update_view = FALSE,
          tooltip = "popup",
          legend = js_legend,
          stroke_width = 0,
          stroke_colour = NULL,
          stroke_opacity = 0
        )

    } else if (input$indicador %in% c("CMA", "CMP")) {

      data <- tempo_filtrado_sf() %>%
        dplyr::arrange(valor)

      fill_color <- colourvalues::colour_values(
        x = c(data$valor, scale_limits()$max),
        alpha = 200,
        palette = "inferno"
      )

      # drop the colour of the appended max value
      data <- data %>%
        dplyr::mutate(fill = fill_color[-length(fill_color)])

      l <- colourvalues::colour_values(
        x = c(data$valor, scale_limits()$max),
        n_summaries = 6,
        palette = "inferno"
      )

      legend <- mapdeck::legend_element(
        variables = legend_converter(l$summary_values),
        colours = l$summary_colours,
        colour_type = "fill",
        variable_type = "gradient",
        title = i18n()$t("Oportunidades Acessíveis")
      )
      js_legend <- mapdeck::mapdeck_legend(legend)

      mapdeck_update(map_id = "map") %>%
        clear_polygon(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
        clear_legend(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
        add_polygon(
          data = data,
          fill_colour = "fill",
          fill_opacity = 200,
          layer_id = mapdeck_id,
          update_view = FALSE,
          focus_layer = FALSE,
          tooltip = "popup",
          legend = js_legend,
          stroke_width = NULL,
          stroke_colour = NULL,
          stroke_opacity = 0
        )
    }
  }

  # remember which layer we drew so the next render can clear it
  mapdeck_id_clear(mapdeck_id)
})
# Observe any change on the atrributes on the city and change the map accordingly
# only for land use
# Re-render the land-use layer in place when any of its controls change
# (sub-view, year or variable), without moving the camera. Debug print and
# dead commented-out code removed; scalar if() conditions use `&&`.
observeEvent({c(input$indicador_us,
                input$ano_us,
                input$ano_demo,
                input$demo_ou_us,
                input$atividade_demo, input$atividade_us)}, {

  legend_converter <- function(x) as.integer(x)

  # diverging palette for income quantiles, viridis for activity counts
  legend_fill <- if (input$demo_ou_us == "demo" && input$atividade_demo %in% c("R002", "R003")) {
    "rdylbu"
  } else if (input$demo_ou_us == "activity") "viridis"

  legend_title <- if (input$demo_ou_us == "demo" && input$atividade_demo %in% c("R001")) {
    "Renda per capita (R$)"
  } else if (input$demo_ou_us == "demo" && input$atividade_demo %in% c("R002")) {
    "Quintil de renda"
  } else if (input$demo_ou_us == "demo" && input$atividade_demo %in% c("R003")) {
    "Decil de renda"
  } else if (input$demo_ou_us == "activity") "Quantidade" else "Quantidade"

  if (input$indicador_us == "us") {

    mapdeck_id <- "us_update"

    # clear the previous layer; the dummy id "oi" is used when the previous
    # layer is this one, so add_polygon can update it in place
    mapdeck_update(map_id = "map") %>%
      clear_polygon(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
      clear_legend(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
      add_polygon(
        data = us_filtrado_ano_atividade_sf(),
        fill_colour = "valor",
        fill_opacity = 200,
        layer_id = mapdeck_id,
        palette = legend_fill,
        update_view = FALSE,
        focus_layer = FALSE,
        tooltip = "popup",
        legend = TRUE,
        legend_options = list(title = i18n()$t(legend_title)),
        legend_format = list(fill_colour = legend_converter),
        stroke_width = NULL,
        stroke_colour = NULL,
        na_colour = "#80808000",
        stroke_opacity = 0
      )

    # remember which layer we drew so the next render can clear it
    mapdeck_id_clear(mapdeck_id)
  }

})
|
/atlasacessibilidade/app_files/map_server.R
|
no_license
|
ipeaGIT/acesso_app
|
R
| false
| false
| 36,468
|
r
|
# MAP SERVER
# 1) REACTIVE TO FILTER THE CITY -----------------------------------------------------------------
# First we use a reactive expression to choose the input
# We created a 'fake' city to represent the Brazil map
# a_city <- reactive({
#
# if(input$cidade != "") {input$cidade} else {"fake"}
#
#
# })
# Holds the currently selected city; NULL means the Brazil overview is shown.
v_city <- reactiveValues(cidade = NULL)

# Keep v_city in sync with the city picker; an empty selection resets to NULL.
observeEvent(c(input$cidade), {
  # `&&` (scalar, short-circuiting) is the correct operator for an if()
  # condition; the vectorized `&` would still evaluate the second operand
  # even when the input is not truthy. Debug print() removed.
  v_city$cidade <- if (isTruthy(input$cidade) && input$cidade != "") input$cidade else NULL
})
# v_city <- reactive({
#
# req(input$cidade)
# # print(input$cidade)
# if(input$cidade != "") input$cidade else NULL
#
# })
# observer to update the city if the circle from the city is clicked
observeEvent(c(input$map_pointcloud_click), {
req(input$map_pointcloud_click)
js <- input$map_pointcloud_click
lst <- jsonlite::fromJSON( js )
row <- (lst$index) + 1
# print(row)
centroids_filter <- centroids[row,]
centroids_filter <- centroids_filter$abrev_muni
# print(centroids_filter)
v_city$cidade <- centroids_filter
# update the city input picker
updatePickerInput(session = session, inputId = "cidade",
selected = v_city$cidade)
})
output$city <- reactive({
v_city$cidade
# !is.null(v_city$cidade)
})
outputOptions(output, 'city', suspendWhenHidden = FALSE)
rv <- reactiveValues(prev_bins = NULL)
# observer to change the labels of each year
observeEvent(v_city$cidade, {
if (v_city$cidade %in% c("for", "spo", "cam", "bho", "poa", "cur")) {
choices_new <- list(HTML("2017 <i class=\"fas fa-bus\"></i>"),
HTML("2018 <i class=\"fas fa-bus\"></i>"),
HTML("2019 <i class=\"fas fa-bus\"></i>"))
} else if(v_city$cidade %in% c("rio")) {
choices_new <- list(HTML("2017"),
HTML("2018 <i class=\"fas fa-bus\"></i>") ,
HTML("2019 <i class=\"fas fa-bus\"></i>") )
} else if(v_city$cidade %in% c("rec", "goi")) {
choices_new <- list(HTML("2017"),
HTML("2018") ,
HTML("2019 <i class=\"fas fa-bus\"></i>"))
} else choices_new <- list("2017", "2018", "2019")
updateRadioButtons(session = session,
inputId = "ano",
choiceValues = c("2017", "2018", "2019"),
choiceNames = choices_new)
})
# first, identify previous city
observeEvent(c(v_city$cidade), {
# rv$prev_bins <- c(rv$prev_bins, v_city$cidade)
rv$prev_bins <- c(tail(rv$prev_bins, 1), v_city$cidade)
# print("rv$prev_bins")
# print(v_city$cidade == rv$prev_bins)
# print(all(v_city$cidade == rv$prev_bins))
})
observeEvent(c(input$modo_ativo, input$ano), {
print("GAROTO")
print(a())
updateRadioGroupButtons(
session = session,
inputId = "modo_todos",
selected = a()
)
})
observeEvent(c(input$modo_ativo), {
# print("GAROTO")
#
# updateRadioGroupButtons(
# session = session,
# inputId = "modo_todos",
# selected = "walk"
# )
})
cidade_filtrada <- reactive({
# only run when city value is not NULL
req(v_city$cidade)
# print(v_city$cidade)
# print(input$cidade)
# open city and hex here!!!!!!!!!!!!
readRDS(sprintf("data/new/access/access_%s.rds", v_city$cidade))
# acess[sigla_muni == v_city$city]
# print(head(readRDS(sprintf("data/new/access_%s.rds", v_city$cidade))))
})
hex_filtrado <- reactive({
# only run when city value is not NULL
req(v_city$cidade)
# open city and hex here!!!!!!!!!!!!
readRDS(sprintf("data/new/hex/hex_%s.rds", v_city$cidade))
})
# reactive to filter the year -----------------------------------------------------------------
ano_filtrado <- reactive({
# print(table(cidade_filtrada()$year))
# print(sprintf("a: %s", a()))
# print(sprintf("Year selected: %s", input$ano))
# print(sprintf("US: %s", input$demo_ou_us))
cidade_filtrada()[year == input$ano]
# print(nrow(cidade_filtrada()[year == input$ano]))
})
# here we should create the observer for the landuse indicator --------------------------------
us_filtrado <- reactive({
# print(sprintf("Us deu certo? %s", input$indicador_us))
if (input$indicador_us == "us") {
# open city and hex here!!!!!!!!!!!!
readRDS(sprintf("data/new/landuse/landuse_%s.rds", v_city$cidade))
}
})
us_filtrado_type <- reactive({
req(us_filtrado())
# print(sprintf("aaiaiai %s", input$demo_ou_us))
if (input$demo_ou_us == "demo") {
# get pop variables
pop <- colnames(us_filtrado())[startsWith(colnames(us_filtrado()), c("P"))]
# print(pop)
# get renda variables
renda <- colnames(us_filtrado())[startsWith(colnames(us_filtrado()), c("R"))]
# print(renda)
cols <- c('id_hex', 'year', pop, renda)
# print(cols)
us_filtrado()[, ..cols]
} else if (input$demo_ou_us == "activity") {
# get us variables
us1 <- colnames(us_filtrado())[startsWith(colnames(us_filtrado()), c("T"))]
us2 <- colnames(us_filtrado())[startsWith(colnames(us_filtrado()), c("E"))]
us3 <- colnames(us_filtrado())[startsWith(colnames(us_filtrado()), c("M"))]
us4 <- colnames(us_filtrado())[startsWith(colnames(us_filtrado()), c("S"))]
us5 <- colnames(us_filtrado())[startsWith(colnames(us_filtrado()), c("C"))]
cols <- c('id_hex', 'year', us1, us2, us3, us4, us5)
us_filtrado()[, ..cols]
}
})
# get the year of the demo or us
indicador_year_us_ok <- reactive({
# print(input$indicador)
if (input$demo_ou_us == "demo") {
input$ano_demo
} else if (input$demo_ou_us == "activity"){
input$ano_us
}
})
us_filtrado_ano <- reactive({
# nrow(us_filtrado_type()[year == input$ano_us])
us_filtrado_type()[year == indicador_year_us_ok()]
})
# para selecionar o input de uso do solo correto
indicador_us_ok <- reactive({
# print(input$indicador)
if (input$demo_ou_us == "demo") {
input$atividade_demo
} else if (input$demo_ou_us == "activity"){
input$atividade_us
}
})
# observeEvent(input$demo_ou_us, {
#
# if (input$demo_ou_us == "demo") {
#
# vars <- c("gua", "gua")
#
# } else if (input$demo_ou_us == "activity") {
#
# vars <- c("gu", "gu")
#
# }
#
# updatePickerInput(session = session,
# inputId = "atividade_demo",
# label = "Teste",
# choices = vars)
#
# })
# filter final indicator for us
us_filtrado_ano_atividade <- reactive({
# print(nrow(us_filtrado_ano()))
# print(colnames(us_filtrado_ano()))
# print(sprintf("Indicador us ok: %s", indicador_us_ok()))
# print(colnames(us_filtrado_ano()))
cols <- c("id_hex", indicador_us_ok())
# print(cols)
a <- us_filtrado_ano()[, ..cols]
colnames(a) <- c('id_hex', 'valor')
# print(head(a))
a[, id := 1:nrow(a)]
# identifica indicador
a[, indicador := indicador_us_ok()]
# make tooltip
unity <- fcase(
startsWith(indicador_us_ok(), "P"), i18n()$t(" pessoas"),
startsWith(indicador_us_ok(), "R001"), i18n()$t(" R$"),
startsWith(indicador_us_ok(), "R002"), i18n()$t(" Quintil"),
startsWith(indicador_us_ok(), "R003"), i18n()$t(" Decil"),
startsWith(indicador_us_ok(), "T"), i18n()$t(" empregos"),
startsWith(indicador_us_ok(), "E"), i18n()$t(" equipamentos de educação"),
startsWith(indicador_us_ok(), "S"), i18n()$t(" equipamentos de saúde"),
startsWith(indicador_us_ok(), "C"), i18n()$t(" cras")
)
a[, popup :=
fifelse(startsWith(indicador, "R00"),
sprintf("<strong>%s:</strong> %s %s", i18n()$t("Valor"), unity, scales::comma(as.integer(valor), big.mark = " ")),
sprintf("<strong>%s:</strong> %s %s", i18n()$t("Valor"), scales::comma(as.integer(valor), big.mark = " "), unity)
)]
# print(head(a))
# print(valor)
# return(a)
return(a)
})
  # Attach hex geometries to the indicator table and convert to sf.
  # NOTE(review): the data.table join below adds `geom` by reference (`:=`),
  # i.e. it mutates the table returned by us_filtrado_ano_atividade().
  us_filtrado_ano_atividade_sf <- reactive({
    data.table::setkeyv(us_filtrado_ano_atividade(), c('id_hex'))
    a <- us_filtrado_ano_atividade()[hex_filtrado(), on = 'id_hex', geom := i.geom]
    # to sf; hex grids are stored in WGS84 (EPSG:4326)
    a <- st_sf(a, crs = 4326)
    return(a)
  })
  # 2) REACTIVE TO FILTER THE MODE -----------------------------------------------------------------
  # NOTE(review): `teste` appears unused in this chunk — confirm before removing.
  modo_cidade <- reactiveValues(teste = NULL)
  # Resolve which transport-mode input applies: city/year combinations that
  # have public-transport data use the full mode picker (`modo_todos`);
  # everything else falls back to active modes only (`modo_ativo`).
  a <- reactive({
    # !all(v_city$cidade == rv$prev_bins)
    req(v_city$cidade)
    if (v_city$cidade %in% c('for', 'spo', 'cur', 'poa', 'bho', 'cam') & input$ano %in% c(2017, 2018, 2019)) {
      return(input$modo_todos)
    } else if(v_city$cidade %in% c('rio') & input$ano %in% c(2018, 2019)) {
      return(input$modo_todos)
    } else if(v_city$cidade %in% c('rec', 'goi') & input$ano %in% c(2019)) {
      return(input$modo_todos)
    } else {
      return(input$modo_ativo)
    }
  })
  # Flag exposed to the UI (e.g. conditionalPanel): TRUE while the selected
  # mode is motorized (public transport or car).
  output$tp <- reactive({
    a() %in% c("public_transport", "car")
  })
  # Keep `tp` evaluated even while its panel is hidden.
  outputOptions(output, 'tp', suspendWhenHidden = FALSE)
  # Reactive for the mode: keep only rows of the selected travel mode.
  modo_filtrado <- reactive({
    # print(sprintf("ano filtrado nrow: %s", nrow(ano_filtrado())))
    # print(sprintf("Mode selected: %s", a()))
    ano_filtrado()[mode == a()]
  })
  # 3) REACTIVE TO FILTER THE INDICATOR --------------------------------------------------------------
  # Keep id/population plus every column whose name matches the chosen
  # indicator (CMA / CMP / TMI).
  indicador_filtrado <- reactive({
    # print(sprintf("go: %s", input$indicador))
    # print(sprintf("modo filtrado nrow: %s", nrow(modo_filtrado())))
    cols <- c('id_hex', 'P001', grep(input$indicador, colnames(modo_filtrado()), ignore.case = TRUE, value = TRUE))
    modo_filtrado()[, ..cols]
    # print(head(modo_filtrado()[, ..cols])) # ok
  })
  # 4) REACTIVE TO FILTER THE ACTIVITY ---------------------------------------------------------------
  # Each indicator has its own activity picker in the UI; return the one
  # that is currently relevant.
  indicador_ok <- reactive({
    # print(input$indicador)
    if (input$indicador %in% c("CMA")) {
      input$atividade_cma
    } else if (input$indicador == "CMP"){
      input$atividade_cmp
    } else if (input$indicador == "TMI") {
      input$atividade_min
    }
  })
  # Activity filter for the cumulative indicators (CMA / CMP):
  # keep id/population plus the columns of the chosen activity.
  atividade_filtrada_cma <- reactive({
    req(input$indicador %in% c("CMA", "CMP"))
    # print(input$atividade_cma)
    # print(input$atividade_cmp)
    # print(colnames(indicador_filtrado()))
    cols <- c('id_hex', 'P001', grep(indicador_ok(), colnames(indicador_filtrado()), ignore.case = TRUE, value = TRUE))
    indicador_filtrado()[, ..cols]
    # print(head(indicador_filtrado()[, ..cols]))
  })
  # Holds the name of the indicator column currently on the map; written by
  # atividade_filtrada_min()/tempo_filtrado() and read by scale_limits().
  ind <- reactiveValues(ind = NULL)
  # Activity filter for the minimum-travel-time indicator (TMI);
  # renames the value column to `valor` and builds the tooltip HTML.
  atividade_filtrada_min <- reactive({
    if (input$indicador == "TMI") {
      # print("Indicador ok")
      # print(indicador_ok())
      # req(input$atividade_min)
      cols <- c('id_hex', 'P001', grep(input$atividade_min, colnames(indicador_filtrado()), ignore.case = TRUE, value = TRUE))
      indicador_filtrado1 <- indicador_filtrado()[, ..cols]
      # store the indicator column name for scale_limits()
      ind$ind <- cols[3]
      colnames(indicador_filtrado1) <- c('id_hex', 'P001', 'valor')
      indicador_filtrado1[, id := 1:nrow(indicador_filtrado1)]
      indicador_filtrado1[, popup := paste0(i18n()$t("<strong>População:</strong> "), P001, i18n()$t("<br><strong>Valor da acessibilidade:</strong> "), round(valor, 0), " ", i18n()$t("minutos"))]
      return(indicador_filtrado1)
    }
  })
  # 5) REACTIVE TO FILTER THE TIME THRESHOLD ---------------------------------------------------------
  # This filter is only applied to the cumulative indicator
  # Select time threshold
  b <- reactive({
    req(v_city$cidade)
    # switch (v_city$cidade,
    #         c('for', 'spo', 'rio', 'cur', 'poa', 'bho', 'rec') & input$modo_todos %in% "public_transport" = input$tempo_tp,
    #         c('for', 'spo', 'rio', 'cur', 'poa', 'bho', 'rec') & input$modo_todos %in% "public_transport" = input$tempo_tp,
    #         c('for', 'spo', 'rio', 'cur', 'poa', 'bho', 'rec') & input$modo_todos %in% "public_transport" = input$tempo_tp,
    #
    #         )
    # motorized modes use the public-transport slider; active modes use their own
    if (a() %in% c("public_transport", "car")) input$tempo_tp else if(a() %in% c("walk", "bicycle")) input$tempo_ativo
  })
  # Reactive for time threshold: keep the cumulative-access column for the
  # chosen travel-time cut-off, rename it to `valor` and build the tooltip.
  tempo_filtrado <- reactive({
    # print(sprintf("b: %s", b()))
    # print(colnames(atividade_filtrada_cma()))
    req(atividade_filtrada_cma())
    cols <- c('id_hex', 'P001', grep(b(), colnames(atividade_filtrada_cma()), ignore.case = TRUE, value = TRUE))
    atividade_filtrada1 <- atividade_filtrada_cma()[, ..cols]
    # store the indicator column name for scale_limits()
    ind$ind <- cols[3]
    # print(ind$ind)
    colnames(atividade_filtrada1) <- c('id_hex', 'P001', 'valor')
    atividade_filtrada1[, id := 1:nrow(atividade_filtrada1)]
    atividade_filtrada1[, popup := paste0(i18n()$t("<strong>População:</strong> "), P001, i18n()$t("<br><strong>Valor da acessibilidade:</strong> "),
                                          scales::comma(as.integer(valor), big.mark = " "))]
    # print(head(atividade_filtrada1))
  })
  # 6) TRANSFORM TO SF -------------------------------------------------------------------------------
  # TMI table with hex geometries attached, as an sf object (EPSG:4326).
  # NOTE(review): the `:=` join mutates the underlying data.table by reference.
  atividade_filtrada_min_sf <- reactive({
    req(atividade_filtrada_min())
    data.table::setkeyv(atividade_filtrada_min(), c('id_hex'))
    atividade_filtrada_min_sf1 <- atividade_filtrada_min()[hex_filtrado(), on = 'id_hex', geom := i.geom]
    # to sf
    atividade_filtrada_min_sf1 <- st_sf(atividade_filtrada_min_sf1, crs = 4326)
    # print("BORAAAA")
    # print(head(atividade_filtrada_min_sf1))
    return(atividade_filtrada_min_sf1)
  })
  # Cumulative-access table with hex geometries attached (EPSG:4326).
  tempo_filtrado_sf <- reactive({
    # print(hex_filtrado())
    # merge
    data.table::setkeyv(tempo_filtrado(), c('id_hex'))
    tempo_filtrado_sf1 <- tempo_filtrado()[hex_filtrado(), on = 'id_hex', geom := i.geom]
    # to sf
    tempo_filtrado_sf1 <- st_sf(tempo_filtrado_sf1, crs = 4326)
  })
  # filter the scale limits of each indicator: min/max used to pin the
  # colour scale for the current city + mode so maps are comparable; the
  # matching columns are found via the indicator name stored in ind$ind.
  scale_limits <- reactive({
    # print(head(access_limits))
    # filter indicator
    access_extremes1 <- access_limits[abbrev_muni == v_city$cidade & mode == a()]
    cols <- c("abbrev_muni", "mode", grep(ind$ind, colnames(access_extremes1), ignore.case = TRUE, value = TRUE))
    access_extremes1 <- access_extremes1[,..cols]
    colnames(access_extremes1) <- c("abbrev_muni", "mode", "min", "max")
    # print(head(access_extremes1))
    return(access_extremes1)
  })
  # 7) RENDER BRAZIL'S BASEMAP -------------------------------------------------------
  # Initial country-wide view with one clickable point per supported city.
  output$map <- renderMapdeck({
    mapdeck(location = c(-43.95988, -19.902739),
            zoom = 3,
            style = "mapbox://styles/kauebraga/cl3vtf5ay005v14pkzouvp0yk"
    ) %>%
      add_pointcloud(data = centroids,
                     lon = "lon", lat = "lat",
                     update_view = FALSE,
                     layer_id = "brasil",
                     # fill_colour = "blue",
                     fill_opacity = 170,
                     # auto_highlight = TRUE
                     # id = "brasil",
                     tooltip = "name_muni"
      )
  })
  # observeEvent(c(input$map_pointcloud_click), {
  #
  #   # req(input$map_arc_click)
  #
  #   js <- input$map_pointcloud_click
  #   lst <- jsonlite::fromJSON( js )
  #   row <- (lst$index) + 1
  #
  #   print(row)
  #
  # })
  # Stop the loading page here !
  waiter_hide()
  # reactive to get city limits: boundary polygon of the selected city as
  # sf (EPSG:4326); the assignment's value is what the reactive returns.
  limits_filtrado <- reactive({
    # Filter cities limits
    limits_filtrado <- limits[abrev_muni == v_city$cidade] %>% st_sf(crs = 4326)
    # print(limits_filtrado)
  })
  # Centroid (lon/lat) of the selected city, used to fly the camera there.
  centroid_go <- reactive({
    centroid_go <- centroids[abrev_muni == v_city$cidade]
    # print(centroid_go)
  })
zoom1 <- reactive ({
# Choose zoom based on city: some cities are bigger than others
if(v_city$cidade %in% c("spo", "man", "cgr", "bsb")) {
zoom1 <- 9
} else if(v_city$cidade %in% c("mac", "for", "nat", "rec", "sal", "slz", "bho")) {
zoom1 <- 11
} else {zoom1 <- 10}
# print(sprintf("zoom: %s", zoom1))
})
  # Remembers the layer_id currently drawn on the map so the next render
  # knows which mapdeck layer/legend to clear first.
  mapdeck_id_clear <- reactiveVal("us_initial")
# 8) OBSERVER TO RENDER THE CITY INDICATOR -------------------------------------------------------
observeEvent({v_city$cidade},{
mapdeck_id <- ifelse(input$indicador_us == "access", "access_initial", "us_initial")
# print(sprintf("Mapdeck id: %s", mapdeck_id))
# print(sprintf("Mapdeck id clear: %s", mapdeck_id_clear()))
waiter_show(html = tagList(spin_loaders(id = 2, color = "black")),
color = "rgba(233, 235, 240, .4)")
if (input$indicador_us == "access") {
# select variables
data <- if(input$indicador_us == "access" & input$indicador %in% c("CMA", "CMP")) {
tempo_filtrado_sf()
} else if(input$indicador_us == "access" & input$indicador %in% c("TMI")) {
atividade_filtrada_min_sf()
} else if (input$indicador_us == "us") {
us_filtrado_ano_atividade_sf()
}
# ordenador data
data <- data %>% dplyr::arrange(valor)
# print(scale_limits()$max)
# legend_converter <- if (input$indicador_us == "access" & input$indicador %in% c("CMA") &
# input$atividade_cma %in% c("TT", "TB", "TM", "TA")) {
#
# function(x) scales::comma(as.integer(x), big.mark = " ", accuracy = 100)
#
# } else if (input$indicador_us == "access" & input$indicador %in% c("CMP")) {
#
# function(x) scales::comma(as.integer(x), big.mark = " ", accuracy = 100)
#
# } else if (input$indicador_us == "us" & input$indicador %in% c("TMI")) {
#
# function(x) scales::comma(as.integer(x), big.mark = " ", accuracy = 100)
#
#
# } else if (input$indicador_us == "access" & input$indicador %in% c("TMI")) {
#
# function(x) as.integer(x)
#
# } else function(x) as.integer(x)
legend_converter <- function (x) as.integer(x)
legend <- if(input$indicador_us == "access" & input$indicador %in% c("CMA", "CMP")) {
i18n()$t("Oportunidades Acessíveis")
} else if(input$indicador_us == "access" & input$indicador %in% c("TMI")) {
i18n()$t("Minutos até a oportunidade mais próxima")
} else if (input$indicador_us == "us") {
i18n()$t("Quantidade")
}
# print("DATA")
# print(head(c(data$valor)))
# print(head(c(scale_limits()$max)))
palette <- fcase(input$indicador == "CMA", "inferno",
input$indicador == "CMP", "viridis",
input$indicador == "TMI", "viridis")
fill_values <- c(data$valor, scale_limits()$max)
if (input$indicador == "TMI") fill_values <- -fill_values else fill_values <- fill_values
fill_color <- colourvalues::colour_values(
# x = c(data$valor, 300000),
x = fill_values,
alpha = 200,
palette = palette
)
# delete the first
fill_color <- fill_color[-1]
# delete the last
# fill_color <- fill_color[-length(fill_color)]
# print(length(fill_color))
# print(head(fill_color))
# ADD THE COULOURS TO THE DATA
data$fill <- fill_color
# fill for the legend
# compose the vector of values
# print(head(data$fill))
# create legend
l <- colourvalues::colour_values(
# x = c(data$valor, 300000)
x = c(data$valor, scale_limits()$max)
, n_summaries = 6,
palette = palette
)
legend <- mapdeck::legend_element(
variables = legend_converter(l$summary_values)
, colours = if (input$indicador == "TMI") rev(l$summary_colours) else l$summary_colours
, colour_type = "fill"
, variable_type = "gradient"
, title = legend
)
js_legend <- mapdeck::mapdeck_legend(legend)
# create list with values for mapdeck options
mapdeck_options <- list(
# 'layer_id1' = ifelse(input$indicador %in% c("CMA", "CMP"), "acess_min_go", "acess_cum_go"),
# 'data' = if(input$indicador %in% c("CMA", "CMP")) tempo_filtrado_sf() else if (input$indicador %in% c("TMI")) ,
# 'layer_id2' = ifelse(input$indicador %in% c("CMA", "CMP"), "acess_cum_go", "acess_min_go"),
# 'palette1' = if (input$indicador %in% c("CMA", "CMP")) "inferno" else if (input$indicador %in% c("TMI")) colorss,
'legend_options1' = ifelse(input$indicador %in% c("CMA", "CMP"),
i18n()$t("Oportunidades Acessíveis"),
i18n()$t("Minutos até a oportunidade mais próxima"))
)
# Zoom in on the city when it's choosen
mapdeck_update(map_id = "map") %>%
mapdeck_view(location = c(centroid_go()$lon, centroid_go()$lat), zoom = zoom1(),
duration = 4000, transition = "fly") %>%
clear_polygon(layer_id = mapdeck_id_clear()) %>%
clear_pointcloud(layer_id = "brasil") %>%
clear_legend(layer_id = mapdeck_id_clear()) %>%
# # Render city limits
# add_polygon(
# data = limits_filtrado(),
# stroke_colour = "#616A6B",
# stroke_width = 100,
# fill_opacity = 0,
# update_view = FALSE,
# focus_layer = FALSE,
# ) %>%
# Render city indicator
add_polygon(
data = data,
fill_colour = "fill",
# fill_opacity = 200,
layer_id = mapdeck_id,
# layer_id = mapdeck_options$layer_id2,
# palette = mapdeck_options$palette1,
update_view = FALSE,
focus_layer = FALSE,
# auto_highlight = TRUE,
tooltip = "popup",
legend = js_legend,
# legend = TRUE,
# legend_options = list(title = i18n()$t(legend)),
# legend_format = list( fill_colour = legend_converter),
stroke_width = NULL,
stroke_colour = NULL,
stroke_opacity = 0
)
} else if (input$indicador_us == "us") {
# print(sprintf("Mapdeck id clear1: %s", mapdeck_id_clear()))
print("UUUUUUUUUIUIU")
# mapdeck_id <- "us_update"
# mapdeck_id_clear <- ifelse(input$indicador_us == "access", "us_initial", "access_initial")
# print(sprintf("Mapdeck id: %s", mapdeck_id))
legend_converter_us <- function(x) {
return( scales::comma(as.integer(x), big.mark = " ", accuracy = 1) )
}
legend_converter <- if (input$indicador_us == "us" & grepl("^(P|T)", indicador_us_ok())) {
legend_converter_us
} else as.integer
legend_fill <- if (input$demo_ou_us == "demo" & input$atividade_demo %in% c("R002", "R003")) {
"rdylbu"
} else if (input$demo_ou_us == "activity") "viridis"
legend_title <- if (input$demo_ou_us == "demo" & input$atividade_demo %in% c("R001")) {
"Renda per capita (R$)"
} else if (input$demo_ou_us == "demo" & input$atividade_demo %in% c("R002")) {
"Quintil de renda"
} else if (input$demo_ou_us == "demo" & input$atividade_demo %in% c("R003")) {
"Decil de renda"
} else if (input$demo_ou_us == "activity") "Quantidade" else "Quantidade"
mapdeck_update(map_id = "map") %>%
mapdeck_view(location = c(centroid_go()$lon, centroid_go()$lat), zoom = zoom1(),
duration = 4000, transition = "fly") %>%
clear_polygon(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
clear_legend(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
add_polygon(
data = us_filtrado_ano_atividade_sf(),
fill_colour = "valor",
fill_opacity = 200,
layer_id = mapdeck_id,
palette = legend_fill,
update_view = FALSE,
focus_layer = FALSE,
tooltip = "popup",
legend = TRUE,
na_colour = "#80808000",
legend_options = list(title = i18n()$t(legend_title)),
legend_format = list( fill_colour = legend_converter),
stroke_width = NULL,
stroke_colour = NULL,
stroke_opacity = 0
)
# mapdeck_id_clear(mapdeck_id)
}
mapdeck_id_clear(mapdeck_id)
waiter_hide()
})
  # Observe any change on the atrributes on the city and change the map accordingly
  # (accessibility view only: indicator, year, mode, activity, time cut-off).
  observeEvent({c(input$indicador_us,
                  input$indicador,
                  input$ano,
                  input$modo_todos, input$modo_ativo,
                  input$atividade_cma, input$atividade_cmp, input$atividade_min,
                  input$tempo_tp, input$tempo_ativo)},{
    req(input$indicador_us == "access")
    # legend_converter_cma <- function(x) {
    #   scales::comma(as.integer(x), big.mark = " ", accuracy = 100)
    # }
    #
    # legend_converter <- if (input$indicador_us == "access" & input$indicador %in% c("TMI")) {
    #   as.integer
    # } else legend_converter_cma
    legend_converter <- function (x) as.integer(x)
    mapdeck_id <- "access_update"
    if (input$indicador_us == "access") {
      print(sprintf("Mapdeck id clear2: %s", mapdeck_id_clear()))
      if (input$indicador == "TMI") {
        # TMI: travel time to the nearest opportunity (lower is better)
        data <- atividade_filtrada_min_sf() %>%
          dplyr::arrange(valor)
        print("mean")
        print(mean(data$valor))
        print(min(data$valor))
        # negate so the viridis palette runs in reverse for TMI;
        # scale_limits()$max is appended as a sentinel to pin the scale
        fill_color <- colourvalues::colour_values(
          # x = c(data$valor, 300000),
          x = -c(data$valor, scale_limits()$max),
          alpha = 200,
          palette = "viridis"
        )
        # adjust vector with colors
        # delete the first
        # fill_color <- fill_color[-1]
        # delete the last (the sentinel colour) before assigning to rows
        fill_color <- fill_color[-length(fill_color)]
        # ADD THE COULOURS TO THE DATA
        data <- data %>%
          dplyr::mutate(fill = fill_color)
        # print("UEEEEE")
        # print(data$fill)
        # fill for the legend
        # compose the vector of values
        # print(head(data$fill))
        # create legend
        l <- colourvalues::colour_values(
          # x = c(data$valor, 300000)
          x = c(data$valor, scale_limits()$max)
          , n_summaries = 6,
          palette = "viridis"
        )
        legend <- mapdeck::legend_element(
          variables = legend_converter(l$summary_values)
          , colours = rev(l$summary_colours)
          , colour_type = "fill"
          , variable_type = "gradient"
          , title = i18n()$t("Minutos até a oportunidade mais próxima")
        )
        js_legend <- mapdeck::mapdeck_legend(legend)
        # # create viridis scale in the reverse direction
        # # create matrix
        # colorss <- colourvalues::color_values_rgb(x = 1:256, "viridis")
        # # invert matrix
        # colorss <- apply(colorss, 2, rev)[, 1:3]
        # # add alpha
        # colorss <- cbind(colorss, 200)
        # replace the previous layer; "oi" is a dummy id so clearing a layer
        # with the same id as the one being added becomes a no-op
        mapdeck_update(map_id = "map") %>%
          clear_polygon(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
          clear_legend(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
          add_polygon(
            data = data,
            fill_colour = "fill",
            # fill_opacity = 200,
            layer_id = mapdeck_id,
            # palette = colorss,
            update_view = FALSE,
            tooltip = "popup",
            legend = js_legend,
            # legend_options = list(title = i18n()$t("Minutos até a oportunidade mais próxima")),
            # legend_format = list( fill_colour = legend_converter),
            stroke_width = 0,
            stroke_colour = NULL,
            stroke_opacity = 0
          )
      } else
        if (input$indicador %in% c("CMA", "CMP")) {
          # CMA/CMP: cumulative opportunities (higher is better)
          data <- tempo_filtrado_sf() %>%
            dplyr::arrange(valor)
          # print("AAAAAAAAh")
          fill_color <- colourvalues::colour_values(
            # x = c(data$valor, 300000),
            x = c(data$valor, scale_limits()$max),
            alpha = 200,
            palette = "inferno"
          )
          # print(fill_color[-length(fill_color)])
          # print(head(tempo_filtrado_sf()))
          # print(c(tempo_filtrado_sf()$valor, scale_limits()$max))
          # ADD THE COULOURS TO THE DATA (sentinel colour dropped)
          data <- data %>%
            dplyr::mutate(fill = fill_color[-length(fill_color)])
          # fill for the legend
          # compose the vector of values
          # create legend
          l <- colourvalues::colour_values(
            # x = c(data$valor, 300000)
            x = c(data$valor, scale_limits()$max)
            , n_summaries = 6,
            palette = "inferno"
          )
          legend <- mapdeck::legend_element(
            variables = legend_converter(l$summary_values)
            , colours = l$summary_colours
            , colour_type = "fill"
            , variable_type = "gradient"
            , title = i18n()$t("Oportunidades Acessíveis")
          )
          js_legend <- mapdeck::mapdeck_legend(legend)
          mapdeck_update(map_id = "map") %>%
            clear_polygon(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
            clear_legend(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
            add_polygon(
              data = data,
              fill_colour = "fill",
              fill_opacity = 200,
              layer_id = mapdeck_id,
              # palette = "inferno",
              update_view = FALSE,
              focus_layer = FALSE,
              # auto_highlight = TRUE,
              tooltip = "popup",
              legend = js_legend,
              # legend_options = list(title = i18n()$t("Oportunidades Acessíveis")),
              # legend_format = list( fill_colour = legend_converter),
              stroke_width = NULL,
              stroke_colour = NULL,
              stroke_opacity = 0
            )
        }
    }
    # remember the layer now on screen for the next clear
    mapdeck_id_clear(mapdeck_id)
  })
  # Observe any change on the atrributes on the city and change the map accordingly
  # only for land use (year, demo/activity tab, selected variable).
  observeEvent({c(input$indicador_us,
                  input$ano_us,
                  input$ano_demo,
                  input$demo_ou_us,
                  input$atividade_demo, input$atividade_us)},{
    # print(nrow(atividade_filtrada_min_sf))
    # legend_converter_us <- function(x) {
    #   return( scales::comma(as.integer(x), big.mark = " ", accuracy = 1) )
    # }
    #
    # legend_converter <- if (input$indicador_us == "us" & grepl("^(P|T)", indicador_us_ok())) {
    #   legend_converter_us
    # } else as.integer
    legend_converter <- function (x) as.integer(x)
    # diverging palette for income quintile/decile, viridis for activities.
    # NOTE(review): legend_fill is NULL when demo is selected with R001 or a
    # P* variable (no final else) — confirm mapdeck accepts palette = NULL.
    legend_fill <- if (input$demo_ou_us == "demo" & input$atividade_demo %in% c("R002", "R003")) {
      "rdylbu"
    } else if (input$demo_ou_us == "activity") "viridis"
    legend_title <- if (input$demo_ou_us == "demo" & input$atividade_demo %in% c("R001")) {
      "Renda per capita (R$)"
    } else if (input$demo_ou_us == "demo" & input$atividade_demo %in% c("R002")) {
      "Quintil de renda"
    } else if (input$demo_ou_us == "demo" & input$atividade_demo %in% c("R003")) {
      "Decil de renda"
    } else if (input$demo_ou_us == "activity") "Quantidade" else "Quantidade"
    if (input$indicador_us == "us") {
      print(sprintf("Mapdeck id clear1: %s", mapdeck_id_clear()))
      mapdeck_id <- "us_update"
      # mapdeck_id_clear <- ifelse(input$indicador_us == "access", "us_initial", "access_initial")
      # print(sprintf("Mapdeck id: %s", mapdeck_id))
      # replace the previous layer; "oi" is a dummy id so clearing a layer
      # with the same id as the one being added becomes a no-op
      mapdeck_update(map_id = "map") %>%
        clear_polygon(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
        clear_legend(layer_id = ifelse(mapdeck_id_clear() == mapdeck_id, "oi", mapdeck_id_clear())) %>%
        add_polygon(
          data = us_filtrado_ano_atividade_sf(),
          fill_colour = "valor",
          fill_opacity = 200,
          layer_id = mapdeck_id,
          palette = legend_fill,
          update_view = FALSE,
          focus_layer = FALSE,
          tooltip = "popup",
          legend = TRUE,
          legend_options = list(title = i18n()$t(legend_title)),
          legend_format = list( fill_colour = legend_converter),
          stroke_width = NULL,
          stroke_colour = NULL,
          na_colour = "#80808000",
          stroke_opacity = 0
        )
      mapdeck_id_clear(mapdeck_id)
    }
  })
|
# Unit tests for safetyGraphics::generateSettings(): structure of the
# returned settings list, chart-name validation, and per-standard mapping
# behaviour (None / SDTM / ADaM, partial specs, defaults, customizations).
context("Tests for the generateSettings() function")
library(safetyGraphics)
# Every top-level name expected in a generated settings object.
setting_names<-c("id_col","value_col","measure_col","normal_col_low","normal_col_high","studyday_col", "visit_col", "visitn_col", "filters","group_cols", "measure_values", "baseline", "analysisFlag", "x_options", "y_options", "visit_window", "r_ratio_filter", "r_ratio_cut", "showTitle", "warningText", "unit_col", "start_value", "details", "missingValues", "unscheduled_visit_pattern","unscheduled_visits","visits_without_data",'calculate_palt')
# NOTE(review): expect_is() is deprecated in testthat 3e;
# expect_type()/expect_s3_class() are the modern replacements.
test_that("a list with the expected properties and structure is returned for all standards",{
  expect_is(generateSettings(standard="None"),"list")
  expect_equal(sort(names(generateSettings(standard="None"))),sort(setting_names))
  expect_equal(sort(names(generateSettings(standard="None")[["measure_values"]])), sort(c("ALT","AST","TB","ALP")))
  expect_is(generateSettings(standard="ADaM"),"list")
  expect_equal(sort(names(generateSettings(standard="ADaM"))),sort(setting_names))
  expect_equal(sort(names(generateSettings(standard="ADaM")[["measure_values"]])), sort(c("ALT","AST","TB","ALP")))
  expect_is(generateSettings(standard="SDTM"),"list")
  expect_equal(sort(names(generateSettings(standard="SDTM"))),sort(setting_names))
  expect_equal(sort(names(generateSettings(standard="SDTM")[["measure_values"]])), sort(c("ALT","AST","TB","ALP")))})
# Chart names are matched case-insensitively; unknown charts must error.
test_that("a warning is thrown if chart isn't found in the chart list",{
  expect_error(generateSettings(chart="aeexplorer"))
  expect_error(generateSettings(chart=""))
  expect_silent(generateSettings(chart="hepExplorer"))
  expect_silent(generateSettings(chart="hepexplorer"))
  expect_silent(generateSettings(chart="HepexploreR"))
})
test_that("data mappings are null when setting=none, character otherwise",{
  # Data-mapping keys whose values depend on the chosen standard.
  data_setting_keys<-c("id_col", "value_col", "measure_col", "normal_col_low", "normal_col_high", "studyday_col","measure_values--ALT","measure_values--ALP","measure_values--TB","measure_values--AST")
  none_settings <- generateSettings(standard="None")
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_equal(getSettingValue(settings=none_settings,key=key),NULL)
  }
  # An unrecognised standard behaves like "None": no mappings filled in.
  other_settings <- generateSettings(standard="a different standard")
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_equal(getSettingValue(settings=other_settings,key=key),NULL)
  }
  # Standard names are matched case-insensitively (SDTM/SdTm, ADaM/ADAM).
  sdtm_settings <- generateSettings(standard="SDTM")
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_is(getSettingValue(settings=sdtm_settings,key=key),"character")
  }
  sdtm_settings2 <- generateSettings(standard="SdTm")
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_is(getSettingValue(settings=sdtm_settings2,key=key),"character")
  }
  adam_settings <- generateSettings(standard="ADaM")
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_is(getSettingValue(settings=adam_settings,key=key),"character")
  }
  adam_settings2 <- generateSettings(standard="ADAM")
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_is(getSettingValue(settings=adam_settings2,key=key),"character")
  }
  # Test Partial Spec Match: only the listed keys get mapped.
  partial_adam_settings <- generateSettings(standard="adam", partial=TRUE, partial_keys = c("id_col","measure_col","measure_values--ALT"))
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    if (text_key %in% c("id_col","measure_col","measure_values--ALT")) {
      expect_is(getSettingValue(settings=partial_adam_settings,key=key),"character")
    } else {
      expect_equal(getSettingValue(settings=partial_adam_settings,key=key),NULL)
    }
  }
  #Testing that partial cols are only used when partial=TRUE
  full_adam_partial_cols <- generateSettings(standard="ADaM", partial_keys = c("id_col","measure_col","measure_values--ALT"))
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_is(getSettingValue(settings=full_adam_partial_cols,key=key),"character")
  }
  #Testing failure when partial is true with no specified columns
  expect_error(partial_settings_no_cols <- generateSettings(standard="ADaM", partial=TRUE))
  #Test useDefaults: option settings stay NULL, data mappings still fill in
  noDefaults <- generateSettings(standard="adam",useDefaults=FALSE)
  option_keys<-c("x_options", "y_options", "visit_window", "r_ratio_filter", "r_ratio_cut", "showTitle", "warningText")
  #non data mappings are NA
  for(text_key in option_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_equal(getSettingValue(settings=noDefaults,key=key),NULL)
  }
  #data mappings are filled as expected
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_is(getSettingValue(settings=noDefaults,key=key),"character")
  }
  #Test customSettings: overrides apply, untouched keys keep defaults
  # NOTE(review): tibble() is used without an explicit library(tibble);
  # relies on it being attached via dependencies — confirm.
  customizations<- tibble(text_key=c("id_col","warningText","measure_values--ALT"),customValue=c("customID","This is a custom warning","custom ALT"))
  customSettings<-generateSettings(standard="adam",custom_settings=customizations)
  expect_equal(getSettingValue(settings=customSettings,key=list("id_col")),"customID")
  expect_equal(getSettingValue(settings=customSettings,key=list("warningText")),"This is a custom warning")
  expect_equal(getSettingValue(settings=customSettings,key=list("measure_values","ALT")),"custom ALT")
  expect_equal(getSettingValue(settings=customSettings,key=list("measure_col")),"PARAM")
})
|
/tests/testthat/test_generateSettings.R
|
no_license
|
mli1/safetyGraphics
|
R
| false
| false
| 5,567
|
r
|
# Unit tests for safetyGraphics::generateSettings().
context("Tests for the generateSettings() function")
library(safetyGraphics)
# Expected top-level names of a generated settings object.
setting_names<-c("id_col","value_col","measure_col","normal_col_low","normal_col_high","studyday_col", "visit_col", "visitn_col", "filters","group_cols", "measure_values", "baseline", "analysisFlag", "x_options", "y_options", "visit_window", "r_ratio_filter", "r_ratio_cut", "showTitle", "warningText", "unit_col", "start_value", "details", "missingValues", "unscheduled_visit_pattern","unscheduled_visits","visits_without_data",'calculate_palt')
# NOTE(review): expect_is() is deprecated in testthat 3e.
test_that("a list with the expected properties and structure is returned for all standards",{
  expect_is(generateSettings(standard="None"),"list")
  expect_equal(sort(names(generateSettings(standard="None"))),sort(setting_names))
  expect_equal(sort(names(generateSettings(standard="None")[["measure_values"]])), sort(c("ALT","AST","TB","ALP")))
  expect_is(generateSettings(standard="ADaM"),"list")
  expect_equal(sort(names(generateSettings(standard="ADaM"))),sort(setting_names))
  expect_equal(sort(names(generateSettings(standard="ADaM")[["measure_values"]])), sort(c("ALT","AST","TB","ALP")))
  expect_is(generateSettings(standard="SDTM"),"list")
  expect_equal(sort(names(generateSettings(standard="SDTM"))),sort(setting_names))
  expect_equal(sort(names(generateSettings(standard="SDTM")[["measure_values"]])), sort(c("ALT","AST","TB","ALP")))})
# Chart names are matched case-insensitively; unknown charts must error.
test_that("a warning is thrown if chart isn't found in the chart list",{
  expect_error(generateSettings(chart="aeexplorer"))
  expect_error(generateSettings(chart=""))
  expect_silent(generateSettings(chart="hepExplorer"))
  expect_silent(generateSettings(chart="hepexplorer"))
  expect_silent(generateSettings(chart="HepexploreR"))
})
test_that("data mappings are null when setting=none, character otherwise",{
  # Data-mapping keys whose values depend on the chosen standard.
  data_setting_keys<-c("id_col", "value_col", "measure_col", "normal_col_low", "normal_col_high", "studyday_col","measure_values--ALT","measure_values--ALP","measure_values--TB","measure_values--AST")
  none_settings <- generateSettings(standard="None")
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_equal(getSettingValue(settings=none_settings,key=key),NULL)
  }
  # An unrecognised standard behaves like "None".
  other_settings <- generateSettings(standard="a different standard")
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_equal(getSettingValue(settings=other_settings,key=key),NULL)
  }
  # Standard names are matched case-insensitively.
  sdtm_settings <- generateSettings(standard="SDTM")
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_is(getSettingValue(settings=sdtm_settings,key=key),"character")
  }
  sdtm_settings2 <- generateSettings(standard="SdTm")
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_is(getSettingValue(settings=sdtm_settings2,key=key),"character")
  }
  adam_settings <- generateSettings(standard="ADaM")
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_is(getSettingValue(settings=adam_settings,key=key),"character")
  }
  adam_settings2 <- generateSettings(standard="ADAM")
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_is(getSettingValue(settings=adam_settings2,key=key),"character")
  }
  # Test Partial Spec Match: only the listed keys get mapped.
  partial_adam_settings <- generateSettings(standard="adam", partial=TRUE, partial_keys = c("id_col","measure_col","measure_values--ALT"))
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    if (text_key %in% c("id_col","measure_col","measure_values--ALT")) {
      expect_is(getSettingValue(settings=partial_adam_settings,key=key),"character")
    } else {
      expect_equal(getSettingValue(settings=partial_adam_settings,key=key),NULL)
    }
  }
  #Testing that partial cols are only used when partial=TRUE
  full_adam_partial_cols <- generateSettings(standard="ADaM", partial_keys = c("id_col","measure_col","measure_values--ALT"))
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_is(getSettingValue(settings=full_adam_partial_cols,key=key),"character")
  }
  #Testing failure when partial is true with no specified columns
  expect_error(partial_settings_no_cols <- generateSettings(standard="ADaM", partial=TRUE))
  #Test useDefaults: option settings stay NULL, data mappings still fill in
  noDefaults <- generateSettings(standard="adam",useDefaults=FALSE)
  option_keys<-c("x_options", "y_options", "visit_window", "r_ratio_filter", "r_ratio_cut", "showTitle", "warningText")
  #non data mappings are NA
  for(text_key in option_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_equal(getSettingValue(settings=noDefaults,key=key),NULL)
  }
  #data mappings are filled as expected
  for(text_key in data_setting_keys){
    key<-textKeysToList(text_key)[[1]]
    expect_is(getSettingValue(settings=noDefaults,key=key),"character")
  }
  #Test customSettings: overrides apply, untouched keys keep defaults
  customizations<- tibble(text_key=c("id_col","warningText","measure_values--ALT"),customValue=c("customID","This is a custom warning","custom ALT"))
  customSettings<-generateSettings(standard="adam",custom_settings=customizations)
  expect_equal(getSettingValue(settings=customSettings,key=list("id_col")),"customID")
  expect_equal(getSettingValue(settings=customSettings,key=list("warningText")),"This is a custom warning")
  expect_equal(getSettingValue(settings=customSettings,key=list("measure_values","ALT")),"custom ALT")
  expect_equal(getSettingValue(settings=customSettings,key=list("measure_col")),"PARAM")
})
#' Create a response curve for the *Catch Limit Algorithm*
#'
#' @details
#' Draws a multi-panel figure comparing two result sets side by side for
#' each trial: total catch, final size, lowest size, and AAV.  Panels are
#' drawn with \code{errbar} (from \pkg{Hmisc}), which must be available in
#' the calling environment.
#'
#' @param plot1 A \code{data.frame} of \code{RESOUT.RRR} results.
#' @param plot2 A \code{data.frame} of \code{RESOUT.RRR} results.
#' @param plot3 A \code{data.frame} of \code{RESOUT.RRR} results.
#' @param plot4 A \code{data.frame} of \code{RESOUT.RRR} results.
#' @param set A \code{data.frame} of trials with specific names and attributes.
#' @param out A character value to save the plot to, no extension necessary.
#' If \code{NULL} then the plot will print to the screen rather than to the disk.
#' @param part Can take the value of \code{1} or \code{2}, where the comparisons
#' are made across different parts of the data.
#' @return A plot is printed to the disk according to the file name specified in
#' \code{out} or if \code{out} is \code{NULL} the plot is printed to the screen.
##' @seealso \code{\link{functionname}}
#' @author Kelli Faye Johnson
#' @export
plot_curve <- function(plot1, plot2, plot3, plot4, set, out = NULL, part = 1) {
  # Draw one row of panels (total catch, final size, lowest size, AAV)
  # comparing the `orig` results against the `alt` results.
  myplot <- function(orig, alt, little = 0.6, big = 0.95, label = "",
    limtc = c(0, 2.5), limpf = c(0, 1), limaa = c(0, 0.8)) {
    num <- dim(orig)[1]
    numa <- dim(alt)[1]
    symb <- LETTERS[1:num]
    xlim <- c(-1, num + 2)
    hdmlt <- 1.08  # multiplier placing panel titles just above the plot region
    # Label every other tick so the axes stay uncluttered.
    axis1 <- seq(limtc[1], limtc[2], by = 0.25)
    axis1 <- axis1[seq(2, length(axis1), by = 2)]
    axis2 <- seq(limpf[1], limpf[2], by = 0.1)
    axis2 <- axis2[seq(2, length(axis2), by = 2)]
    littlelabtxt <- c("Orig", "Alt")
    if (part == 2) littlelabtxt <- c("1%", "4%")
    littlelab <- -0.6
    # Total catch panel (columns 2:4 = point estimate plus interval bounds).
    errbar(x = 1:num, y = orig[, 2], yplus = orig[, 4], frame.plot = FALSE,
      yminus = orig[, 3], xaxt = "n", ylim = limtc, pch = symb, xlim = xlim)
    axis(2, at = axis1, label = FALSE)
    mtext(side = 3, label, padj = 0, line = 1.1, cex = big * 0.8)
    mtext(side = 3, littlelabtxt[1], littlelab, cex = little)
    errbar(x = 1:numa, y = alt[, 2], yplus = alt[, 4], frame.plot = FALSE,
      yminus = alt[, 3], xaxt = "n", ylim = limtc, pch = symb, xlim = xlim, yaxt = "n")
    axis(2, labels = FALSE); axis(2, axis1, label = FALSE)
    mtext(side = 3, littlelabtxt[2], littlelab, cex = little)
    text(x = xlim[1], y = limtc[2] * hdmlt, "Total catch", cex = big, xpd = NA)
    # Final size panel (columns 6:8).
    errbar(x = 1:num, y = orig[, 6], yplus = orig[, 8], frame.plot = FALSE,
      yminus = orig[, 7], xaxt = "n", ylim = limpf, pch = symb, xlim = xlim)
    mtext(side = 3, littlelabtxt[1], littlelab, cex = little)
    axis(2, at = axis2, label = FALSE)
    errbar(x = 1:numa, y = alt[, 6], yplus = alt[, 8], frame.plot = FALSE,
      yminus = alt[, 7], xaxt = "n", ylim = limpf, pch = symb, xlim = xlim, yaxt = "n")
    axis(2, labels = FALSE); axis(2, at = axis2, label = FALSE)
    mtext(side = 3, littlelabtxt[2], littlelab, cex = little)
    text(x = xlim[1], y = limpf[2] * hdmlt, "Final size", cex = big, xpd = NA)
    # Lowest size panel (columns 12:14).
    errbar(x = 1:num, y = orig[, 12], yplus = orig[, 14], frame.plot = FALSE,
      yminus = orig[, 13], xaxt = "n", ylim = limpf, pch = symb, xlim = xlim)
    mtext(side = 3, littlelabtxt[1], littlelab, cex = little)
    axis(2, at = axis2, label = FALSE)
    errbar(x = 1:numa, y = alt[, 12], yplus = alt[, 14], frame.plot = FALSE,
      yminus = alt[, 13], xaxt = "n", ylim = limpf, pch = symb, xlim = xlim, yaxt = "n")
    axis(2, labels = FALSE); axis(2, at = axis2, label = FALSE)
    mtext(side = 3, littlelabtxt[2], littlelab, cex = little)
    text(x = xlim[1], y = limpf[2] * hdmlt, "Lowest size", cex = big, xpd = NA)
    # AAV panel (column 24): the orig series is shifted up by half the
    # y-range so both series share one panel, split by an abline.
    # NOTE(review): x runs over 1:numa for the orig values and 1:num for
    # the alt values; presumably num == numa for every call -- confirm.
    y <- as.numeric(orig[, 24])
    plot(x = 1:numa, y = y + limaa[2] / 2, ylim = limaa, xaxt = "n", pch = symb,
      frame.plot = FALSE, yaxt = "n", xlim = c(0, num))
    y <- as.numeric(as.character(alt[, 24]))
    points(x = 1:num, y = y, ylim = limaa, xaxt = "n", pch = symb, yaxt = "n")
    mtext(side = 3, littlelabtxt[1], littlelab, cex = little)
    axis(2, at = seq(0, limaa[2], length.out = 9),
      labels = c(0, seq(limaa[2]/8, limaa[2]/2, length.out = 4),
        seq(limaa[2]/8, limaa[2]/2, length.out = 4)))
    mtext(side = 3, littlelabtxt[2], line = littlelab - 10, cex = little)
    text(x = num/2, y = limaa[2] * hdmlt, "AAV", cex = big, xpd = NA)
    abline(h = limaa[2] / 2, xpd = FALSE)
  }
  mars <- c(1.0, 0.1, 1.0, 0.1)
  if (part == 1) {
    if (!is.null(out)) {
      jpeg(paste0(out, ".jpeg"), res = 100, width = 1100, height = 600)
    }
    par(mfrow = c(2, 15), las = 1, mar = mars, oma = c(0.2, 3, 2.5, 0.2),
      tck = 0.05, mgp = c(3, 0.1, 0))
    # Fecundity trials ("F*") from the first pair of result sets.
    seta <- set[grepl("^F", set$name), ]
    plota <- data.frame(plot1[match(seta$name, plot1$trial), ],
      stringsAsFactors = FALSE)
    plota$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plota$AAV)
    plotb <- data.frame(plot2[match(seta$name, plot2$trial), ],
      stringsAsFactors = FALSE)
    plotb$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plotb$AAV)
    # Natural mortality trials ("M*").
    setb <- set[grepl("^M", set$name), ]
    plotc <- data.frame(plot1[match(setb$name, plot1$trial), ],
      stringsAsFactors = FALSE)
    plotc$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plotc$AAV)
    plotd <- data.frame(plot2[match(setb$name, plot2$trial), ],
      stringsAsFactors = FALSE)
    plotd$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plotd$AAV)
    myplot(plota, plotb, label = "Fecundity")
    plot(0, 0, type = "n", frame.plot = FALSE, xaxt = "n", yaxt = "n")
    # Fixed label typo: "Natural Morality" -> "Natural Mortality"
    # (matches the spelling used in part 2).
    myplot(plotc, plotd, label = "Natural Mortality")
    # Second row: same trials drawn from the second pair of result sets.
    seta <- set[grepl("^F", set$name), ]
    plota <- data.frame(plot3[match(seta$name, plot3$trial), ],
      stringsAsFactors = FALSE)
    plota$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plota$AAV)
    plotb <- data.frame(plot4[match(seta$name, plot4$trial), ],
      stringsAsFactors = FALSE)
    plotb$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plotb$AAV)
    setb <- set[grepl("^M", set$name), ]
    plotc <- data.frame(plot3[match(setb$name, plot3$trial), ],
      stringsAsFactors = FALSE)
    plotc$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plotc$AAV)
    plotd <- data.frame(plot4[match(setb$name, plot4$trial), ],
      stringsAsFactors = FALSE)
    plotd$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plotd$AAV)
    myplot(plota, plotb, label = "", limtc = c(0, 7.5))
    myplot(plotc, plotd, label = "", limtc = c(0, 7.5))
    if (!is.null(out)) {
      dev.off()
    }
  }
  if (part == 2) {
    if (!is.null(out)) {
      jpeg(paste0(out, ".jpeg"), res = 100, width = 900, height = 260)
    }
    par(mfrow = c(1, 15), las = 1, mar = mars, oma = c(0.05, 3, 1.2, 0.2),
      tck = 0.05, mgp = c(3, 0.1, 0))
    all <- rbind(plot1, plot2, plot3, plot4)
    all$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", all$AAV)
    # Bug fix: the original filtered on `keep$name`, but no object `keep`
    # exists in this function; `set` is the trial table in scope.
    plota <- subset(all, grepl("^F1", trial) & trial %in% set$name)
    plotb <- subset(all, grepl("^F2", trial) & trial %in% set$name)
    myplot(plota, plotb, limtc = c(0, 7.5), label = "Fecundity")
    plotc <- subset(all, grepl("^M1", trial) & trial %in% set$name)
    plotd <- subset(all, grepl("^M2", trial) & trial %in% set$name)
    plot(0, 0, type = "n", frame.plot = FALSE, xaxt = "n", yaxt = "n")
    myplot(plotc, plotd, limtc = c(0, 7.5), label = "Natural Mortality")
    if (!is.null(out)) {
      dev.off()
    }
  }
}
|
/R/plot_curve.R
|
no_license
|
iagomosqueira/iwccla
|
R
| false
| false
| 7,036
|
r
|
#' Create a response curve for the *Catch Limit Algorithm*
#'
#' @details
#' Draws a multi-panel figure comparing two result sets side by side for
#' each trial: total catch, final size, lowest size, and AAV.  Panels are
#' drawn with \code{errbar} (from \pkg{Hmisc}), which must be available in
#' the calling environment.
#'
#' @param plot1 A \code{data.frame} of \code{RESOUT.RRR} results.
#' @param plot2 A \code{data.frame} of \code{RESOUT.RRR} results.
#' @param plot3 A \code{data.frame} of \code{RESOUT.RRR} results.
#' @param plot4 A \code{data.frame} of \code{RESOUT.RRR} results.
#' @param set A \code{data.frame} of trials with specific names and attributes.
#' @param out A character value to save the plot to, no extension necessary.
#' If \code{NULL} then the plot will print to the screen rather than to the disk.
#' @param part Can take the value of \code{1} or \code{2}, where the comparisons
#' are made across different parts of the data.
#' @return A plot is printed to the disk according to the file name specified in
#' \code{out} or if \code{out} is \code{NULL} the plot is printed to the screen.
##' @seealso \code{\link{functionname}}
#' @author Kelli Faye Johnson
#' @export
plot_curve <- function(plot1, plot2, plot3, plot4, set, out = NULL, part = 1) {
  # Draw one row of panels (total catch, final size, lowest size, AAV)
  # comparing the `orig` results against the `alt` results.
  myplot <- function(orig, alt, little = 0.6, big = 0.95, label = "",
    limtc = c(0, 2.5), limpf = c(0, 1), limaa = c(0, 0.8)) {
    num <- dim(orig)[1]
    numa <- dim(alt)[1]
    symb <- LETTERS[1:num]
    xlim <- c(-1, num + 2)
    hdmlt <- 1.08  # multiplier placing panel titles just above the plot region
    # Label every other tick so the axes stay uncluttered.
    axis1 <- seq(limtc[1], limtc[2], by = 0.25)
    axis1 <- axis1[seq(2, length(axis1), by = 2)]
    axis2 <- seq(limpf[1], limpf[2], by = 0.1)
    axis2 <- axis2[seq(2, length(axis2), by = 2)]
    littlelabtxt <- c("Orig", "Alt")
    if (part == 2) littlelabtxt <- c("1%", "4%")
    littlelab <- -0.6
    # Total catch panel (columns 2:4 = point estimate plus interval bounds).
    errbar(x = 1:num, y = orig[, 2], yplus = orig[, 4], frame.plot = FALSE,
      yminus = orig[, 3], xaxt = "n", ylim = limtc, pch = symb, xlim = xlim)
    axis(2, at = axis1, label = FALSE)
    mtext(side = 3, label, padj = 0, line = 1.1, cex = big * 0.8)
    mtext(side = 3, littlelabtxt[1], littlelab, cex = little)
    errbar(x = 1:numa, y = alt[, 2], yplus = alt[, 4], frame.plot = FALSE,
      yminus = alt[, 3], xaxt = "n", ylim = limtc, pch = symb, xlim = xlim, yaxt = "n")
    axis(2, labels = FALSE); axis(2, axis1, label = FALSE)
    mtext(side = 3, littlelabtxt[2], littlelab, cex = little)
    text(x = xlim[1], y = limtc[2] * hdmlt, "Total catch", cex = big, xpd = NA)
    # Final size panel (columns 6:8).
    errbar(x = 1:num, y = orig[, 6], yplus = orig[, 8], frame.plot = FALSE,
      yminus = orig[, 7], xaxt = "n", ylim = limpf, pch = symb, xlim = xlim)
    mtext(side = 3, littlelabtxt[1], littlelab, cex = little)
    axis(2, at = axis2, label = FALSE)
    errbar(x = 1:numa, y = alt[, 6], yplus = alt[, 8], frame.plot = FALSE,
      yminus = alt[, 7], xaxt = "n", ylim = limpf, pch = symb, xlim = xlim, yaxt = "n")
    axis(2, labels = FALSE); axis(2, at = axis2, label = FALSE)
    mtext(side = 3, littlelabtxt[2], littlelab, cex = little)
    text(x = xlim[1], y = limpf[2] * hdmlt, "Final size", cex = big, xpd = NA)
    # Lowest size panel (columns 12:14).
    errbar(x = 1:num, y = orig[, 12], yplus = orig[, 14], frame.plot = FALSE,
      yminus = orig[, 13], xaxt = "n", ylim = limpf, pch = symb, xlim = xlim)
    mtext(side = 3, littlelabtxt[1], littlelab, cex = little)
    axis(2, at = axis2, label = FALSE)
    errbar(x = 1:numa, y = alt[, 12], yplus = alt[, 14], frame.plot = FALSE,
      yminus = alt[, 13], xaxt = "n", ylim = limpf, pch = symb, xlim = xlim, yaxt = "n")
    axis(2, labels = FALSE); axis(2, at = axis2, label = FALSE)
    mtext(side = 3, littlelabtxt[2], littlelab, cex = little)
    text(x = xlim[1], y = limpf[2] * hdmlt, "Lowest size", cex = big, xpd = NA)
    # AAV panel (column 24): the orig series is shifted up by half the
    # y-range so both series share one panel, split by an abline.
    # NOTE(review): x runs over 1:numa for the orig values and 1:num for
    # the alt values; presumably num == numa for every call -- confirm.
    y <- as.numeric(orig[, 24])
    plot(x = 1:numa, y = y + limaa[2] / 2, ylim = limaa, xaxt = "n", pch = symb,
      frame.plot = FALSE, yaxt = "n", xlim = c(0, num))
    y <- as.numeric(as.character(alt[, 24]))
    points(x = 1:num, y = y, ylim = limaa, xaxt = "n", pch = symb, yaxt = "n")
    mtext(side = 3, littlelabtxt[1], littlelab, cex = little)
    axis(2, at = seq(0, limaa[2], length.out = 9),
      labels = c(0, seq(limaa[2]/8, limaa[2]/2, length.out = 4),
        seq(limaa[2]/8, limaa[2]/2, length.out = 4)))
    mtext(side = 3, littlelabtxt[2], line = littlelab - 10, cex = little)
    text(x = num/2, y = limaa[2] * hdmlt, "AAV", cex = big, xpd = NA)
    abline(h = limaa[2] / 2, xpd = FALSE)
  }
  mars <- c(1.0, 0.1, 1.0, 0.1)
  if (part == 1) {
    if (!is.null(out)) {
      jpeg(paste0(out, ".jpeg"), res = 100, width = 1100, height = 600)
    }
    par(mfrow = c(2, 15), las = 1, mar = mars, oma = c(0.2, 3, 2.5, 0.2),
      tck = 0.05, mgp = c(3, 0.1, 0))
    # Fecundity trials ("F*") from the first pair of result sets.
    seta <- set[grepl("^F", set$name), ]
    plota <- data.frame(plot1[match(seta$name, plot1$trial), ],
      stringsAsFactors = FALSE)
    plota$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plota$AAV)
    plotb <- data.frame(plot2[match(seta$name, plot2$trial), ],
      stringsAsFactors = FALSE)
    plotb$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plotb$AAV)
    # Natural mortality trials ("M*").
    setb <- set[grepl("^M", set$name), ]
    plotc <- data.frame(plot1[match(setb$name, plot1$trial), ],
      stringsAsFactors = FALSE)
    plotc$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plotc$AAV)
    plotd <- data.frame(plot2[match(setb$name, plot2$trial), ],
      stringsAsFactors = FALSE)
    plotd$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plotd$AAV)
    myplot(plota, plotb, label = "Fecundity")
    plot(0, 0, type = "n", frame.plot = FALSE, xaxt = "n", yaxt = "n")
    # Fixed label typo: "Natural Morality" -> "Natural Mortality"
    # (matches the spelling used in part 2).
    myplot(plotc, plotd, label = "Natural Mortality")
    # Second row: same trials drawn from the second pair of result sets.
    seta <- set[grepl("^F", set$name), ]
    plota <- data.frame(plot3[match(seta$name, plot3$trial), ],
      stringsAsFactors = FALSE)
    plota$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plota$AAV)
    plotb <- data.frame(plot4[match(seta$name, plot4$trial), ],
      stringsAsFactors = FALSE)
    plotb$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plotb$AAV)
    setb <- set[grepl("^M", set$name), ]
    plotc <- data.frame(plot3[match(setb$name, plot3$trial), ],
      stringsAsFactors = FALSE)
    plotc$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plotc$AAV)
    plotd <- data.frame(plot4[match(setb$name, plot4$trial), ],
      stringsAsFactors = FALSE)
    plotd$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", plotd$AAV)
    myplot(plota, plotb, label = "", limtc = c(0, 7.5))
    myplot(plotc, plotd, label = "", limtc = c(0, 7.5))
    if (!is.null(out)) {
      dev.off()
    }
  }
  if (part == 2) {
    if (!is.null(out)) {
      jpeg(paste0(out, ".jpeg"), res = 100, width = 900, height = 260)
    }
    par(mfrow = c(1, 15), las = 1, mar = mars, oma = c(0.05, 3, 1.2, 0.2),
      tck = 0.05, mgp = c(3, 0.1, 0))
    all <- rbind(plot1, plot2, plot3, plot4)
    all$AAV <- gsub("[[:punct:]]$|[[:space:]]", "", all$AAV)
    # Bug fix: the original filtered on `keep$name`, but no object `keep`
    # exists in this function; `set` is the trial table in scope.
    plota <- subset(all, grepl("^F1", trial) & trial %in% set$name)
    plotb <- subset(all, grepl("^F2", trial) & trial %in% set$name)
    myplot(plota, plotb, limtc = c(0, 7.5), label = "Fecundity")
    plotc <- subset(all, grepl("^M1", trial) & trial %in% set$name)
    plotd <- subset(all, grepl("^M2", trial) & trial %in% set$name)
    plot(0, 0, type = "n", frame.plot = FALSE, xaxt = "n", yaxt = "n")
    myplot(plotc, plotd, limtc = c(0, 7.5), label = "Natural Mortality")
    if (!is.null(out)) {
      dev.off()
    }
  }
}
|
## -------------------------------------------------------------------
## Classify a column as "numerical", "logical", "factor", "categorical",
## or "NA only".  The first matching predicate (in that order) wins, so
## e.g. an all-NA numeric column is still reported as "numerical".
check_type <- function(x) {
  labels <- c("numerical", "logical", "factor", "categorical", "NA only")
  matches <- c(
    is.numeric(x),
    is.logical(x),
    is.factor(x),
    is.character(x),
    all(is.na(x))
  )
  ## which.max() returns the index of the first TRUE.
  idx <- which.max(matches)
  ## Guard against an unrecognized column type.
  if (!(idx %in% seq_along(labels))) stop("Column type not recognized.")
  labels[[idx]]
}
## Apply `fun` (with extra arguments `...`) to a numeric or logical
## vector; return NA instead of erroring for unsupported or all-NA input.
summarize_num_vector <- function(x, fun, ...) {
  summarizable <- is.numeric(x) || is.logical(x)
  if (summarizable && !all(is.na(x))) {
    fun(x, ...)
  } else {
    NA
  }
}
##' @importFrom dplyr %>% transmute
##' @importFrom forcats fct_count
## Tally the distinct values of `x` into a single "value: count" string,
## or return "Too many unique values" when there are more than `n`
## distinct values (NA counts as a distinct value here).
print_tally <- function(x, n = 5) {
  n_levels <- length(unique(x))
  ## if (is.na(n_levels)) return (NA)
  if (n_levels > n) return("Too many unique values")
  ## Tally factor levels; per the forcats docs, fct_count() returns a
  ## tibble with columns `f` (level) and `n` (count).
  out <- x %>%
    as.character() %>%
    fct_count()
  ## Inside transmute(), data masking makes `f` and `n` resolve to the
  ## fct_count() columns (shadowing this function's own `n` argument).
  ## The original also defined a dead local `f <- "ignoreme"` that was
  ## always shadowed by the column; it has been removed.
  out <- transmute(out, count = paste0(f, ": ", n))$count
  ## Return a single string with tallied values separated by commas.
  paste0(out, collapse = ", ")
}
## Count the distinct non-NA values of `x`.  unique() keeps NA as one
## entry, so the count is corrected by subtracting one when any NA is
## present.  (The name keeps the original "distinc" spelling so existing
## callers keep working.)
count_distinc_values <- function(x) {
  length(unique(x)) - any(is.na(x))
}
##' Glance Data
##'
##' Provides a summary of data with the the following columns:
##' \describe{
##' \item{\code{name}}{Name of the column.}
##' \item{\code{type}}{Type of the column, equal to "numerical",
##' "logical", "factor", "categorical", or "NA only".}
##' \item{\code{distinct_values}}{Count of distinct values. It ignores
##' NA values. Thus, if a columns only has NAs, then the value of this
##' field will be zero.}
##' \item{\code{minimum}}{Minimum of numerical columns excluding NA
##' values.}
##' \item{\code{median}}{Median of numerical columns excluding NA
##' values.}
##' \item{\code{maximum}}{Maximum of numerical columns excluding NA
##' values.}
##' \item{\code{mean}}{Mean of numerical variables. It ignores NAs.}
##' \item{\code{sd}}{Standard deviation of numerical variables. It
##' ignores NAs.}
##' \item{\code{na_proportion}}{Proportion of NAs.}
##' \item{\code{count}}{Tally of values if the column has 5 values at
##' most. This value (5) can be modified with the parameter
##' \code{limit2tally}.}
##' \item{\code{sample_values}}{Sample of (different) values in each
##' column.}
##' }
##'
##' @param x A dataframe with named columns.
##' @param limit2tally One of the summaries is a tally of the distinct
##' values on each column. If there are too many different values
##' in a column, this summary would be meaningless. This
##' \code{limit2tally} is the limit of distinct values to
##' tally. If there are more than that it returns
##' "Too many unique values".
##' @return A \code{tibble}.
##' @importFrom tibble tibble
##' @importFrom purrr map_chr map_dbl map_int
##' @importFrom stats median sd
##' @importFrom dplyr %>%
##' @importFrom utils head
##' @examples
##' glance_data(iris)
##' @author Guillermo Basulto-Elias
##' @export
glance_data <- function(x, limit2tally = 5) {
  ## Work column-by-column over the data frame as a named list.
  cols <- as.list(x)
  ## All five numeric summaries share the same call shape; factor it out.
  num_summary <- function(fun) {
    map_dbl(cols, summarize_num_vector, fun, na.rm = TRUE)
  }
  ## First few distinct values of a column, joined into one string.
  sample_of <- function(y) {
    paste(head(unique(y)), collapse = ", ")
  }
  tibble(
    name = names(cols),
    type = map_chr(cols, check_type),
    distinct_values = map_int(cols, count_distinc_values),
    minimum = num_summary(min),
    median = num_summary(median),
    maximum = num_summary(max),
    mean = num_summary(mean),
    sd = num_summary(sd),
    na_proportion = map_dbl(cols, ~ mean(is.na(.x))),
    count = map_chr(cols, print_tally, n = limit2tally),
    sample_values = map_chr(cols, sample_of)
  )
}
|
/R/glance_data.R
|
permissive
|
gbasulto/rmiscfun
|
R
| false
| false
| 4,255
|
r
|
## -------------------------------------------------------------------
## Classify a column as "numerical", "logical", "factor", "categorical",
## or "NA only".  The first matching predicate (in that order) wins, so
## e.g. an all-NA numeric column is still reported as "numerical".
check_type <- function(x) {
  labels <- c("numerical", "logical", "factor", "categorical", "NA only")
  matches <- c(
    is.numeric(x),
    is.logical(x),
    is.factor(x),
    is.character(x),
    all(is.na(x))
  )
  ## which.max() returns the index of the first TRUE.
  idx <- which.max(matches)
  ## Guard against an unrecognized column type.
  if (!(idx %in% seq_along(labels))) stop("Column type not recognized.")
  labels[[idx]]
}
## Apply `fun` (with extra arguments `...`) to a numeric or logical
## vector; return NA instead of erroring for unsupported or all-NA input.
summarize_num_vector <- function(x, fun, ...) {
  summarizable <- is.numeric(x) || is.logical(x)
  if (summarizable && !all(is.na(x))) {
    fun(x, ...)
  } else {
    NA
  }
}
##' @importFrom dplyr %>% transmute
##' @importFrom forcats fct_count
## Tally the distinct values of `x` into a single "value: count" string,
## or return "Too many unique values" when there are more than `n`
## distinct values (NA counts as a distinct value here).
print_tally <- function(x, n = 5) {
  n_levels <- length(unique(x))
  ## if (is.na(n_levels)) return (NA)
  if (n_levels > n) return("Too many unique values")
  ## Tally factor levels; per the forcats docs, fct_count() returns a
  ## tibble with columns `f` (level) and `n` (count).
  out <- x %>%
    as.character() %>%
    fct_count()
  ## Inside transmute(), data masking makes `f` and `n` resolve to the
  ## fct_count() columns (shadowing this function's own `n` argument).
  ## The original also defined a dead local `f <- "ignoreme"` that was
  ## always shadowed by the column; it has been removed.
  out <- transmute(out, count = paste0(f, ": ", n))$count
  ## Return a single string with tallied values separated by commas.
  paste0(out, collapse = ", ")
}
## Count the distinct non-NA values of `x`.  unique() keeps NA as one
## entry, so the count is corrected by subtracting one when any NA is
## present.  (The name keeps the original "distinc" spelling so existing
## callers keep working.)
count_distinc_values <- function(x) {
  length(unique(x)) - any(is.na(x))
}
##' Glance Data
##'
##' Provides a summary of data with the the following columns:
##' \describe{
##' \item{\code{name}}{Name of the column.}
##' \item{\code{type}}{Type of the column, equal to "numerical",
##' "logical", "factor", "categorical", or "NA only".}
##' \item{\code{distinct_values}}{Count of distinct values. It ignores
##' NA values. Thus, if a columns only has NAs, then the value of this
##' field will be zero.}
##' \item{\code{minimum}}{Minimum of numerical columns excluding NA
##' values.}
##' \item{\code{median}}{Median of numerical columns excluding NA
##' values.}
##' \item{\code{maximum}}{Maximum of numerical columns excluding NA
##' values.}
##' \item{\code{mean}}{Mean of numerical variables. It ignores NAs.}
##' \item{\code{sd}}{Standard deviation of numerical variables. It
##' ignores NAs.}
##' \item{\code{na_proportion}}{Proportion of NAs.}
##' \item{\code{count}}{Tally of values if the column has 5 values at
##' most. This value (5) can be modified with the parameter
##' \code{limit2tally}.}
##' \item{\code{sample_values}}{Sample of (different) values in each
##' column.}
##' }
##'
##' @param x A dataframe with named columns.
##' @param limit2tally One of the summaries is a tally of the distinct
##' values on each column. If there are too many different values
##' in a column, this summary would be meaningless. This
##' \code{limit2tally} is the limit of distinct values to
##' tally. If there are more than that it returns
##' "Too many unique values".
##' @return A \code{tibble}.
##' @importFrom tibble tibble
##' @importFrom purrr map_chr map_dbl map_int
##' @importFrom stats median sd
##' @importFrom dplyr %>%
##' @importFrom utils head
##' @examples
##' glance_data(iris)
##' @author Guillermo Basulto-Elias
##' @export
glance_data <- function(x, limit2tally = 5) {
  ## Work column-by-column over the data frame as a named list.
  cols <- as.list(x)
  ## All five numeric summaries share the same call shape; factor it out.
  num_summary <- function(fun) {
    map_dbl(cols, summarize_num_vector, fun, na.rm = TRUE)
  }
  ## First few distinct values of a column, joined into one string.
  sample_of <- function(y) {
    paste(head(unique(y)), collapse = ", ")
  }
  tibble(
    name = names(cols),
    type = map_chr(cols, check_type),
    distinct_values = map_int(cols, count_distinc_values),
    minimum = num_summary(min),
    median = num_summary(median),
    maximum = num_summary(max),
    mean = num_summary(mean),
    sd = num_summary(sd),
    na_proportion = map_dbl(cols, ~ mean(is.na(.x))),
    count = map_chr(cols, print_tally, n = limit2tally),
    sample_values = map_chr(cols, sample_of)
  )
}
|
# 04_train_model.R -- train a cell-type classifier (SVM or random forest)
# from single-cell expression data.  Features are either per-cell Gini
# indices over marker gene sets or raw marker-gene expression values.
library(data.table)
# Load the integrated count matrix (genes x cells) and the low-resolution
# cell identities; the mouse/human input pair is switched by hand via the
# commented lines below.
#mouse
scrna.count<-data.frame(fread("~/data/integration3/molecules.integrate_mRNA.txt",header=T,sep="\t"),row.names=1)
pbmc.ident <- data.frame(fread("./../matrix/mouse/pbmc.ident_low.txt",header=T,sep="\t"))
#human
#scrna.count<-data.frame(fread("~/data/integration4/molecules.integrate_mRNA.txt",header=T,sep="\t"),row.names=1)
#pbmc.ident <- data.frame(fread("./../matrix/human/pbmc.ident_low.txt",header=T,sep="\t"))
# Keep only the cells that have an identity annotation.
scrna.count<-scrna.count[,pbmc.ident$samples]
# Build cell-level metadata: cell ID plus a "tech" tag taken from the
# first two characters of the cell name.
metadata.integrate<-matrix(ncol=2,nrow=ncol(scrna.count))
metadata.integrate[,1]<-colnames(scrna.count)
metadata.integrate[,2]<-substr(colnames(scrna.count), 1, 2)
colnames(metadata.integrate)<-c("cellID","tech")
metadata.integrate<-data.frame(metadata.integrate)
row.names(metadata.integrate)<-metadata.integrate[,1]
metadata.integrate$tech <- as.factor(metadata.integrate$tech)
levels(metadata.integrate$tech)
# Transpose to cells x genes and log-transform the counts.
scrna.count<-t(scrna.count)
scrna.1.stage<-data.frame(scrna.count)
#normalize matrix
scrna.1.stage<-log10(scrna.1.stage+1)
########
#using gini
# Gini index of a numeric vector: an inequality measure in [0, 1) where
# 0 means all values are equal.  Computed with the rank-weighted formula
# on the sorted values: G = (2 * sum(i * x_(i)) / sum(x) - (n + 1)) / n.
# Assumes non-negative values with a positive sum, as in the original.
gini.index <- function(x) {
  sorted <- sort(x)
  n <- length(sorted)
  weighted <- sum(sorted * seq_len(n))
  (2 * weighted / sum(sorted) - (n + 1)) / n
}
#using marker gene sets
# Marker reference: loads a data.frame `pbmc.markers` with per-cluster
# marker genes into the global environment.
load("/www/data/TCA/matrix/mouse/pbmc.markers_reference.rda")
#load("/www/data/TCA/matrix/human/pbmc.markers_sct.rda")
pbmc.markers<-subset(pbmc.markers, avg_logFC > 0.25)
# Re-code clusters to 1-based numbers so they can index a list.
pbmc.markers$cluster<-as.numeric(as.character(pbmc.markers$cluster))
pbmc.markers$cluster<-pbmc.markers$cluster+1
unique(pbmc.markers$cluster)
# One gene set per cluster, restricted to genes present in the matrix.
geneSets<- vector('list', max(pbmc.markers$cluster))
for (i in 1:max(pbmc.markers$cluster)){
  pbmc.markers_sub<-subset(pbmc.markers, pbmc.markers$cluster == i)
  geneSets[[i]]<-row.names(pbmc.markers_sub)
  genes_tmp<-geneSets[[i]]
  genes_tmp<-intersect(genes_tmp, colnames(scrna.1.stage))
  geneSets[[i]]<-genes_tmp
}
# Per-cell Gini index over each gene set -> one feature column per cluster.
gini.sum.names<-names(geneSets)
gini.sum<-character(0)
for (i in 1:length(geneSets)){
  if (i %% 10 == 0) print(i)
  genes_tmp<-geneSets[[i]]
  genes_tmp<-intersect(genes_tmp, colnames(scrna.1.stage))
  scrna.1.stage_sub<-scrna.1.stage[, genes_tmp]
  gini.tmp <- apply(scrna.1.stage_sub, 1, gini.index)
  # NOTE(review): growing gini.sum with cbind() inside the loop copies the
  # whole frame each iteration; preallocating would be faster.
  gini.sum<-data.frame(cbind(gini.sum, gini.tmp))
  gini.sum[,i]<-as.numeric(as.character(gini.sum[,i]))
}
colnames(gini.sum)<-gini.sum.names
gini.sum<-data.frame(gini.sum)
gini.sum[1:3,1:3]
# Impute missing Gini values with the column mean.
for(i in 1:ncol(gini.sum)){
  gini.sum[is.na(gini.sum[,i]), i] <- mean(gini.sum[,i], na.rm = TRUE)
}
library(dplyr)
# NOTE(review): the file name says "_human" although the mouse inputs are
# the active branch above -- confirm the intended species.
save(gini.sum, file = "./ML/gini.sum_human.rda")
load(file = "./ML/gini.sum_human.rda")
# Join the Gini features to the identity table; the two leading ID columns
# are dropped so that column 1 becomes the class label "identity".
gini.sum <- tibble::rownames_to_column(gini.sum, "samples")
scrna_sub_join<-dplyr::inner_join(pbmc.ident,gini.sum,by="samples")
#row.names(scrna_sub_join)<-scrna_sub_join$samples
scrna_sub_join<-scrna_sub_join[,-1];scrna_sub_join<-scrna_sub_join[,-1];colnames(scrna_sub_join)[1]<-"identity" #cluster
scrna_sub_join<-data.frame(scrna_sub_join)
#scrna_sub_join<-round(scrna_sub_join,4)
scrna_sub_join[1:3,1:3]
apply(is.na(scrna_sub_join), 2, which)
########
#using markers
# Alternative feature set: expression of significant marker genes.
#select features
marker.stage<-data.frame(fread("./../matrix/pbmc.markers.csv",header=T,sep=",",quote=""),row.names=1)
marker_sub<-subset(marker.stage,marker.stage$p_val_adj<0.001 & marker.stage$avg_logFC > 1)
markers.string<-unique(marker_sub$gene)
features<-intersect(markers.string,colnames(scrna.1.stage))
# NOTE(review): the block below immediately overwrites `features` from the
# human marker file -- only one of the two selections takes effect.
load("/www/data/TCA/matrix/human/pbmc.markers_sct.rda")
pbmc.markers<-subset(pbmc.markers, avg_logFC > 7.5 & p_val_adj<0.001)
markers.string<-pbmc.markers$gene
markers.string<-unique(markers.string)
features<-intersect(markers.string,colnames(scrna.1.stage))
#features<-c("Pou5f2","RP24-341O14.2","RP23-66E21.1","Phf2","Ssna1","Srek1","Ankar","Dnah8","RP23-440F7.2","RP24-300N16.3","Hnrnpm","Rps27a","Rdh11","Tchp","Ccp110","RP23-448A11.15","RP23-412L13.2","Gmcl1","Gm26799")
library(dplyr)
scrna_sub <- scrna.1.stage %>% dplyr::select(one_of(features))
scrna_sub <- tibble::rownames_to_column(scrna_sub, "samples")
scrna_sub_join<-inner_join(pbmc.ident,scrna_sub,by="samples")
#scrna_sub_join<-scrna_sub_join[,-1];scrna_sub_join<-scrna_sub_join[,-1] #identity
scrna_sub_join<-scrna_sub_join[,-1];scrna_sub_join<-scrna_sub_join[,-1];colnames(scrna_sub_join)[1]<-"identity" #cluster
scrna_sub_join<-data.frame(scrna_sub_join)
# random split samples into training and validation (roughly 70/30)
scrna_sub_join$identity <- as.factor(scrna_sub_join$identity)
index <- sample(2,nrow(scrna_sub_join),replace = TRUE,prob=c(0.7,0.3))
traindata <- scrna_sub_join[index==1,]
testdata <- scrna_sub_join[index==2,]
#### SVM
library(e1071)
cats_svm_model <- svm(identity~.,data=traindata,type = "C")
cats_svm_model
#save(cats_svm_model, file = "./ML/human_classification_marker_svm.RData")
#save(cats_svm_model, file = "./ML/mouse_classification_marker_svm.RData")
#save(cats_svm_model, file = "./ML/human_classification_gini.RData")
#save(cats_svm_model, file = "./ML/mouse_classification_gini.RData")
#### random forest
library(randomForest)
# NOTE(review): the random forest overwrites `cats_svm_model`, discarding
# the SVM fitted above; the accuracies below are for the forest only.
cats_svm_model <- randomForest(identity~.,data=traindata,ntree=100)
plot(cats_svm_model)
cats_svm_model
#save(cats_svm_model, file = "./ML/human_classification_marker_forest.RData")
save(cats_svm_model, file = "./ML/mouse_classification_marker_forest.RData")
# training
# Training accuracy: fraction of correctly classified training cells.
cats_svm_model_pred_1 <- predict(cats_svm_model,traindata[,-1])
cats_table_1 <- table(pred=cats_svm_model_pred_1,true=traindata[,1])
accuracy_1 <- sum(diag(cats_table_1))/sum(cats_table_1)
accuracy_1
# validation
# Held-out accuracy on the ~30% validation split.
cats_svm_model_pred_2 <- predict(cats_svm_model,testdata[,-1])
cats_table_2 <- table(pred=cats_svm_model_pred_2,true=testdata[,1])
#cats_table_2
accuracy_2 <- sum(diag(cats_table_2))/sum(cats_table_2)
accuracy_2
|
/04_train_model.R
|
no_license
|
XGCLab/TCA
|
R
| false
| false
| 5,695
|
r
|
# 04_train_model.R -- train a cell-type classifier (SVM or random forest)
# from single-cell expression data.  Features are either per-cell Gini
# indices over marker gene sets or raw marker-gene expression values.
library(data.table)
# Load the integrated count matrix (genes x cells) and the low-resolution
# cell identities; the mouse/human input pair is switched by hand via the
# commented lines below.
#mouse
scrna.count<-data.frame(fread("~/data/integration3/molecules.integrate_mRNA.txt",header=T,sep="\t"),row.names=1)
pbmc.ident <- data.frame(fread("./../matrix/mouse/pbmc.ident_low.txt",header=T,sep="\t"))
#human
#scrna.count<-data.frame(fread("~/data/integration4/molecules.integrate_mRNA.txt",header=T,sep="\t"),row.names=1)
#pbmc.ident <- data.frame(fread("./../matrix/human/pbmc.ident_low.txt",header=T,sep="\t"))
# Keep only the cells that have an identity annotation.
scrna.count<-scrna.count[,pbmc.ident$samples]
# Build cell-level metadata: cell ID plus a "tech" tag taken from the
# first two characters of the cell name.
metadata.integrate<-matrix(ncol=2,nrow=ncol(scrna.count))
metadata.integrate[,1]<-colnames(scrna.count)
metadata.integrate[,2]<-substr(colnames(scrna.count), 1, 2)
colnames(metadata.integrate)<-c("cellID","tech")
metadata.integrate<-data.frame(metadata.integrate)
row.names(metadata.integrate)<-metadata.integrate[,1]
metadata.integrate$tech <- as.factor(metadata.integrate$tech)
levels(metadata.integrate$tech)
# Transpose to cells x genes and log-transform the counts.
scrna.count<-t(scrna.count)
scrna.1.stage<-data.frame(scrna.count)
#normalize matrix
scrna.1.stage<-log10(scrna.1.stage+1)
########
#using gini
# Gini index of a numeric vector: an inequality measure in [0, 1) where
# 0 means all values are equal.  Computed with the rank-weighted formula
# on the sorted values: G = (2 * sum(i * x_(i)) / sum(x) - (n + 1)) / n.
# Assumes non-negative values with a positive sum, as in the original.
gini.index <- function(x) {
  sorted <- sort(x)
  n <- length(sorted)
  weighted <- sum(sorted * seq_len(n))
  (2 * weighted / sum(sorted) - (n + 1)) / n
}
#using marker gene sets
# Marker reference: loads a data.frame `pbmc.markers` with per-cluster
# marker genes into the global environment.
load("/www/data/TCA/matrix/mouse/pbmc.markers_reference.rda")
#load("/www/data/TCA/matrix/human/pbmc.markers_sct.rda")
pbmc.markers<-subset(pbmc.markers, avg_logFC > 0.25)
# Re-code clusters to 1-based numbers so they can index a list.
pbmc.markers$cluster<-as.numeric(as.character(pbmc.markers$cluster))
pbmc.markers$cluster<-pbmc.markers$cluster+1
unique(pbmc.markers$cluster)
# One gene set per cluster, restricted to genes present in the matrix.
geneSets<- vector('list', max(pbmc.markers$cluster))
for (i in 1:max(pbmc.markers$cluster)){
  pbmc.markers_sub<-subset(pbmc.markers, pbmc.markers$cluster == i)
  geneSets[[i]]<-row.names(pbmc.markers_sub)
  genes_tmp<-geneSets[[i]]
  genes_tmp<-intersect(genes_tmp, colnames(scrna.1.stage))
  geneSets[[i]]<-genes_tmp
}
# Per-cell Gini index over each gene set -> one feature column per cluster.
gini.sum.names<-names(geneSets)
gini.sum<-character(0)
for (i in 1:length(geneSets)){
  if (i %% 10 == 0) print(i)
  genes_tmp<-geneSets[[i]]
  genes_tmp<-intersect(genes_tmp, colnames(scrna.1.stage))
  scrna.1.stage_sub<-scrna.1.stage[, genes_tmp]
  gini.tmp <- apply(scrna.1.stage_sub, 1, gini.index)
  # NOTE(review): growing gini.sum with cbind() inside the loop copies the
  # whole frame each iteration; preallocating would be faster.
  gini.sum<-data.frame(cbind(gini.sum, gini.tmp))
  gini.sum[,i]<-as.numeric(as.character(gini.sum[,i]))
}
colnames(gini.sum)<-gini.sum.names
gini.sum<-data.frame(gini.sum)
gini.sum[1:3,1:3]
# Impute missing Gini values with the column mean.
for(i in 1:ncol(gini.sum)){
  gini.sum[is.na(gini.sum[,i]), i] <- mean(gini.sum[,i], na.rm = TRUE)
}
library(dplyr)
# NOTE(review): the file name says "_human" although the mouse inputs are
# the active branch above -- confirm the intended species.
save(gini.sum, file = "./ML/gini.sum_human.rda")
load(file = "./ML/gini.sum_human.rda")
# Join the Gini features to the identity table; the two leading ID columns
# are dropped so that column 1 becomes the class label "identity".
gini.sum <- tibble::rownames_to_column(gini.sum, "samples")
scrna_sub_join<-dplyr::inner_join(pbmc.ident,gini.sum,by="samples")
#row.names(scrna_sub_join)<-scrna_sub_join$samples
scrna_sub_join<-scrna_sub_join[,-1];scrna_sub_join<-scrna_sub_join[,-1];colnames(scrna_sub_join)[1]<-"identity" #cluster
scrna_sub_join<-data.frame(scrna_sub_join)
#scrna_sub_join<-round(scrna_sub_join,4)
scrna_sub_join[1:3,1:3]
apply(is.na(scrna_sub_join), 2, which)
########
#using markers
# Alternative feature set: expression of significant marker genes.
#select features
marker.stage<-data.frame(fread("./../matrix/pbmc.markers.csv",header=T,sep=",",quote=""),row.names=1)
marker_sub<-subset(marker.stage,marker.stage$p_val_adj<0.001 & marker.stage$avg_logFC > 1)
markers.string<-unique(marker_sub$gene)
features<-intersect(markers.string,colnames(scrna.1.stage))
# NOTE(review): the block below immediately overwrites `features` from the
# human marker file -- only one of the two selections takes effect.
load("/www/data/TCA/matrix/human/pbmc.markers_sct.rda")
pbmc.markers<-subset(pbmc.markers, avg_logFC > 7.5 & p_val_adj<0.001)
markers.string<-pbmc.markers$gene
markers.string<-unique(markers.string)
features<-intersect(markers.string,colnames(scrna.1.stage))
#features<-c("Pou5f2","RP24-341O14.2","RP23-66E21.1","Phf2","Ssna1","Srek1","Ankar","Dnah8","RP23-440F7.2","RP24-300N16.3","Hnrnpm","Rps27a","Rdh11","Tchp","Ccp110","RP23-448A11.15","RP23-412L13.2","Gmcl1","Gm26799")
library(dplyr)
scrna_sub <- scrna.1.stage %>% dplyr::select(one_of(features))
scrna_sub <- tibble::rownames_to_column(scrna_sub, "samples")
scrna_sub_join<-inner_join(pbmc.ident,scrna_sub,by="samples")
#scrna_sub_join<-scrna_sub_join[,-1];scrna_sub_join<-scrna_sub_join[,-1] #identity
scrna_sub_join<-scrna_sub_join[,-1];scrna_sub_join<-scrna_sub_join[,-1];colnames(scrna_sub_join)[1]<-"identity" #cluster
scrna_sub_join<-data.frame(scrna_sub_join)
# random split samples into training and validation (roughly 70/30)
scrna_sub_join$identity <- as.factor(scrna_sub_join$identity)
index <- sample(2,nrow(scrna_sub_join),replace = TRUE,prob=c(0.7,0.3))
traindata <- scrna_sub_join[index==1,]
testdata <- scrna_sub_join[index==2,]
#### SVM
library(e1071)
cats_svm_model <- svm(identity~.,data=traindata,type = "C")
cats_svm_model
#save(cats_svm_model, file = "./ML/human_classification_marker_svm.RData")
#save(cats_svm_model, file = "./ML/mouse_classification_marker_svm.RData")
#save(cats_svm_model, file = "./ML/human_classification_gini.RData")
#save(cats_svm_model, file = "./ML/mouse_classification_gini.RData")
#### random forest
library(randomForest)
# NOTE(review): the random forest overwrites `cats_svm_model`, discarding
# the SVM fitted above; the accuracies below are for the forest only.
cats_svm_model <- randomForest(identity~.,data=traindata,ntree=100)
plot(cats_svm_model)
cats_svm_model
#save(cats_svm_model, file = "./ML/human_classification_marker_forest.RData")
save(cats_svm_model, file = "./ML/mouse_classification_marker_forest.RData")
# training
# Training accuracy: fraction of correctly classified training cells.
cats_svm_model_pred_1 <- predict(cats_svm_model,traindata[,-1])
cats_table_1 <- table(pred=cats_svm_model_pred_1,true=traindata[,1])
accuracy_1 <- sum(diag(cats_table_1))/sum(cats_table_1)
accuracy_1
# validation
# Held-out accuracy on the ~30% validation split.
cats_svm_model_pred_2 <- predict(cats_svm_model,testdata[,-1])
cats_table_2 <- table(pred=cats_svm_model_pred_2,true=testdata[,1])
#cats_table_2
accuracy_2 <- sum(diag(cats_table_2))/sum(cats_table_2)
accuracy_2
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.