content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# data file
# Stream the gzipped power-consumption file in 100-line chunks, keeping only
# observations dated 1/2/2007 or 2/2/2007 (d/m/Y), to avoid loading ~2M rows.
f <- gzfile("household_power_consumption.txt.gz", "rt")
nolines <- 100
greped <- c()
repeat {
  lines <- readLines(f, n = nolines)
  idx <- grep("^[12]/2/2007", lines)
  greped <- c(greped, lines[idx])
  # a short read means end-of-file
  if (nolines != length(lines)) {
    break
  }
}
close(f)
# Parse the filtered lines, borrowing column names from the file header;
# '?' marks missing values in this dataset.
tc <- textConnection(greped, "rt")
df <- read.table(
  tc,
  sep = ";",
  col.names = colnames(read.table(
    "household_power_consumption.txt.gz",
    nrows = 1, header = TRUE, sep = ";"  # was partial-matched `nrow =`
  )),
  na.strings = "?"
)
close(tc)  # was leaked: the text connection stayed open
df$Date <- as.Date(df$Date, "%d/%m/%Y")
# Combine date and time into a single POSIXlt timestamp.
df$Time <- paste(df$Date, df$Time, sep = " ")
df$Time <- strptime(df$Time, "%Y-%m-%d %H:%M:%S")
# Plot 1: histogram of global active power.
png("plot1.png", width = 480, height = 480)
hist(df$Global_active_power,
     main = "Global Active power",
     col = "red",
     xlab = "Global Active Power (kilowatts)")
dev.off()  # bug fix: was `dev.off` (function never called), so the
           # device stayed open and plot1.png was never finalized
| /plot-1.R | no_license | tofighi/ExData_Plotting1 | R | false | false | 774 | r | # data file
# Stream the gzipped power-consumption file in 100-line chunks, keeping only
# observations dated 1/2/2007 or 2/2/2007 (d/m/Y), to avoid loading ~2M rows.
f <- gzfile("household_power_consumption.txt.gz", "rt")
nolines <- 100
greped <- c()
repeat {
  lines <- readLines(f, n = nolines)
  idx <- grep("^[12]/2/2007", lines)
  greped <- c(greped, lines[idx])
  # a short read means end-of-file
  if (nolines != length(lines)) {
    break
  }
}
close(f)
# Parse the filtered lines, borrowing column names from the file header;
# '?' marks missing values in this dataset.
tc <- textConnection(greped, "rt")
df <- read.table(
  tc,
  sep = ";",
  col.names = colnames(read.table(
    "household_power_consumption.txt.gz",
    nrows = 1, header = TRUE, sep = ";"  # was partial-matched `nrow =`
  )),
  na.strings = "?"
)
close(tc)  # was leaked: the text connection stayed open
df$Date <- as.Date(df$Date, "%d/%m/%Y")
# Combine date and time into a single POSIXlt timestamp.
df$Time <- paste(df$Date, df$Time, sep = " ")
df$Time <- strptime(df$Time, "%Y-%m-%d %H:%M:%S")
# Plot 1: histogram of global active power.
png("plot1.png", width = 480, height = 480)
hist(df$Global_active_power,
     main = "Global Active power",
     col = "red",
     xlab = "Global Active Power (kilowatts)")
dev.off()  # bug fix: was `dev.off` (function never called), so the
           # device stayed open and plot1.png was never finalized
|
#Libraries-----------------------------------------------------------------------
source("global/load_packages.R", local = TRUE)
#Modules-----------------------------------------------------------------------
source("modules/module_dt.R")
#Functions---------------------------------------------------------------------
# Function enables the loading page feature of this app: after a brief
# artificial startup delay, swap the "loading_page" panel for the main UI.
# NOTE(review): `hide()`/`show()` are presumably shinyjs helpers attached
# via global/load_packages.R -- confirm.
load_data <- function() {
Sys.sleep(2)
hide("loading_page")
show("main_content")
}
# Standardize data frame column names: lower-case, trim whitespace, and
# replace spaces with underscores.
standard_names <- function(x) {
  # `dplyr::funs()` is defunct since dplyr 1.0 and `rename_all()` is
  # superseded; `rename_with()` applies the lambda to every column name.
  # NOTE(review): the regex " | " is an alternation of two single spaces,
  # i.e. it just matches a space -- preserved verbatim; confirm intent.
  x %>%
    dplyr::rename_with(~ gsub(" | ", "_", trimws(tolower(.x))))
}
#Import Data--------------------------------------------------------------------------
# Column-name lookup for the water-quality data inventory tables.
colnames.df <- suppressWarnings(
data.table::fread("data/wqdi_colnames.csv",showProgress = FALSE)) %>%
standard_names()
#------------------------------------------------------------------------------
# Acronym glossary displayed in the app.
acronyms.df <- suppressWarnings(
data.table::fread("data/wqdi_acronyms.csv",showProgress = FALSE)) %>%
standard_names()
#------------------------------------------------------------------------------
# Station inventory: keep only the station identifier and coordinates.
inventory.df <- suppressWarnings(
data.table::fread("data/wqdi.csv", showProgress = FALSE)) %>%
standard_names() %>%
dplyr::rename(source_no = "monitoring_station") %>%
select(source_no, lat, long)
#------------------------------------------------------------------------------
# Program metadata; renamed so it joins on the same key as inventory.df.
meta.df <- suppressWarnings(
data.table::fread("data/icprb_metadata.csv",
showProgress = FALSE)) %>%
standard_names() %>%
dplyr::rename(organization = "originator")
#------------------------------------------------------------------------------
# Attach coordinates to metadata; normalize missing values for display.
inventory.df <- full_join(inventory.df, meta.df, by = c("source_no"))
inventory.df[inventory.df == "N/A"] <- "Unavailable"
inventory.df[is.na(inventory.df)] <- "Unavailable"
rm(meta.df) # no longer needed once joined
#------------------------------------------------------------------------------
# Columns surfaced in the program-level data table.
program.cols <- c("organization", "program_name", "site_location",
"purpose", "metric_parameter", "parameter_group",
"spatial_coverage", "fall_line", "lat_long",
"number_of_sites_sampled", "frequency_sampled",
"period_of_record_start_date", "period_of_record_end_date",
"collection_method", "update_frequency",
"public_or_restricted_data", "dataset_fees",
"data_type", "program_website", "data_link",
"contact_name", "contact_phone", "contact_email")
program.df <- inventory.df[, names(inventory.df) %in% program.cols]
# NOTE(review): "program name" (with a space) can never match a
# standard_names() column, which uses underscores -- verify intent.
site.cols <- c("organization", "program name", "station_id", "lat", "long")
site.df <- inventory.df[, names(inventory.df) %in% site.cols]
#------------------------------------------------------------------------------
# map.df <- suppressWarnings(
# data.table::fread("data/WQ_Map_Points_052218HUC_St_Cnty_nam.csv",
# showProgress = FALSE,
# data.table = FALSE)) %>%
# standard_names() %>%
# rename(county = "county_1",
# huc12 = "huc12_1",
# subwatershed = "name",
# stream_name = "gnis_name") %>%
# mutate(huc12 = paste0("0", huc12))
#leaflet.df <- inventory.df[, names(inventory.df) %in% leaflet.filter.cols]
| /shiny/shiny_wqdi/global.R | no_license | InterstateCommissionPotomacRiverBasin/wq_data_inventory | R | false | false | 3,363 | r | #Libraries-----------------------------------------------------------------------
source("global/load_packages.R", local = TRUE)
#Modules-----------------------------------------------------------------------
source("modules/module_dt.R")
#Functions---------------------------------------------------------------------
# Function enables the loading page feature of this app: after a brief
# artificial startup delay, swap the "loading_page" panel for the main UI.
# NOTE(review): `hide()`/`show()` are presumably shinyjs helpers attached
# via global/load_packages.R -- confirm.
load_data <- function() {
Sys.sleep(2)
hide("loading_page")
show("main_content")
}
# Standardize data frame column names: lower-case, trim whitespace, and
# replace spaces with underscores.
standard_names <- function(x) {
  # `dplyr::funs()` is defunct since dplyr 1.0 and `rename_all()` is
  # superseded; `rename_with()` applies the lambda to every column name.
  # NOTE(review): the regex " | " is an alternation of two single spaces,
  # i.e. it just matches a space -- preserved verbatim; confirm intent.
  x %>%
    dplyr::rename_with(~ gsub(" | ", "_", trimws(tolower(.x))))
}
#Import Data--------------------------------------------------------------------------
# Column-name lookup for the water-quality data inventory tables.
colnames.df <- suppressWarnings(
data.table::fread("data/wqdi_colnames.csv",showProgress = FALSE)) %>%
standard_names()
#------------------------------------------------------------------------------
# Acronym glossary displayed in the app.
acronyms.df <- suppressWarnings(
data.table::fread("data/wqdi_acronyms.csv",showProgress = FALSE)) %>%
standard_names()
#------------------------------------------------------------------------------
# Station inventory: keep only the station identifier and coordinates.
inventory.df <- suppressWarnings(
data.table::fread("data/wqdi.csv", showProgress = FALSE)) %>%
standard_names() %>%
dplyr::rename(source_no = "monitoring_station") %>%
select(source_no, lat, long)
#------------------------------------------------------------------------------
# Program metadata; renamed so it joins on the same key as inventory.df.
meta.df <- suppressWarnings(
data.table::fread("data/icprb_metadata.csv",
showProgress = FALSE)) %>%
standard_names() %>%
dplyr::rename(organization = "originator")
#------------------------------------------------------------------------------
# Attach coordinates to metadata; normalize missing values for display.
inventory.df <- full_join(inventory.df, meta.df, by = c("source_no"))
inventory.df[inventory.df == "N/A"] <- "Unavailable"
inventory.df[is.na(inventory.df)] <- "Unavailable"
rm(meta.df) # no longer needed once joined
#------------------------------------------------------------------------------
# Columns surfaced in the program-level data table.
program.cols <- c("organization", "program_name", "site_location",
"purpose", "metric_parameter", "parameter_group",
"spatial_coverage", "fall_line", "lat_long",
"number_of_sites_sampled", "frequency_sampled",
"period_of_record_start_date", "period_of_record_end_date",
"collection_method", "update_frequency",
"public_or_restricted_data", "dataset_fees",
"data_type", "program_website", "data_link",
"contact_name", "contact_phone", "contact_email")
program.df <- inventory.df[, names(inventory.df) %in% program.cols]
# NOTE(review): "program name" (with a space) can never match a
# standard_names() column, which uses underscores -- verify intent.
site.cols <- c("organization", "program name", "station_id", "lat", "long")
site.df <- inventory.df[, names(inventory.df) %in% site.cols]
#------------------------------------------------------------------------------
# map.df <- suppressWarnings(
# data.table::fread("data/WQ_Map_Points_052218HUC_St_Cnty_nam.csv",
# showProgress = FALSE,
# data.table = FALSE)) %>%
# standard_names() %>%
# rename(county = "county_1",
# huc12 = "huc12_1",
# subwatershed = "name",
# stream_name = "gnis_name") %>%
# mutate(huc12 = paste0("0", huc12))
#leaflet.df <- inventory.df[, names(inventory.df) %in% leaflet.filter.cols]
|
#' Classify Something
#'
#' Classifies something.
#' Generic, with method \code{\link{classified.default}}
#' @param x object of dispatch
#' @param ... passed arguments
#' @export
#' @return see methods
#' @keywords internal
#' @family classified
#' @examples
#' example(classified.default)
classified <- function(x, ...) {
  # S3 generic; classified.default is the reference implementation.
  UseMethod("classified")
}
#' Create Classified from Factor
#'
#' Creates classified from factor. Uses \code{\link{classified.default}},
#' but supplies existing levels by default.
#'
#' @export
#' @return 'classified' 'factor'
#' @param x see \code{\link{factor}}
#' @param levels passed to \code{\link{classified.default}}; defaults to \code{levels(x)}
#' @param labels passed to \code{\link{classified.default}}; must be same length as levels(after removing values in \code{exclude}) and must not contain duplicates
#' @param exclude see \code{\link{factor}}
#' @param ordered see \code{\link{factor}}
#' @param nmax see \code{\link{factor}}
#' @param token informative label for messages
#' @param ... ignored
#' @importFrom dplyr distinct
#' @family classified
#' @examples
#' a <- factor(c('c','b','a'))
#' levels(classified(a))
#' attr(classified(a), 'codelist')
classified.factor <- function(
  x = character(),
  levels,
  labels,
  exclude = NA,
  ordered = is.ordered(x),
  nmax = NA,
  token = character(0),
  ...
){
  stopifnot(is.character(token), length(token) <= 1)
  # default to the factor's own levels, minus any exclusions
  if (missing(levels)) levels <- match.fun('levels')(x)
  levels <- setdiff(levels, exclude)
  if (missing(labels)) labels <- levels
  stopifnot(identical(length(levels), length(labels)))
  if (any(duplicated(labels))) stop(
    paste(collapse = ': ', c(token, 'duplicated labels not supported in this context'))
  )
  # Bug fix: nmax was hard-coded to NA and token was not forwarded, so a
  # caller-supplied nmax never reached classified.default() and its
  # warnings lost their context label.
  y <- classified.default(
    x,
    levels = levels,
    labels = labels,
    exclude = exclude,
    ordered = ordered,
    nmax = nmax,
    token = token,
    ...
  )
  y
}
#' Create Classified by Default
#'
#' Creates a factor of subclass 'classified',
#' for which there are attribute-preserving methods.
#' In particular, classified has a codelist attribute
#' indicating the origin of its levels: it is
#' constructed from the codelist attribute of x
#' if available, or from 'levels' and 'labels'
#' by default. Unlike the case for \code{\link{factor}},
#' length of labels cannot be one (i.e., different from
#' length of levels).
#'
#' @export
#' @return 'classified' 'factor'
#' @param x see \code{\link{factor}}
#' @param levels see \code{\link{factor}}
#' @param labels see \code{\link{factor}}, must have same length as levels
#' @param exclude see \code{\link{factor}}
#' @param ordered see \code{\link{factor}}
#' @param nmax see \code{\link{factor}}
#' @param token informative label for messages
#' @param ... ignored
#' @importFrom dplyr distinct
#' @family classified
#' @examples
#'
#' # classified creates a factor with a corresponding codelist attribute
#' classified(c('a','b','c'))
#'
#' # codelist 'remembers' the origins of levels
#' classified(c('a','b','c'), labels = c('A','B','C'))
#'
#' # classified is 'reversible'
#' library(magrittr)
#' c('a','b','c') %>%
#' classified(labels = c('A','B','C')) %>%
#' unclassified
classified.default <- function(
x = character(),
levels,
labels,
exclude = NA,
ordered = is.ordered(x),
nmax = NA,
token = character(0),
...
){
cl <- attr(x,'codelist') # could be NULL
# if we have a codelist, use it
if(!is.null(cl)){
attr(x,'codelist') <- NULL
# before working with codelist, honor the exclude request
bad <- sapply(cl, function(val)val %in% exclude)
cl <- cl[!bad]
# mimic non-NA exclude behavior:
# @ 0.10.12, commenting next (nonsensical?)
# if(length(exclude) == 0) cl <- c(cl, NA)
# default levels and labels
if(missing(levels)){
levels <- unlist(cl)
}
if(missing(labels)){
labels <- names(cl)
# an unnamed codelist means levels serve as their own labels
if(is.null(labels))labels <- rep('', length(levels))
labels[labels == ''] <- levels[labels == '']
}
}
# if no codelist, set up default labels and levels
if (missing(levels)) {
y <- unique(x, nmax = nmax)
ind <- order(y)
levels <- unique(as.character(y)[ind])
levels <- setdiff(levels, exclude)
}
if(missing(labels)){
labels <- as.character(levels)
}
# at this point, levels and labels should have matching length
# should be true using defaults
if(length(levels) != length(labels))stop(
paste(
collapse = ': ',
c(
token,
'classified requires labels and levels of the same length'
)
)
)
# under some circumstances, levels has names, which may be NA
# then data.frame inherits NA rownames which is an error.
names(levels) <- NULL
names(labels) <- NULL
# pair levels with labels so duplicate pairs can be detected row-wise
codes <- data.frame(levels = levels, labels = labels)
if(any(duplicated(codes))){
duplicated <- anyDuplicated(codes)
msg <- paste0(
'dropping duplicated levels, e.g.: ',
codes$levels[[duplicated]],
' (',
codes$labels[[duplicated]],
')'
)
msg <- paste(collapse = ': ', c(token, msg))
warning(msg)
codes <- unique(codes)
}
# one level mapped to two different labels: ambiguous, warn (but keep)
if(any(duplicated(codes$levels))){
duplicated <- anyDuplicated(codes$levels)
msg <- paste0(
'level(s) cross-labelled, e.g.: ',
codes$levels[[duplicated]],
': ',
paste(
collapse = ', ',
codes$labels[codes$levels == codes$levels[[duplicated]]]
)
)
msg <- paste(collapse = ': ', token, msg)
warning(msg)
}
# two levels sharing one label: also suspicious, warn (but keep)
if(any(duplicated(codes$labels))){
duplicated <- anyDuplicated(codes$labels)
msg <- paste0(
'levels like-labelled, e.g.: ',
paste(collapse = ', ', codes$levels[codes$labels == codes$labels[[duplicated]]]),
': ',
codes$labels[[duplicated]]
)
msg <- paste(collapse = ': ', token, msg)
warning(msg)
}
# having dropped any duplicates, we unpack codes
labels <- codes$labels
levels <- codes$levels
# in every case, make a good codelist
codelist <- as.list(labels)
names(codelist) <- levels
# simplify codelist if possible
# (drop names when they add no information, i.e. names equal values)
if(identical(paste(names(codelist)), paste(unlist(codelist)))) {
names(codelist) <- NULL
# codelist <- unlist(codelist) # @v0.8.9 for consistency with other methods
}
# call factor()
z <- factor(
x = x,
levels = levels,
labels = labels,
exclude = exclude, # but exclusions will have already occurred
ordered = ordered,
nmax = nmax
)
# enforce attributes
# (copy all attributes from x except class/levels, which factor() set)
nms <- names(attributes(x))
nms <- setdiff(nms, c('class','levels'))
for(nm in nms){
attr(z, nm) <- attr(x, nm)
}
attr(z, 'codelist') <- codelist
# enforce class
class(z) <- union('classified', class(z))
# return
z
}
# Coerce to Classified
#
# Coerce something to classified.
# Generic, with method for factor.
# Deprecated. Prefer classified().
#
# @param x object
# @param ... passed arguments
# @export
# @keywords internal
# @family classified
# @return see methods
# @examples
# example(as_classified.factor)
# as_classified <- function(x, ...)UseMethod('as_classified')
# Coerce Factor to Classified
#
# Coerce factor to classified.
# Creates a factor that retains attributes during subsetting.
# Deprecated. Prefer classified().
#
# @param x factor
# @param ... ignored arguments
# @export
# @keywords internal
# @family classified
# @return class 'classified' 'factor'
# @examples
# class(as_classified(factor(letters)))
# as_classified.factor <- function(x, ...){
# class(x) <- union('classified', class(x))
# x
# }
# http://adv-r.had.co.nz/S3.html
# When implementing a vector class, you should implement these methods:
#length, [, [<-, [[, [[<-, c.
#' Subset Classified
#'
#' Subsets classified factor, retaining attributes.
#' @param x classified factor
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family classified
#' @return class 'classified' 'factor'
#' @examples
#' a <- classified(letters[1:3])
#' attr(a, 'label') <- 'foo'
#' a <- a[1:3]
#' attributes(a)
`[.classified` <- function(x, ...){
  # Subset via the factor method, then restore every attribute of x except
  # 'contrasts' and 'levels', which NextMethod() has already handled.
  out <- NextMethod()
  preserved <- setdiff(names(attributes(x)), c('contrasts', 'levels'))
  for(at in preserved) attr(out, at) <- attr(x, at)
  out
}
#' Element-select Classified
#'
#' Selects element of classified factor, retaining attributes.
#' @param x classified factor
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family classified
#' @return class 'classified' 'factor'
#' @examples
#' a <- classified(letters[1:3])
#' attr(a, 'label') <- 'foo'
#' a <- a[[2]]
#' attributes(a)
`[[.classified` <- function(x, ...){
  # Extract via the factor method, then restore every attribute of x except
  # 'contrasts' and 'levels', which NextMethod() has already handled.
  out <- NextMethod()
  preserved <- setdiff(names(attributes(x)), c('contrasts', 'levels'))
  for(at in preserved) attr(out, at) <- attr(x, at)
  out
}
#' Assign Subset of Classified
#'
#' Assigns subset of classified factor, retaining attributes.
#' @param x classified factor
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family classified
#' @return class 'classified' 'factor'
#' @examples
#' a <- classified(letters[1:3])
#' a[2:3] <- 'a'
#' str(a)
#' class(a)
`[<-.classified` <- function(x, ..., value){
  # Delegate the assignment, then re-apply x's attributes. 'class' is NOT
  # excluded below, so the classified class is implicitly restored; 'levels'
  # is left to NextMethod().
  out <- NextMethod()
  preserved <- setdiff(names(attributes(x)), 'levels')
  for(at in preserved) attr(out, at) <- attr(x, at)
  out
}
#' Assign Element of Classified
#'
#' Assigns element of classified factor, retaining attributes.
#' @param x classified factor
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family classified
#' @return class 'classified' 'factor'
#' @examples
#' a <- classified(letters[1:3])
#' a[[3]] <- 'a'
#' str(a)
#' class(a)
`[[<-.classified` <- function(x, ..., value){
  # Delegate the assignment, then re-apply x's attributes. 'class' is NOT
  # excluded below, so the classified class is implicitly restored; 'levels'
  # is left to NextMethod().
  out <- NextMethod()
  preserved <- setdiff(names(attributes(x)), 'levels')
  for(at in preserved) attr(out, at) <- attr(x, at)
  out
}
#' Combine Classified
#'
#' Combines classified factor, retaining attributes.
#' Attributes other than levels and codelist are taken
#' from the first argument. Attribute 'levels' is
#' supplied by next method. Attribute 'codelist'
#' is the combined codelists in sequence of
#' all (dots) arguments, after silently removing
#' exact duplicates, and then removing
#' duplicated names with warning.
#'
#' @param ... passed to next method
#' @param recursive passed to unlist() internally
#' @export
#' @keywords internal
#' @family classified
#' @return class 'classified' 'factor'
#' @examples
#' a <- classified(letters[1:3])
#' b <- classified(letters[3:5])
#' c <- c(a,b)
#' c
#' class(c)
#'
`c.classified` <- function( ..., recursive = TRUE ){
# Back-compatible stand-in for c.factor() (added to base in R 4.1.0):
# concatenates args, keeping 'ordered' only when every arg is ordered
# with identical levels.
c_factor <- function (..., recursive = TRUE) { # i.e. c.factor() from R 4.1.0
x <- list(...)
y <- unlist(x, recursive = recursive)
if (
inherits(y, "factor") &&
all(vapply(x, inherits,NA, "ordered")) &&
(length(unique(lapply(x, levels))) == 1L)
) class(y) <- c("ordered", "factor")
y
}
# y <- NextMethod() # not back-compatible before R 4.1.0
y <- c_factor(..., recursive = recursive)
# class and levels will have been handled
# non-levels attributes (class included) come from the first argument only
all <- list(...)
x <- all[[1]]
nms <- names(attributes(x))
nms <- setdiff(nms, c('levels')) # implicitly restore class
for(nm in nms){
attr(y, nm) <- attr(x, nm)
}
# combine levels
# concatenate every argument's codelist, in argument order
codelist <- list()
for(i in 1:length(all)){
codelist <- c(codelist, attr(all[[i]], 'codelist'))
}
# explicit names
if(is.null(names(codelist)))names(codelist) <- unlist(codelist)
# codelist names can be be NA but not blank
names(codelist)[which(names(codelist) == '')] <- unlist(codelist)[which(names(codelist) == '')]
codelist <- codelist[!duplicated(codelist)] # silently remove exact dups
if(any(duplicated(names(codelist))))warning('conflicting codelist specifications')
codelist <- codelist[!duplicated(names(codelist))]
#if(all(names(codelist) == unlist(codelist))){
# collapse to an unnamed vector when names carry no extra information
if(identical(names(codelist), as.character(unlist(codelist)))){
names(codelist) <- NULL
codelist <- unlist(codelist)
}
attr(y,'codelist') <- codelist
y
}
#' Classify Data Frame
#'
#' Coerces items in data.frame with codelist attribute to 'classified':
#' a factor with a codelist attribute.
#'
#' @param x data.frame
#' @param ... passed to \code{\link[dplyr]{select}} to limit column scope
#; also passed to \code{\link{classified.default}} to modify behavior
#' @param exclude see \code{\link{factor}}
#' @param ordered see \code{\link{factor}}
#' @param nmax see \code{\link{factor}}
#' @export
#' @keywords internal
#' @return data.frame
#' @family classified
#' @family interface
#' @examples
#' library(magrittr)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(file)
#' x %>% explicit_guide %>% decorations(Age, Race, Heart:glyco)
#' x %>% explicit_guide %>% classified %>% decorations(Age, Race, Heart:glyco)
#' x %>% explicit_guide %>% classified(Heart:glyco) %>% decorations(Age, Race, Heart:glyco)
classified.data.frame <- function(
  x,
  ...,
  exclude = NA,
  ordered = is.ordered(x),
  nmax = NA
){
  # Remember the incoming class: replacing columns of a grouped_df can drop
  # its subclass, so we restore the class afterwards.
  original_class <- class(x)
  for(col in selected(x, ...)){
    has_codelist <- 'codelist' %in% names(attributes(x[[col]]))
    if(has_codelist){
      # column name doubles as the message token for context
      x[[col]] <- classified(
        x[[col]],
        exclude = exclude,
        ordered = ordered,
        nmax = nmax,
        token = col
      )
    }
  }
  class(x) <- original_class
  x
}
#' Classify Decorated Vector
#'
#' Coerces dvec to 'classified':
#' a factor with a codelist attribute.
#' Results may differ if explicit_guide()
#' is called first.
#'
#' @param x dvec
#' @param ... un-named arguments ignored. Named arguments passed to \code{\link{classified.default}} to modify behavior
#' @param exclude see \code{\link{factor}}
#' @param ordered see \code{\link{factor}}
#' @param nmax see \code{\link{factor}}
#' @param token informative label for messages
#' @export
#' @keywords internal
#' @return classified
#' @family classified
#' @family dvec
#' @examples
#' library(magrittr)
#' x <- as_dvec(1:3)
#' attr(x, 'guide') <- list(a = 1, b = 2, c = 3)
#' x %>% str
#' x %>% classified %>% str
#' x %>% explicit_guide %>% classified %>% str
classified.dvec <- function(
  x,
  ...,
  exclude = NA,
  ordered = is.ordered(x),
  nmax = NA,
  token = character(0)
){
  # Strip the 'dvec' class, then dispatch to the appropriate classified()
  # method for the underlying vector type.
  classified(
    unclass(x),
    exclude = exclude,
    ordered = ordered,
    nmax = nmax,
    token = token,
    ...
  )
}
#' Coerce Classified to Integer
#'
#' Coerces classified to integer.
#' Result is like \code{as.integer(as.numeric(x)) + offset}
#' but has a guide attribute: a list of integers
#' whose names are the original levels of x.
#' If you need a simple integer, consider coercing first to numeric.
#'
#' @param x classified, see \code{\link{classified}}
#' @param offset an integer value to add to intermediate result
#' @param ... passed to \code{\link{desolve}}
#' @param persistence whether to return 'dvec' (is.integer(): TRUE) or just integer.
# @param exclude_attr discard these when preserving attributes of x in result
#' @export
#' @family classified
#' @return integer (possibly of class dvec)
#' @examples
#' library(magrittr)
#'
#' # create factor with codelist attribute
#' classified(c('knife','fork','spoon'))
#'
#' # give back a simple numeric
#' classified(c('knife','fork','spoon')) %>% as.numeric
#'
#' # intentionally preserve levels as 'guide' attribute
#' classified(c('knife','fork','spoon')) %>% as.integer
#'
#' # implement offset
#' classified(c('knife','fork','spoon')) %>% as.integer(-1)
#'
#' # globally defeat the 'persistence' paradigm
#' options(yamlet_persistence = FALSE)
#' c('knife','fork','spoon') %>%
#' classified %>%
#' as.integer %>%
#' class # integer
#'
#' # remove option to restore default persistence paradigm
#' options(yamlet_persistence = NULL)
#' c('knife','fork','spoon') %>%
#' classified %>%
#' as.integer %>%
#' class # dvec
#'
#' # locally defeat persistence paradigm
#' c('knife','fork','spoon') %>%
#' classified %>%
#' as.integer(persistence = FALSE) %>%
#' class # integer
#'
#'
as.integer.classified <- function(
x,
offset = 0L,
...,
persistence = getOption('yamlet_persistence', TRUE) #,
#exclude_attr = getOption("yamlet_as.integer_exclude_attr", c("class", "levels", "codelist"))
){
# offset must be a single, non-missing whole number
stopifnot(
length(offset) == 1,
!is.na(offset),
as.integer(offset) == offset
)
offset <- as.integer(offset)
# note: levels(x) should be same as unlist(attr(x, 'codelist'))
# y <- as.numeric(x, ...)
# y <- as.integer(y, ...) # explicitly casting to int as of 0.9.0
# y <- y + offset
# z <- mimic(x, y, ...) # drops levels!
# x has a codelist and seq gives integer
# rename codelist entries by their (offset) integer codes, so desolve()
# maps each level to its integer; copy-on-modify keeps the caller's x intact
vals <- seq_along(attr(x, 'codelist'))
vals <- vals + offset
names(attr(x, 'codelist')) <- vals
r <- desolve(x, persistence = TRUE, ...) # gives guide instead of codelist at 0.9.0
# at this point, r should be dvec
# passing persistence to desolve fails because there is no
# vector method for implicit_guide (only a data.frame method)
if(!persistence) {
r <- unclass(r)
}
r
}
#' Create Classified from Classified
#'
#' See \code{\link{classified.default}}.
#' Formerly (version 0.10.10), calling classified() on a
#' classified object was a non-operation.
#' Currently we call factor(x, ...) and then
#' try to reconcile the codelist attribute with resulting
#' levels.
#'
#' By default classified is idempotent, such that classified(classified(x)) is
#' the same as classified(x). In contrast, factor(factor(x)) will drop unused
#' levels (not shown). To drop unused levels, use classified(classified(x), drop = TRUE).
#'
#' @export
#' @return 'classified' 'factor'
#' @param x classified
#' @param levels passed to \code{\link{factor}}; defaults to \code{levels(x)}
#' @param labels passed to \code{\link{factor}}; must be same length as levels(after removing values in \code{exclude} and unused levels if \code{drop} is TRUE) and must not contain duplicates
#' @param exclude passed to \code{\link{factor}}
#' @param ordered passed to \code{\link{factor}}
#' @param nmax passed to \code{\link{factor}}
#' @param drop whether to drop unused levels
#' @param ... ignored
#' @keywords internal
#' @family classified
#' @examples
#'
#' a <- 4:6
#' attr(a, 'codelist') <- list(d = 4, e = 5, f = 6, g = 7)
#' b <- classified(a)
#' a
#' b
#' class(b)
#' classified(b)
#' identical(b, classified(b))
classified.classified <- function(
x,
levels,
labels,
exclude = NULL,
ordered = is.ordered(x),
nmax = NA,
drop = FALSE,
...
){
# defaults preserve the existing levels (idempotence), minus exclusions
if(missing(levels)) levels <- match.fun('levels')(x)
levels <- setdiff(levels, exclude)
if(drop) levels <- levels[levels %in% x]
if(missing(labels)) labels <- levels
stopifnot(identical(length(levels), length(labels)))
if(any(duplicated(labels)))(stop('duplicated labels not supported in this context'))
codelist <- attr(x, 'codelist')
nms <- names(codelist) # from (character)
vals <- as.character(unlist(codelist)) # to (coerced to character)
stopifnot(identical(levels(x), vals)) # continuity check: should always be true
y <- factor(
x,
levels = levels,
labels = labels,
exclude = exclude,
ordered = ordered,
nmax = nmax
)
# now we rebuild the codelist
# nms is the original form and order
# levels(y) is the current from and order
# we need a codelist with levels(y) but names from nms
# i.e., we need to (re) map names to the current levels
# the current levels though derive from the provided labels
# current level order should prevail,
# labels should be traced to provided levels,
# and thence to provided (codelist) vals,
# and thence to provided (codelist) nms
codelist <- as.list(type.convert(levels(y), as.is = TRUE))
# what provided values of 'levels' match existing values of 'levels',
# which are taken from provided 'labels'?
was <- levels[match(levels(y), labels)]
# now we have each former level for existing levels(y)
# in an order corresponding to levels(y)
# Those former levels were necessarily among the vals of former codelist.
# we recover the meanings from nms
meant <- nms[match(was, vals)]
# now we know what these levels meant originally. Possibly nothing. Possibly NA.
names(codelist) <- meant
# all this manipulation could introduce multiple NA as codelist names.
# in fact, codelist names should never be duplicated.
if(any(duplicated(meant))){
example <- meant[duplicated(meant)][[1]]
warning('codelist names should not contain duplicates, e.g. ', example)
}
# enforce attributes
# (carry over everything except those rebuilt or set below)
nms <- names(attributes(x))
nms <- setdiff(nms, c('class','levels','codelist','guide'))
for(nm in nms){
attr(y, nm) <- attr(x, nm)
}
attr(y, 'codelist') <- codelist
class(y) <- union('classified', class(y))
y
}
# Abbreviate Classified
#
# Abbreviated class name for 'classified'.
#
# @export
# @importFrom vctrs vec_ptype_abbr
# @method vec_ptype_abbr classified
# @return character
# @keywords internal
# @param x classified
# @param ... ignored
# @examples
# cat(vec_ptype_abbr(classified(0)))
# vec_ptype_abbr.classified <- function(x, ...) {
# "clsfd"
# }
#' @importFrom pillar type_sum
#' @export
pillar::type_sum
#' Summarize Type of Classified
#'
#' Summarizes type of classified.
#'
#' @param x classified
#' @importFrom pillar type_sum
#' @export
#' @keywords internal
#' @method type_sum classified
#' @examples
#' type_sum(classified(0))
type_sum.classified <- function(x){
  # fixed abbreviation shown in tibble column headers
  'clfac'
}
| /R/classified.R | no_license | bergsmat/yamlet | R | false | false | 21,822 | r | #' Classify Something
#'
#' Classifies something.
#' Generic, with method \code{\link{classified.default}}
#' @param x object of dispatch
#' @param ... passed arguments
#' @export
#' @return see methods
#' @keywords internal
#' @family classified
#' @examples
#' example(classified.default)
classified <- function(x, ...) {
  # S3 generic; classified.default is the reference implementation.
  UseMethod("classified")
}
#' Create Classified from Factor
#'
#' Creates classified from factor. Uses \code{\link{classified.default}},
#' but supplies existing levels by default.
#'
#' @export
#' @return 'classified' 'factor'
#' @param x see \code{\link{factor}}
#' @param levels passed to \code{\link{classified.default}}; defaults to \code{levels(x)}
#' @param labels passed to \code{\link{classified.default}}; must be same length as levels(after removing values in \code{exclude}) and must not contain duplicates
#' @param exclude see \code{\link{factor}}
#' @param ordered see \code{\link{factor}}
#' @param nmax see \code{\link{factor}}
#' @param token informative label for messages
#' @param ... ignored
#' @importFrom dplyr distinct
#' @family classified
#' @examples
#' a <- factor(c('c','b','a'))
#' levels(classified(a))
#' attr(classified(a), 'codelist')
classified.factor <- function(
  x = character(),
  levels,
  labels,
  exclude = NA,
  ordered = is.ordered(x),
  nmax = NA,
  token = character(0),
  ...
){
  stopifnot(is.character(token), length(token) <= 1)
  # default to the factor's own levels, minus any exclusions
  if (missing(levels)) levels <- match.fun('levels')(x)
  levels <- setdiff(levels, exclude)
  if (missing(labels)) labels <- levels
  stopifnot(identical(length(levels), length(labels)))
  if (any(duplicated(labels))) stop(
    paste(collapse = ': ', c(token, 'duplicated labels not supported in this context'))
  )
  # Bug fix: nmax was hard-coded to NA and token was not forwarded, so a
  # caller-supplied nmax never reached classified.default() and its
  # warnings lost their context label.
  y <- classified.default(
    x,
    levels = levels,
    labels = labels,
    exclude = exclude,
    ordered = ordered,
    nmax = nmax,
    token = token,
    ...
  )
  y
}
#' Create Classified by Default
#'
#' Creates a factor of subclass 'classified',
#' for which there are attribute-preserving methods.
#' In particular, classified has a codelist attribute
#' indicating the origin of its levels: it is
#' constructed from the codelist attribute of x
#' if available, or from 'levels' and 'labels'
#' by default. Unlike the case for \code{\link{factor}},
#' length of labels cannot be one (i.e., different from
#' length of levels).
#'
#' @export
#' @return 'classified' 'factor'
#' @param x see \code{\link{factor}}
#' @param levels see \code{\link{factor}}
#' @param labels see \code{\link{factor}}, must have same length as levels
#' @param exclude see \code{\link{factor}}
#' @param ordered see \code{\link{factor}}
#' @param nmax see \code{\link{factor}}
#' @param token informative label for messages
#' @param ... ignored
#' @importFrom dplyr distinct
#' @family classified
#' @examples
#'
#' # classified creates a factor with a corresponding codelist attribute
#' classified(c('a','b','c'))
#'
#' # codelist 'remembers' the origins of levels
#' classified(c('a','b','c'), labels = c('A','B','C'))
#'
#' # classified is 'reversible'
#' library(magrittr)
#' c('a','b','c') %>%
#' classified(labels = c('A','B','C')) %>%
#' unclassified
classified.default <- function(
x = character(),
levels,
labels,
exclude = NA,
ordered = is.ordered(x),
nmax = NA,
token = character(0),
...
){
cl <- attr(x,'codelist') # could be NULL
# if we have a codelist, use it
if(!is.null(cl)){
attr(x,'codelist') <- NULL
# before working with codelist, honor the exclude request
bad <- sapply(cl, function(val)val %in% exclude)
cl <- cl[!bad]
# mimic non-NA exclude behavior:
# @ 0.10.12, commenting next (nonsensical?)
# if(length(exclude) == 0) cl <- c(cl, NA)
# default levels and labels
if(missing(levels)){
levels <- unlist(cl)
}
if(missing(labels)){
labels <- names(cl)
# an unnamed codelist means levels serve as their own labels
if(is.null(labels))labels <- rep('', length(levels))
labels[labels == ''] <- levels[labels == '']
}
}
# if no codelist, set up default labels and levels
if (missing(levels)) {
y <- unique(x, nmax = nmax)
ind <- order(y)
levels <- unique(as.character(y)[ind])
levels <- setdiff(levels, exclude)
}
if(missing(labels)){
labels <- as.character(levels)
}
# at this point, levels and labels should have matching length
# should be true using defaults
if(length(levels) != length(labels))stop(
paste(
collapse = ': ',
c(
token,
'classified requires labels and levels of the same length'
)
)
)
# under some circumstances, levels has names, which may be NA
# then data.frame inherits NA rownames which is an error.
names(levels) <- NULL
names(labels) <- NULL
# pair levels with labels so duplicate pairs can be detected row-wise
codes <- data.frame(levels = levels, labels = labels)
if(any(duplicated(codes))){
duplicated <- anyDuplicated(codes)
msg <- paste0(
'dropping duplicated levels, e.g.: ',
codes$levels[[duplicated]],
' (',
codes$labels[[duplicated]],
')'
)
msg <- paste(collapse = ': ', c(token, msg))
warning(msg)
codes <- unique(codes)
}
# one level mapped to two different labels: ambiguous, warn (but keep)
if(any(duplicated(codes$levels))){
duplicated <- anyDuplicated(codes$levels)
msg <- paste0(
'level(s) cross-labelled, e.g.: ',
codes$levels[[duplicated]],
': ',
paste(
collapse = ', ',
codes$labels[codes$levels == codes$levels[[duplicated]]]
)
)
msg <- paste(collapse = ': ', token, msg)
warning(msg)
}
# two levels sharing one label: also suspicious, warn (but keep)
if(any(duplicated(codes$labels))){
duplicated <- anyDuplicated(codes$labels)
msg <- paste0(
'levels like-labelled, e.g.: ',
paste(collapse = ', ', codes$levels[codes$labels == codes$labels[[duplicated]]]),
': ',
codes$labels[[duplicated]]
)
msg <- paste(collapse = ': ', token, msg)
warning(msg)
}
# having dropped any duplicates, we unpack codes
labels <- codes$labels
levels <- codes$levels
# in every case, make a good codelist
codelist <- as.list(labels)
names(codelist) <- levels
# simplify codelist if possible
# (drop names when they add no information, i.e. names equal values)
if(identical(paste(names(codelist)), paste(unlist(codelist)))) {
names(codelist) <- NULL
# codelist <- unlist(codelist) # @v0.8.9 for consistency with other methods
}
# call factor()
z <- factor(
x = x,
levels = levels,
labels = labels,
exclude = exclude, # but exclusions will have already occurred
ordered = ordered,
nmax = nmax
)
# enforce attributes
# (copy all attributes from x except class/levels, which factor() set)
nms <- names(attributes(x))
nms <- setdiff(nms, c('class','levels'))
for(nm in nms){
attr(z, nm) <- attr(x, nm)
}
attr(z, 'codelist') <- codelist
# enforce class
class(z) <- union('classified', class(z))
# return
z
}
# Coerce to Classified
#
# Coerce something to classified.
# Generic, with method for factor.
# Deprecated. Prefer classified().
#
# @param x object
# @param ... passed arguments
# @export
# @keywords internal
# @family classified
# @return see methods
# @examples
# example(as_classified.factor)
# as_classified <- function(x, ...)UseMethod('as_classified')
# Coerce Factor to Classified
#
# Coerce factor to classified.
# Creates a factor that retains attributes during subsetting.
# Deprecated. Prefer classified().
#
# @param x factor
# @param ... ignored arguments
# @export
# @keywords internal
# @family classified
# @return class 'classified' 'factor'
# @examples
# class(as_classified(factor(letters)))
# as_classified.factor <- function(x, ...){
# class(x) <- union('classified', class(x))
# x
# }
# http://adv-r.had.co.nz/S3.html
# When implementing a vector class, you should implement these methods:
#length, [, [<-, [[, [[<-, c.
#' Subset Classified
#'
#' Subsets classified factor, retaining attributes.
#' @param x classified factor
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family classified
#' @return class 'classified' 'factor'
#' @examples
#' a <- classified(letters[1:3])
#' attr(a, 'label') <- 'foo'
#' a <- a[1:3]
#' attributes(a)
`[.classified` <- function(x, ...){
  # Delegate the actual subsetting to the factor method.
  out <- NextMethod()
  # Restore every attribute of x except those the factor
  # method manages itself (contrasts, levels); copying 'class'
  # back re-establishes the 'classified' subclass.
  keep <- setdiff(names(attributes(x)), c('contrasts', 'levels'))
  for(at in keep) attr(out, at) <- attr(x, at)
  out
}
#' Element-select Classified
#'
#' Selects element of classified factor, retaining attributes.
#' @param x classified factor
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family classified
#' @return class 'classified' 'factor'
#' @examples
#' a <- classified(letters[1:3])
#' attr(a, 'label') <- 'foo'
#' a <- a[[2]]
#' attributes(a)
`[[.classified` <- function(x, ...){
  # The factor method performs the extraction; it also manages
  # contrasts and levels, so those two attributes are left alone
  # while everything else (class, label, codelist, ...) is restored.
  out <- NextMethod()
  for(at in setdiff(names(attributes(x)), c('contrasts', 'levels'))){
    attr(out, at) <- attr(x, at)
  }
  out
}
#' Assign Subset of Classified
#'
#' Assigns subset of classified factor, retaining attributes.
#' @param x classified factor
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family classified
#' @return class 'classified' 'factor'
#' @examples
#' a <- classified(letters[1:3])
#' a[2:3] <- 'a'
#' str(a)
#' class(a)
`[<-.classified` <- function(x, ..., value){
  # Let the factor method carry out the assignment.
  out <- NextMethod()
  # The factor method manages levels; restoring 'class' along with
  # the remaining attributes re-establishes 'classified' implicitly.
  keep <- setdiff(names(attributes(x)), 'levels')
  for(at in keep) attr(out, at) <- attr(x, at)
  out
}
#' Assign Element of Classified
#'
#' Assigns element of classified factor, retaining attributes.
#' @param x classified factor
#' @param ... passed to next method
#' @export
#' @keywords internal
#' @family classified
#' @return class 'classified' 'factor'
#' @examples
#' a <- classified(letters[1:3])
#' a[[3]] <- 'a'
#' str(a)
#' class(a)
`[[<-.classified` <- function(x, ..., value){
  # Element assignment is delegated to the factor method.
  out <- NextMethod()
  # Levels are handled by the factor method; copying the rest of
  # the attributes (including 'class') preserves the subclass.
  for(at in setdiff(names(attributes(x)), 'levels')){
    attr(out, at) <- attr(x, at)
  }
  out
}
#' Combine Classified
#'
#' Combines classified factor, retaining attributes.
#' Attributes other than levels and codelist are taken
#' from the first argument. Attribute 'levels' is
#' supplied by next method. Attribute 'codelist'
#' is the combined codelists in sequence of
#' all (dots) arguments, after silently removing
#' exact duplicates, and then removing
#' duplicated names with warning.
#'
#' @param ... passed to next method
#' @param recursive passed to unlist() internally
#' @export
#' @keywords internal
#' @family classified
#' @return class 'classified' 'factor'
#' @examples
#' a <- classified(letters[1:3])
#' b <- classified(letters[3:5])
#' c <- c(a,b)
#' c
#' class(c)
#'
`c.classified` <- function( ..., recursive = TRUE ){
  # Combine classified factors into one classified factor.
  # Attributes other than levels and codelist come from the first
  # argument; the codelist is the concatenation of all arguments'
  # codelists in order, with exact duplicates dropped silently and
  # duplicated names dropped with a warning.
  c_factor <- function (..., recursive = TRUE) { # i.e. c.factor() from R 4.1.0
    x <- list(...)
    y <- unlist(x, recursive = recursive)
    if (
      inherits(y, "factor") &&
      all(vapply(x, inherits, NA, "ordered")) &&
      (length(unique(lapply(x, levels))) == 1L)
    ) class(y) <- c("ordered", "factor")
    y
  }
  # y <- NextMethod() # not back-compatible before R 4.1.0
  y <- c_factor(..., recursive = recursive)
  # class and levels will have been handled
  all <- list(...)
  x <- all[[1]]
  nms <- names(attributes(x))
  nms <- setdiff(nms, c('levels')) # implicitly restore class
  for(nm in nms){
    attr(y, nm) <- attr(x, nm)
  }
  # combine codelists in argument order
  codelist <- list()
  for(i in seq_along(all)){ # seq_along() is safe even for short argument lists
    codelist <- c(codelist, attr(all[[i]], 'codelist'))
  }
  # explicit names
  if(is.null(names(codelist))) names(codelist) <- unlist(codelist)
  # codelist names can be NA but not blank
  blank <- which(names(codelist) == '')
  names(codelist)[blank] <- unlist(codelist)[blank]
  codelist <- codelist[!duplicated(codelist)] # silently remove exact dups
  if(any(duplicated(names(codelist)))) warning('conflicting codelist specifications')
  codelist <- codelist[!duplicated(names(codelist))]
  # simplify to an unnamed vector where names add no information
  if(identical(names(codelist), as.character(unlist(codelist)))){
    names(codelist) <- NULL
    codelist <- unlist(codelist)
  }
  attr(y,'codelist') <- codelist
  y
}
#' Classify Data Frame
#'
#' Coerces items in data.frame with codelist attribute to 'classified':
#' a factor with a codelist attribute.
#'
#' @param x data.frame
#' @param ... passed to \code{\link[dplyr]{select}} to limit column scope
#' also passed to \code{\link{classified.default}} to modify behavior
#' @param exclude see \code{\link{factor}}
#' @param ordered see \code{\link{factor}}
#' @param nmax see \code{\link{factor}}
#' @export
#' @keywords internal
#' @return data.frame
#' @family classified
#' @family interface
#' @examples
#' library(magrittr)
#' file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
#' x <- decorate(file)
#' x %>% explicit_guide %>% decorations(Age, Race, Heart:glyco)
#' x %>% explicit_guide %>% classified %>% decorations(Age, Race, Heart:glyco)
#' x %>% explicit_guide %>% classified(Heart:glyco) %>% decorations(Age, Race, Heart:glyco)
classified.data.frame <- function(
  x,
  ...,
  exclude = NA,
  ordered = is.ordered(x),
  nmax = NA
){
  # Convert each selected column that carries a 'codelist'
  # attribute into a classified factor; other columns untouched.
  original_class <- class(x)
  for(col in selected(x, ...)){
    if('codelist' %in% names(attributes(x[[col]]))){
      # grouped_df can drop subclass!
      x[[col]] <- classified(
        x[[col]],
        exclude = exclude,
        ordered = ordered,
        nmax = nmax,
        token = col # column name labels any diagnostic messages
      )
    }
  }
  class(x) <- original_class # restore subclass possibly dropped above
  x
}
#' Classify Decorated Vector
#'
#' Coerces dvec to 'classified':
#' a factor with a codelist attribute.
#' Results may differ if explicit_guide()
#' is called first.
#'
#' @param x dvec
#' @param ... un-named arguments ignored. Named arguments passed to \code{\link{classified.default}} to modify behavior
#' @param exclude see \code{\link{factor}}
#' @param ordered see \code{\link{factor}}
#' @param nmax see \code{\link{factor}}
#' @param token informative label for messages
#' @export
#' @keywords internal
#' @return classified
#' @family classified
#' @family dvec
#' @examples
#' library(magrittr)
#' x <- as_dvec(1:3)
#' attr(x, 'guide') <- list(a = 1, b = 2, c = 3)
#' x %>% str
#' x %>% classified %>% str
#' x %>% explicit_guide %>% classified %>% str
classified.dvec <- function(
  x,
  ...,
  exclude = NA,
  ordered = is.ordered(x),
  nmax = NA,
  token = character(0)
){
  # Strip the 'dvec' class so that dispatch reaches
  # classified.default on the underlying vector.
  bare <- unclass(x)
  classified(
    bare,
    exclude = exclude,
    ordered = ordered,
    nmax = nmax,
    token = token,
    ...
  )
}
#' Coerce Classified to Integer
#'
#' Coerces classified to integer.
#' Result is like \code{as.integer(as.numeric(x)) + offset}
#' but has a guide attribute: a list of integers
#' whose names are the original levels of x.
#' If you need a simple integer, consider coercing first to numeric.
#'
#' @param x classified, see \code{\link{classified}}
#' @param offset an integer value to add to intermediate result
#' @param ... passed to \code{\link{desolve}}
#' @param persistence whether to return 'dvec' (is.integer(): TRUE) or just integer.
# @param exclude_attr discard these when preserving attributes of x in result
#' @export
#' @family classified
#' @return integer (possibly of class dvec)
#' @examples
#' library(magrittr)
#'
#' # create factor with codelist attribute
#' classified(c('knife','fork','spoon'))
#'
#' # give back a simple numeric
#' classified(c('knife','fork','spoon')) %>% as.numeric
#'
#' # intentionally preserve levels as 'guide' attribute
#' classified(c('knife','fork','spoon')) %>% as.integer
#'
#' # implement offset
#' classified(c('knife','fork','spoon')) %>% as.integer(-1)
#'
#' # globally defeat the 'persistence' paradigm
#' options(yamlet_persistence = FALSE)
#' c('knife','fork','spoon') %>%
#' classified %>%
#' as.integer %>%
#' class # integer
#'
#' # remove option to restore default persistence paradigm
#' options(yamlet_persistence = NULL)
#' c('knife','fork','spoon') %>%
#' classified %>%
#' as.integer %>%
#' class # dvec
#'
#' # locally defeat persistence paradigm
#' c('knife','fork','spoon') %>%
#' classified %>%
#' as.integer(persistence = FALSE) %>%
#' class # integer
#'
#'
as.integer.classified <- function(
  x,
  offset = 0L,
  ...,
  persistence = getOption('yamlet_persistence', TRUE)
){
  # offset must be a single, non-missing whole number
  stopifnot(
    length(offset) == 1,
    !is.na(offset),
    as.integer(offset) == offset
  )
  offset <- as.integer(offset)
  # note: levels(x) should be same as unlist(attr(x, 'codelist'))
  # Re-name the codelist with the (offset) integer codes so that
  # desolve() yields an integer result carrying a 'guide' attribute.
  codes <- offset + seq_along(attr(x, 'codelist'))
  names(attr(x, 'codelist')) <- codes
  out <- desolve(x, persistence = TRUE, ...) # gives guide instead of codelist at 0.9.0
  # out should now be dvec; passing persistence through to desolve fails
  # because there is no vector method for implicit_guide (only a
  # data.frame method), so the class is stripped here instead.
  if(!persistence) out <- unclass(out)
  out
}
#' Create Classified from Classified
#'
#' See \code{\link{classified.default}}.
#' Formerly (version 0.10.10), calling classified() on a
#' classified object was a non-operation.
#' Currently we call factor(x, ...) and then
#' try to reconcile the codelist attribute with resulting
#' levels.
#'
#' By default classified is idempotent, such that classified(classified(x)) is
#' the same as classified(x). In contrast, factor(factor(x)) will drop unused
#' levels (not shown). To drop unused levels, use classified(classified(x), drop = TRUE).
#'
#' @export
#' @return 'classified' 'factor'
#' @param x classified
#' @param levels passed to \code{\link{factor}}; defaults to \code{levels(x)}
#' @param labels passed to \code{\link{factor}}; must be same length as levels (after removing values in \code{exclude} and unused levels if \code{drop} is TRUE) and must not contain duplicates
#' @param exclude passed to \code{\link{factor}}
#' @param ordered passed to \code{\link{factor}}
#' @param nmax passed to \code{\link{factor}}
#' @param drop whether to drop unused levels
#' @param ... ignored
#' @keywords internal
#' @family classified
#' @examples
#'
#' a <- 4:6
#' attr(a, 'codelist') <- list(d = 4, e = 5, f = 6, g = 7)
#' b <- classified(a)
#' a
#' b
#' class(b)
#' classified(b)
#' identical(b, classified(b))
classified.classified <- function(
  x,
  levels,
  labels,
  exclude = NULL,
  ordered = is.ordered(x),
  nmax = NA,
  drop = FALSE,
  ...
){
  # Re-factor a classified object and reconcile the codelist
  # attribute with the resulting levels. Idempotent by default:
  # unused levels are retained unless drop = TRUE.
  # match.fun() fetches base levels() despite the 'levels' argument name
  if(missing(levels)) levels <- match.fun('levels')(x)
  levels <- setdiff(levels, exclude)
  if(drop) levels <- levels[levels %in% x]
  if(missing(labels)) labels <- levels
  stopifnot(identical(length(levels), length(labels)))
  if(any(duplicated(labels)))(stop('duplicated labels not supported in this context'))
  codelist <- attr(x, 'codelist')
  nms <- names(codelist) # from (character)
  vals <- as.character(unlist(codelist)) # to (coerced to character)
  stopifnot(identical(levels(x), vals)) # continuity check: should always be true
  y <- factor(
    x,
    levels = levels,
    labels = labels,
    exclude = exclude,
    ordered = ordered,
    nmax = nmax
  )
  # now we rebuild the codelist
  # nms is the original form and order
  # levels(y) is the current form and order
  # we need a codelist with levels(y) but names from nms
  # i.e., we need to (re) map names to the current levels
  # the current levels though derive from the provided labels
  # current level order should prevail,
  # labels should be traced to provided levels,
  # and thence to provided (codelist) vals,
  # and thence to provided (codelist) nms
  codelist <- as.list(type.convert(levels(y), as.is = TRUE))
  # what provided values of 'levels' match existing values of 'levels',
  # which are taken from provided 'labels'?
  was <- levels[match(levels(y), labels)]
  # now we have each former level for existing levels(y)
  # in an order corresponding to levels(y)
  # Those former levels were necessarily among the vals of former codelist.
  # we recover the meanings from nms
  meant <- nms[match(was, vals)]
  # now we know what these levels meant originally. Possibly nothing. Possibly NA.
  names(codelist) <- meant
  # all this manipulation could introduce multiple NA as codelist names.
  # in fact, codelist names should never be duplicated.
  if(any(duplicated(meant))){
    example <- meant[duplicated(meant)][[1]]
    warning('codelist names should not contain duplicates, e.g. ', example)
  }
  # enforce attributes: copy everything from x except the four
  # attributes rebuilt (or deliberately dropped) below
  nms <- names(attributes(x))
  nms <- setdiff(nms, c('class','levels','codelist','guide'))
  for(nm in nms){
    attr(y, nm) <- attr(x, nm)
  }
  attr(y, 'codelist') <- codelist
  class(y) <- union('classified', class(y))
  y
}
# Abbreviate Classified
#
# Abbreviated class name for 'classified'.
#
# @export
# @importFrom vctrs vec_ptype_abbr
# @method vec_ptype_abbr classified
# @return character
# @keywords internal
# @param x classified
# @param ... ignored
# @examples
# cat(vec_ptype_abbr(classified(0)))
# vec_ptype_abbr.classified <- function(x, ...) {
# "clsfd"
# }
#' @importFrom pillar type_sum
#' @export
pillar::type_sum
#' Summarize Type of Classified
#'
#' Summarizes type of classified.
#'
#' @param x classified
#' @importFrom pillar type_sum
#' @export
#' @keywords internal
#' @method type_sum classified
#' @examples
#' type_sum(classified(0))
type_sum.classified <- function(x){
  # abbreviation displayed in tibble column headers
  "clfac"
}
|
# Fit a cross-validated elastic-net model (alpha = 0.65) on the
# urinary tract training set and append the fit path to a log file.
library(glmnet)
mydata <- read.table("./TrainingSet/RF/urinary_tract.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)]) # predictors: columns 4..end
y <- as.matrix(mydata[, 1])              # response: first column
set.seed(123)                            # reproducible CV folds
glm <- cv.glmnet(x, y,
                 nfolds = 10, type.measure = "mae",
                 alpha = 0.65, family = "gaussian", standardize = TRUE)
sink('./Model/EN/Classifier/urinary_tract/urinary_tract_071.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Classifier/urinary_tract/urinary_tract_071.R | no_license | leon1003/QSMART | R | false | false | 371 | r | library(glmnet)
mydata = read.table("./TrainingSet/RF/urinary_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.65,family="gaussian",standardize=TRUE)
sink('./Model/EN/Classifier/urinary_tract/urinary_tract_071.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
#input Harry Potter books: install the 'harrypotter' data package from GitHub
# NOTE(review): 'input' captures the value of the if-expression (NULL when
# devtools is already >= 1.6) -- the assignment looks unintentional; confirm
# it is unused downstream. Also packageVersion() itself requires devtools
# to be installed already -- verify intent.
input <- if (packageVersion("devtools") < 1.6) {
install.packages("devtools")}
# devtools provides install_github()
devtools::install_github("bradleyboehmke/harrypotter")
| /Team 1/inputHPBW.R | no_license | PHP2560-Statistical-Programming-R/text-mining-review-all-join-this-team | R | false | false | 162 | r | #input Harry Potter books
input <- if (packageVersion("devtools") < 1.6) {
install.packages("devtools")}
devtools::install_github("bradleyboehmke/harrypotter")
|
library(TMDb)
### Name: network
### Title: Get the name of a TV network by ID.
### Aliases: network
### Keywords: network
### ** Examples
## Not run:
##D ## An example of an authenticated request,
##D ## where api_key is fictitious.
##D ## You can obtain your own at https://www.themoviedb.org/documentation/api
##D
##D api_key <- "key"
##D
##D network(api_key = api_key, id = 49)
## End(Not run)
| /data/genthat_extracted_code/TMDb/examples/network.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 408 | r | library(TMDb)
### Name: network
### Title: Get the name of a TV network by ID.
### Aliases: network
### Keywords: network
### ** Examples
## Not run:
##D ## An example of an authenticated request,
##D ## where api_key is fictitious.
##D ## You can obtain your own at https://www.themoviedb.org/documentation/api
##D
##D api_key <- "key"
##D
##D network(api_key = api_key, id = 49)
## End(Not run)
|
## tests for two phase III trials ##

# Monotonicity: with all other inputs fixed, larger assumed treatment
# effects (Delta1, Delta2) must not decrease the expected utility
# returned by utility2_normal() -- here with fixed effects (fixed = TRUE).
test_that("Higher treatment effect leads to higher utility", {
  skip_on_cran()
  expect_lte(utility2_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
                            Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
                            a = 0.25, b = 0.75,
                            c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
                            K = Inf, N = Inf, S = -Inf,
                            b1 = 3000, b2 = 8000, b3 = 10000,
                            case = 2, fixed = TRUE)[1],
             utility2_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
                            Delta1 = 0.5, Delta2 = 0.8, in1 = 300, in2 = 600,
                            a = 0.25, b = 0.75,
                            c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
                            K = Inf, N = Inf, S = -Inf,
                            b1 = 3000, b2 = 8000, b3 = 10000,
                            case = 2, fixed = TRUE)[1])
})
# Same monotonicity check with a prior distribution on the treatment
# effect (fixed = FALSE) and case = 1.
test_that("Higher treatment effect leads to higher utility", {
  skip_on_cran()
  expect_lte(utility2_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
                            Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
                            a = 0.25, b = 0.75,
                            c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
                            K = Inf, N = Inf, S = -Inf,
                            b1 = 3000, b2 = 8000, b3 = 10000,
                            case = 1, fixed = FALSE)[1],
             utility2_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
                            Delta1 = 0.5, Delta2 = 0.8, in1 = 300, in2 = 600,
                            a = 0.25, b = 0.75,
                            c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
                            K = Inf, N = Inf, S = -Inf,
                            b1 = 3000, b2 = 8000, b3 = 10000,
                            case = 1, fixed = FALSE)[1])
})
# Sentinel value: an unattainable success-probability constraint
# (S = 0.8) must make utility2_normal() return -9999.
test_that("Function returns -9999 if constraint can not be satisfied", {
  skip_on_cran()
  expect_equal(utility2_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
                            Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
                            a = 0.25, b = 0.75,
                            c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
                            K = Inf, N = Inf, S = 0.8,
                            b1 = 3000, b2 = 8000, b3 = 10000,
                            case = 2, fixed = FALSE)[1], -9999)
})
## tests for three phase III trials ##

# Monotonicity of utility3_normal() in the assumed treatment effect,
# fixed-effects setting (fixed = TRUE), case = 3.
test_that("Higher treatment effect leads to higher utility", {
  skip_on_cran()
  expect_lte(utility3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
                            Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
                            a = 0.25, b = 0.75,
                            c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
                            K = Inf, N = Inf, S = -Inf,
                            b1 = 3000, b2 = 8000, b3 = 10000,
                            case = 3, fixed = TRUE)[1],
             utility3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
                            Delta1 = 0.5, Delta2 = 0.8, in1 = 300, in2 = 600,
                            a = 0.25, b = 0.75,
                            c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
                            K = Inf, N = Inf, S = -Inf,
                            b1 = 3000, b2 = 8000, b3 = 10000,
                            case = 3, fixed = TRUE)[1])
})
# Same monotonicity check with a prior on the effect (fixed = FALSE), case = 2.
test_that("Higher treatment effect leads to higher utility", {
  skip_on_cran()
  expect_lte(utility3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
                            Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
                            a = 0.25, b = 0.75,
                            c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
                            K = Inf, N = Inf, S = -Inf,
                            b1 = 3000, b2 = 8000, b3 = 10000,
                            case = 2, fixed = FALSE)[1],
             utility3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
                            Delta1 = 0.5, Delta2 = 0.8, in1 = 300, in2 = 600,
                            a = 0.25, b = 0.75,
                            c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
                            K = Inf, N = Inf, S = -Inf,
                            b1 = 3000, b2 = 8000, b3 = 10000,
                            case = 2, fixed = FALSE)[1])
})
# Sentinel value when the success-probability constraint S = 0.8
# cannot be met.
test_that("Function returns -9999 if constraint can not be satisfied", {
  skip_on_cran()
  expect_equal(utility3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
                            Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
                            a = 0.25, b = 0.75,
                            c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
                            K = Inf, N = Inf, S = 0.8,
                            b1 = 3000, b2 = 8000, b3 = 10000,
                            case = 2, fixed = TRUE)[1], -9999)
})
# EPsProg3_normal: the probability of a "small" effect outcome is a
# subset of (hence no larger than) the probability over all sizes.
test_that("Probability for small is smaller than all", {
  skip_on_cran()
  expect_lte(EPsProg3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1,
                            w = 0.3, Delta1 = 0.375, Delta2 = 0.625,
                            in1 = 300, in2 = 600, a = 0.25, b = 0.75,
                            case = 3, size = "small", fixed = FALSE),
             EPsProg3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1,
                            w = 0.3, Delta1 = 0.375, Delta2 = 0.625,
                            in1 = 300, in2 = 600, a = 0.25, b = 0.75,
                            case = 3, size = "all", fixed = FALSE))
})
# Same subset property for the "large" effect outcome.
test_that("Probability for large is smaller than all", {
  skip_on_cran()
  expect_lte(EPsProg3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1,
                            w = 0.3, Delta1 = 0.375, Delta2 = 0.625,
                            in1 = 300, in2 = 600, a = 0.25, b = 0.75,
                            case = 3, size = "large", fixed = FALSE),
             EPsProg3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1,
                            w = 0.3, Delta1 = 0.375, Delta2 = 0.625,
                            in1 = 300, in2 = 600, a = 0.25, b = 0.75,
                            case = 3, size = "all", fixed = FALSE))
})
## tests for four phase III trials ##

# Monotonicity of utility4_normal() in the assumed treatment effect,
# fixed-effects setting (fixed = TRUE), case = 4.
test_that("Higher treatment effect leads to higher utility", {
  # added for consistency: every other long-running utility check in
  # this file skips on CRAN; this one was the only exception
  skip_on_cran()
  expect_lte(utility4_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
                            Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
                            a = 0.25, b = 0.75,
                            c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
                            K = Inf, N = Inf, S = -Inf,
                            b1 = 3000, b2 = 8000, b3 = 10000,
                            case = 4, fixed = TRUE)[1],
             utility4_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
                            Delta1 = 0.5, Delta2 = 0.8, in1 = 300, in2 = 600,
                            a = 0.25, b = 0.75,
                            c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
                            K = Inf, N = Inf, S = -Inf,
                            b1 = 3000, b2 = 8000, b3 = 10000,
                            case = 4, fixed = TRUE)[1])
})
# Same utility4_normal() monotonicity check with a prior on the
# treatment effect (fixed = FALSE), case = 3.
test_that("Higher treatment effect leads to higher utility", {
  skip_on_cran()
  expect_lte(utility4_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
                            Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
                            a = 0.25, b = 0.75,
                            c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
                            K = Inf, N = Inf, S = -Inf,
                            b1 = 3000, b2 = 8000, b3 = 10000,
                            case = 3, fixed = FALSE)[1],
             utility4_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
                            Delta1 = 0.5, Delta2 = 0.8, in1 = 300, in2 = 600,
                            a = 0.25, b = 0.75,
                            c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
                            K = Inf, N = Inf, S = -Inf,
                            b1 = 3000, b2 = 8000, b3 = 10000,
                            case = 3, fixed = FALSE)[1])
})
## tests for case 23 ##
# Regression check: 1844.2313 is a previously validated snapshot of the
# expected utility for these exact inputs.
test_that("utility23_normal works",{
  skip_on_cran()
  expect_equal(utility23_normal(n2 = 50, kappa = 0.2, w = 0.3,
                            alpha = 0.025, beta = 0.1, Delta1 = 0.375, Delta2 = 0.625,
                            in1 = 300, in2 = 600, a = 0.25, b = 0.75,
                            c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
                            b1 = 3000, b2 = 8000, b3 = 10000)[1], 1844.2313)
})
| /tests/testthat/test-functions_multitrial_normal.R | permissive | Sterniii3/drugdevelopR | R | false | false | 9,247 | r | ## tests for two phase III trials ##
test_that("Higher treatment effect leads to higher utility", {
skip_on_cran()
expect_lte(utility2_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
a = 0.25, b = 0.75,
c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
K = Inf, N = Inf, S = -Inf,
b1 = 3000, b2 = 8000, b3 = 10000,
case = 2, fixed = TRUE)[1],
utility2_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
Delta1 = 0.5, Delta2 = 0.8, in1 = 300, in2 = 600,
a = 0.25, b = 0.75,
c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
K = Inf, N = Inf, S = -Inf,
b1 = 3000, b2 = 8000, b3 = 10000,
case = 2, fixed = TRUE)[1])
})
test_that("Higher treatment effect leads to higher utility", {
skip_on_cran()
expect_lte(utility2_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
a = 0.25, b = 0.75,
c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
K = Inf, N = Inf, S = -Inf,
b1 = 3000, b2 = 8000, b3 = 10000,
case = 1, fixed = FALSE)[1],
utility2_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
Delta1 = 0.5, Delta2 = 0.8, in1 = 300, in2 = 600,
a = 0.25, b = 0.75,
c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
K = Inf, N = Inf, S = -Inf,
b1 = 3000, b2 = 8000, b3 = 10000,
case = 1, fixed = FALSE)[1])
})
test_that("Function returns -9999 if constraint can not be satisfied", {
skip_on_cran()
expect_equal(utility2_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
a = 0.25, b = 0.75,
c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
K = Inf, N = Inf, S = 0.8,
b1 = 3000, b2 = 8000, b3 = 10000,
case = 2, fixed = FALSE)[1], -9999)
})
## tests for three phase III trials ##
test_that("Higher treatment effect leads to higher utility", {
skip_on_cran()
expect_lte(utility3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
a = 0.25, b = 0.75,
c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
K = Inf, N = Inf, S = -Inf,
b1 = 3000, b2 = 8000, b3 = 10000,
case = 3, fixed = TRUE)[1],
utility3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
Delta1 = 0.5, Delta2 = 0.8, in1 = 300, in2 = 600,
a = 0.25, b = 0.75,
c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
K = Inf, N = Inf, S = -Inf,
b1 = 3000, b2 = 8000, b3 = 10000,
case = 3, fixed = TRUE)[1])
})
test_that("Higher treatment effect leads to higher utility", {
skip_on_cran()
expect_lte(utility3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
a = 0.25, b = 0.75,
c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
K = Inf, N = Inf, S = -Inf,
b1 = 3000, b2 = 8000, b3 = 10000,
case = 2, fixed = FALSE)[1],
utility3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
Delta1 = 0.5, Delta2 = 0.8, in1 = 300, in2 = 600,
a = 0.25, b = 0.75,
c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
K = Inf, N = Inf, S = -Inf,
b1 = 3000, b2 = 8000, b3 = 10000,
case = 2, fixed = FALSE)[1])
})
test_that("Function returns -9999 if constraint can not be satisfied", {
skip_on_cran()
expect_equal(utility3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
a = 0.25, b = 0.75,
c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
K = Inf, N = Inf, S = 0.8,
b1 = 3000, b2 = 8000, b3 = 10000,
case = 2, fixed = TRUE)[1], -9999)
})
test_that("Probability for small is smaller than all", {
skip_on_cran()
expect_lte(EPsProg3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1,
w = 0.3, Delta1 = 0.375, Delta2 = 0.625,
in1 = 300, in2 = 600, a = 0.25, b = 0.75,
case = 3, size = "small", fixed = FALSE),
EPsProg3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1,
w = 0.3, Delta1 = 0.375, Delta2 = 0.625,
in1 = 300, in2 = 600, a = 0.25, b = 0.75,
case = 3, size = "all", fixed = FALSE))
})
test_that("Probability for large is smaller than all", {
skip_on_cran()
expect_lte(EPsProg3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1,
w = 0.3, Delta1 = 0.375, Delta2 = 0.625,
in1 = 300, in2 = 600, a = 0.25, b = 0.75,
case = 3, size = "large", fixed = FALSE),
EPsProg3_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1,
w = 0.3, Delta1 = 0.375, Delta2 = 0.625,
in1 = 300, in2 = 600, a = 0.25, b = 0.75,
case = 3, size = "all", fixed = FALSE))
})
## tests for four phase III trials ##
test_that("Higher treatment effect leads to higher utility", {
expect_lte(utility4_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
a = 0.25, b = 0.75,
c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
K = Inf, N = Inf, S = -Inf,
b1 = 3000, b2 = 8000, b3 = 10000,
case = 4, fixed = TRUE)[1],
utility4_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
Delta1 = 0.5, Delta2 = 0.8, in1 = 300, in2 = 600,
a = 0.25, b = 0.75,
c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
K = Inf, N = Inf, S = -Inf,
b1 = 3000, b2 = 8000, b3 = 10000,
case = 4, fixed = TRUE)[1])
})
test_that("Higher treatment effect leads to higher utility", {
skip_on_cran()
expect_lte(utility4_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
Delta1 = 0.375, Delta2 = 0.625, in1 = 300, in2 = 600,
a = 0.25, b = 0.75,
c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
K = Inf, N = Inf, S = -Inf,
b1 = 3000, b2 = 8000, b3 = 10000,
case = 3, fixed = FALSE)[1],
utility4_normal(kappa = 0.1, n2 = 50, alpha = 0.025, beta = 0.1, w = 0.3,
Delta1 = 0.5, Delta2 = 0.8, in1 = 300, in2 = 600,
a = 0.25, b = 0.75,
c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
K = Inf, N = Inf, S = -Inf,
b1 = 3000, b2 = 8000, b3 = 10000,
case = 3, fixed = FALSE)[1])
})
## tests for case 23 ##
test_that("utility23_normal works",{
skip_on_cran()
expect_equal(utility23_normal(n2 = 50, kappa = 0.2, w = 0.3,
alpha = 0.025, beta = 0.1, Delta1 = 0.375, Delta2 = 0.625,
in1 = 300, in2 = 600, a = 0.25, b = 0.75,
c2 = 0.675, c3 = 0.72, c02 = 15, c03 = 20,
b1 = 3000, b2 = 8000, b3 = 10000)[1], 1844.2313)
})
|
##################################################################################################################"
# #
# Calculating gene pool-specific genomic relationship matrices #
# #
# Juliette Archambeau #
# 18/03/2022 #
# #
##################################################################################################################"
# In this script, we calculate the gene-pool specific genomic relationship matrices (GRM) following the steps below:
# 1/ keeping only alleles that are not associated with height (i.e. the 'neutral' alleles)
# 2/ calculate a GRM based on all gene pools
# 3/ calculate gene-pool specific GRMs
# More details can be found here:
# - for steps 1 and 2: https://github.com/JulietteArchambeau/HeightPinpinClonapin/blob/master/reports/ExplanatoryVariables/GenomicRelationshipMatrices.Rmd
# - for step 3: https://github.com/JulietteArchambeau/HeightPinpinClonapin/blob/master/reports/ExplanatoryVariables/GenePoolSpecificKinshipMatrix.Rmd
# Packages:
library(readr) # CRAN v1.3.1
library(AGHmatrix) # CRAN v1.0.2
library(gplots) # CRAN v3.0.1.2
library(matrixcalc) # CRAN v1.0-3
library(Matrix) # CRAN v1.2-18
library(tidyverse) # CRAN v1.3.0
library(bdsmatrix) # CRAN v1.3-3
# Load the genomic data:
geno <- read.csv("~/Documents/Pinpin_Clonapin/HeightPinpinClonapin/data_DRYAD/GenomicData_5165SNPs_523clones.csv", row.names=1)
# 1/ Splitting 'neutral' and height-associated SNPs
# =================================================
# We use the outputs from a previous study: de Miguel et al. 2022.
# Here the link to the study: https://onlinelibrary.wiley.com/doi/full/10.1111/mec.16367?casa_token=1nNTc88Iy40AAAAA%3ALd4EOK5ehk_cEHIkw5A9l8nk0NPzUzlYPX8eAjVCikIjHP0WJ1kxoHJSZjMLFsZcP-8wdbNuNrlOfp1jzw
# In this study, the authors used the implemented in Bayesian variable selection regression implemented in the piMASS software to identify the SNPs associated with height
# in the five CLONAPIN common gardens. They found about 350 height associated SNPs.
# Here we use the piMASS outputs from de Miguel et al. (2022) to remove the SNPs potentially associated with height
# and calculate the GRM only based on 'neutral' SNPs.
# piMASS = Posterior inference using Model Averaging and Subset Selection. Details here: https://stephenslab.uchicago.edu/software.html#pimass
# We load the piMASS outputs:
beta.snp <- read_delim("data_DRYAD/height_all_sites_res.mcmc.txt", "\t", escape_double = FALSE, trim_ws = TRUE)
# To select about 350 SNPs considered to be associated with height,
# we set a threshold of 0.006 (matching the code below) for the absolute values
# of the Rao-Blackwellized estimates of the posterior effect size (column 'betarb');
# with this threshold, we identify 322 SNPs associated with height.
# 322 height-associated SNPs (|betarb| >= 0.006)
snps.h <- beta.snp$rs[abs(beta.snp$betarb)>0.006|abs(beta.snp$betarb)==0.006]
# 4,843 SNPs not associated with height ('neutral', |betarb| < 0.006)
snps.n <- beta.snp$rs[abs(beta.snp$betarb)<0.006]
# We subset the genomic dataset 'geno' to obtain a dataset with only the genotypes of SNPs not associated with height.
geno <- geno[row.names(geno) %in% snps.n,] # dataset with 4843 SNPs and 523 clones
# 2/ Estimating the GRM based on all gene pools
# =============================================
# We use the function 'Gmatrix' of the package `AGHmatrix` to calculate the GRM with the VanRaden method.
# we format the genomic data for the 'Gmatrix' function:
geno[,1:dim(geno)[[2]]] <- apply(geno[ , 1:dim(geno)[[2]]], 2, function(x) as.numeric(as.character(x))) # we replace integers by numeric values
mat <- t(as.matrix(geno)) # matrix with SNPs in columns and clones in rows
# estimate the GRM:
GRM <- Gmatrix(mat,method="VanRaden", missingValue = NA, verify.posdef=T)
# to vizualize the GRM:
heatmap(GRM)
# NOTE(review): brewer.pal() belongs to RColorBrewer, which is not loaded
# in the library() calls above -- confirm library(RColorBrewer) is run elsewhere.
heatmap.2(as.matrix(GRM),scale="row",col=brewer.pal(11,"RdBu"),trace="none")
# The matrix has to be positive definite
# but this is not the case, 1 eigen value is lower than 0
sum(eigen(GRM)$values<0)
eigen(GRM)$values[eigen(GRM)$values<0]
# so we use of the 'nearPD' function of the package 'Matrix' to correct it:
GRM <- as.matrix(nearPD(GRM)$mat)
is.positive.definite(GRM) # now that's ok, the matrix is positive definite
# 3/ Estimating gene pool-specific GRM
# ====================================
# We follow the methodology of Muff et al. 2019
# Here the link to the paper: https://gsejournal.biomedcentral.com/track/pdf/10.1186/s12711-019-0449-7.pdf
# 3.a/ Building the Q-matrix
# ==========================
# We need the Q matrix describing the genetic population structure,
# i.e. the proportion of assignement of each clone to each gene pool
# we load the dataset containing the population structure data and keep only one row per clone
Qmat <- read_csv("data_DRYAD/HeightClimateSoilData_33121obs_32variables.csv") %>% as.data.frame()
Qmat <- Qmat[,c("clon",paste0("Q",rep(1:6)))]
Qmat <- unique(Qmat)
# Due to approximations, there are 18 negative values (with a value of -0.001) in the gene pool Q6
sum(Qmat[,c(paste0("Q",rep(1:6)))]<0)
filter_at(Qmat,c(paste0("Q",rep(1:6))),any_vars(. < 0))
# we set these negative values to 0.
Qmat$Q6[Qmat$Q6<0] <- 0
# clones as rownames instead of in the first column
row.names(Qmat) <- Qmat$clon
Qmat$clon <- NULL
# 3.b/ Generalized Cholesky decomposition of the GRM
# ==================================================
A.gchol <- gchol(GRM)
# Matrix T
T <- as.matrix(A.gchol)
# lower triangular matrix with diagonal elements equal to 1
# and elements below the diagonal in the respective column correspond to the expected proportion of the genome that is shared among clones.
# Transpose of matrix T
Tt <- t(as.matrix(A.gchol))
# Diagonal matrix D
diag.A <- diag(A.gchol) # vector of numeric values
D <- Diagonal(x=diag.A)
# 3.c/ Gene pool-specific matrices
# ================================
diij <- rep(1,523)
Dj <- Diagonal(x=diij)
# GRM specific to the Q1 gene pool, i.e. the Northern Africa gene pool
D1 <- Diagonal(x=Qmat[,1])
T1 <- T %*% D1
A1 <- T1 %*% Dj %*% t(T1)
is.symmetric.matrix(as.matrix(A1)) # The matrix is symetric
is.positive.definite(as.matrix(A1)) # The matrix is not positive definite
A1 <- nearPD(A1) # we approximate the matrix to the nearest positive definite matrix
write.csv(as.matrix(A1$mat), file= paste0("data_DRYAD/GRM_A1.csv"))
# We do the same for the other gene pools
# Q2 gene pool, i.e. the Corsican gene pool
D2 <- Diagonal(x=Qmat[,2])
T2 <- T %*% D2
A2 <- T2 %*% Dj %*% t(T2)
is.symmetric.matrix(as.matrix(A2))
is.positive.definite(as.matrix(A2))
A2 <- nearPD(A2)
write.csv(as.matrix(A2$mat), file= paste0("data_DRYAD/GRM_A2.csv"))
# Q3 gene pool, i.e. the Central Spain gene pool
D3 <- Diagonal(x=Qmat[,3])
T3 <- T %*% D3
A3 <- T3 %*% Dj %*% t(T3)
is.symmetric.matrix(as.matrix(A3))
is.positive.definite(as.matrix(A3))
A3 <- nearPD(A3)
write.csv(as.matrix(A3$mat), file= paste0("data_DRYAD/GRM_A3.csv"))
# Q4 gene pool, i.e. the Atlantic French gene pool
D4 <- Diagonal(x=Qmat[,4])
T4 <- T %*% D4
A4 <- T4 %*% Dj %*% t(T4)
is.symmetric.matrix(as.matrix(A4))
is.positive.definite(as.matrix(A4))
A4 <- nearPD(A4)
write.csv(as.matrix(A4$mat), file= paste0("data_DRYAD/GRM_A4.csv"))
# Q5 gene pool, i.e. the Atlantic Iberian gene pool
D5 <- Diagonal(x=Qmat[,5])
T5 <- T %*% D5
A5 <- T5 %*% Dj %*% t(T5)
is.symmetric.matrix(as.matrix(A5))
is.positive.definite(as.matrix(A5))
A5 <- nearPD(A5)
write.csv(as.matrix(A5$mat), file= paste0("data_DRYAD/GRM_A5.csv"))
# Q6 gene pool, i.e. the south-eastern Spain gene pool
D6 <- Diagonal(x=Qmat[,6])
T6 <- T %*% D6
A6 <- T6 %*% Dj %*% t(T6)
is.symmetric.matrix(as.matrix(A6))
is.positive.definite(as.matrix(A6))
A6 <- nearPD(A6)
write.csv(as.matrix(A6$mat), file= paste0("data_DRYAD/GRM_A6.csv"))
| /scripts_DRYAD/4_CalculateGenePoolSpecificGRM.R | no_license | JulietteArchambeau/HeightPinpinClonapin | R | false | false | 8,354 | r | ##################################################################################################################"
# #
# Calculating gene pool-specific genomic relationship matrices #
# #
# Juliette Archambeau #
# 18/03/2022 #
# #
##################################################################################################################"
# In this script, we calculate the gene-pool specific genomic relationship matrices (GRM) following the steps below:
# 1/ keeping only alleles that are not associated with height (i.e. the 'neutral' alleles)
# 2/ calculate a GRM based on all gene pools
# 3/ calculate gene-pool specific GRMs
# More details can be found here:
# - for steps 1 and 2: https://github.com/JulietteArchambeau/HeightPinpinClonapin/blob/master/reports/ExplanatoryVariables/GenomicRelationshipMatrices.Rmd
# - for step 3: https://github.com/JulietteArchambeau/HeightPinpinClonapin/blob/master/reports/ExplanatoryVariables/GenePoolSpecificKinshipMatrix.Rmd
# Packages:
library(readr) # CRAN v1.3.1
library(AGHmatrix) # CRAN v1.0.2
library(gplots) # CRAN v3.0.1.2
library(matrixcalc) # CRAN v1.0-3
library(Matrix) # CRAN v1.2-18
library(tidyverse) # CRAN v1.3.0
library(bdsmatrix) # CRAN v1.3-3
# Load the genomic data:
geno <- read.csv("~/Documents/Pinpin_Clonapin/HeightPinpinClonapin/data_DRYAD/GenomicData_5165SNPs_523clones.csv", row.names=1)
# 1/ Splitting 'neutral' and height-associated SNPs
# =================================================
# We use the outputs from a previous study: de Miguel et al. 2022.
# Here the link to the study: https://onlinelibrary.wiley.com/doi/full/10.1111/mec.16367?casa_token=1nNTc88Iy40AAAAA%3ALd4EOK5ehk_cEHIkw5A9l8nk0NPzUzlYPX8eAjVCikIjHP0WJ1kxoHJSZjMLFsZcP-8wdbNuNrlOfp1jzw
# In this study, the authors used the implemented in Bayesian variable selection regression implemented in the piMASS software to identify the SNPs associated with height
# in the five CLONAPIN common gardens. They found about 350 height associated SNPs.
# Here we use the piMASS outputs from de Miguel et al. (2022) to remove the SNPs potentially associated with height
# and calculate the GRM only based on 'neutral' SNPs.
# piMASS = Posterior inference using Model Averaging and Subset Selection. Details here: https://stephenslab.uchicago.edu/software.html#pimass
# We load the piMASS outputs:
beta.snp <- read_delim("data_DRYAD/height_all_sites_res.mcmc.txt", "\t", escape_double = FALSE, trim_ws = TRUE)
# To select about 350 SNPs considered to be associated with height,
# we set of threshold of 0.0006 for the absolute values of the Rao-Blackwellized estimates of the posterior effect size (column 'betarb')
# with this threshold, we identify 322 SNPs associated with height.
# 322 height-associated SNPs
snps.h <- beta.snp$rs[abs(beta.snp$betarb)>0.006|abs(beta.snp$betarb)==0.006]
# 4,843 SNPs not associated with height ('neutral')
snps.n <- beta.snp$rs[abs(beta.snp$betarb)<0.006]
# We subset the genomic dataset 'geno' to obtain a dataset with only the genotypes of SNPs not associated with height.
geno <- geno[row.names(geno) %in% snps.n,] # dataset with 4843 SNPs and 523 clones
# 2/ Estimating the GRM based on all gene pools
# =============================================
# We use the function 'Gmatrix' of the package `AGHmatrix` to calculate the GRM with the VanRaden method.
# we format the genomic data for the 'Gmatrix' function:
geno[,1:dim(geno)[[2]]] <- apply(geno[ , 1:dim(geno)[[2]]], 2, function(x) as.numeric(as.character(x))) # we replace integers by numeric values
mat <- t(as.matrix(geno)) # matrix with SNPs in columns and clones in rows
# estimate the GRM:
GRM <- Gmatrix(mat,method="VanRaden", missingValue = NA, verify.posdef=T)
# to vizualize the GRM:
heatmap(GRM)
heatmap.2(as.matrix(GRM),scale="row",col=brewer.pal(11,"RdBu"),trace="none")
# The matrix has to be positive definite
# but this is not the case, 1 eigen value is lower than 0
sum(eigen(GRM)$values<0)
eigen(GRM)$values[eigen(GRM)$values<0]
# so we use of the 'nearPD' function of the package 'Matrix' to correct it:
GRM <- as.matrix(nearPD(GRM)$mat)
is.positive.definite(GRM) # now that's ok, the matrix is positive definite
# 3/ Estimating gene pool-specific GRM
# ====================================
# We follow the methodology of Muff et al. 2019
# Here the link to the paper: https://gsejournal.biomedcentral.com/track/pdf/10.1186/s12711-019-0449-7.pdf
# 3.a/ Building the Q-matrix
# ==========================
# We need the Q matrix describing the genetic population structure,
# i.e. the proportion of assignement of each clone to each gene pool
# we load the dataset containing the population structure data and keep only one row per clone
Qmat <- read_csv("data_DRYAD/HeightClimateSoilData_33121obs_32variables.csv") %>% as.data.frame()
Qmat <- Qmat[,c("clon",paste0("Q",rep(1:6)))]
Qmat <- unique(Qmat)
# Due to approximations, there are 18 negative values (with a value of -0.001) in the gene pool Q6
sum(Qmat[,c(paste0("Q",rep(1:6)))]<0)
filter_at(Qmat,c(paste0("Q",rep(1:6))),any_vars(. < 0))
# we set these negative values to 0.
Qmat$Q6[Qmat$Q6<0] <- 0
# clones as rownames instead of in the first column
row.names(Qmat) <- Qmat$clon
Qmat$clon <- NULL
# 3.b/ Generalized Cholesky decomposition of the GRM
# ==================================================
A.gchol <- gchol(GRM)
# Matrix T
T <- as.matrix(A.gchol)
# lower triangular matrix with diagonal elements equal to 1
# and elements below the diagonal in the respective column correspond to the expected proportion of the genome that is shared among clones.
# Transpose of matrix T
Tt <- t(as.matrix(A.gchol))
# Diagonal matrix D
diag.A <- diag(A.gchol) # vector of numeric values
D <- Diagonal(x=diag.A)
# 3.c/ Gene pool-specific matrices
# ================================
diij <- rep(1,523)
Dj <- Diagonal(x=diij)
# GRM specific to the Q1 gene pool, i.e. the Northern Africa gene pool
D1 <- Diagonal(x=Qmat[,1])
T1 <- T %*% D1
A1 <- T1 %*% Dj %*% t(T1)
is.symmetric.matrix(as.matrix(A1)) # The matrix is symetric
is.positive.definite(as.matrix(A1)) # The matrix is not positive definite
A1 <- nearPD(A1) # we approximate the matrix to the nearest positive definite matrix
write.csv(as.matrix(A1$mat), file= paste0("data_DRYAD/GRM_A1.csv"))
# We do the same for the other gene pools
# Q2 gene pool, i.e. the Corsican gene pool
D2 <- Diagonal(x=Qmat[,2])
T2 <- T %*% D2
A2 <- T2 %*% Dj %*% t(T2)
is.symmetric.matrix(as.matrix(A2))
is.positive.definite(as.matrix(A2))
A2 <- nearPD(A2)
write.csv(as.matrix(A2$mat), file= paste0("data_DRYAD/GRM_A2.csv"))
# Q3 gene pool, i.e. the Central Spain gene pool
D3 <- Diagonal(x=Qmat[,3])
T3 <- T %*% D3
A3 <- T3 %*% Dj %*% t(T3)
is.symmetric.matrix(as.matrix(A3))
is.positive.definite(as.matrix(A3))
A3 <- nearPD(A3)
write.csv(as.matrix(A3$mat), file= paste0("data_DRYAD/GRM_A3.csv"))
# Q4 gene pool, i.e. the Atlantic French gene pool
D4 <- Diagonal(x=Qmat[,4])
T4 <- T %*% D4
A4 <- T4 %*% Dj %*% t(T4)
is.symmetric.matrix(as.matrix(A4))
is.positive.definite(as.matrix(A4))
A4 <- nearPD(A4)
write.csv(as.matrix(A4$mat), file= paste0("data_DRYAD/GRM_A4.csv"))
# Q5 gene pool, i.e. the Atlantic Iberian gene pool
D5 <- Diagonal(x=Qmat[,5])
T5 <- T %*% D5
A5 <- T5 %*% Dj %*% t(T5)
is.symmetric.matrix(as.matrix(A5))
is.positive.definite(as.matrix(A5))
A5 <- nearPD(A5)
write.csv(as.matrix(A5$mat), file= paste0("data_DRYAD/GRM_A5.csv"))
# Q6 gene pool, i.e. the south-eastern Spain gene pool
D6 <- Diagonal(x=Qmat[,6])
T6 <- T %*% D6
A6 <- T6 %*% Dj %*% t(T6)
is.symmetric.matrix(as.matrix(A6))
is.positive.definite(as.matrix(A6))
A6 <- nearPD(A6)
write.csv(as.matrix(A6$mat), file= paste0("data_DRYAD/GRM_A6.csv"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomialfunctions.R
\name{bin_distribution}
\alias{bin_distribution}
\title{Binomial distribution}
\usage{
bin_distribution(trials, prob)
}
\arguments{
\item{trials}{number of trials}
\item{prob}{probability of successes}
}
\value{
a data frame indicating the probabality of each number of successes
}
\description{
creates a dataframe for the probability of each number of successes with specified probability in a specified number of trials
}
\examples{
What is the binomial distribution with prob of success = 0.5 in 5 trials?
bin_distribution(trials = 5, prob = 0.5)
}
| /binomial/man/bin_distribution.Rd | no_license | stat133-sp19/hw-stat133-christinajin01 | R | false | true | 652 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomialfunctions.R
\name{bin_distribution}
\alias{bin_distribution}
\title{Binomial distribution}
\usage{
bin_distribution(trials, prob)
}
\arguments{
\item{trials}{number of trials}
\item{prob}{probability of successes}
}
\value{
a data frame indicating the probabality of each number of successes
}
\description{
creates a dataframe for the probability of each number of successes with specified probability in a specified number of trials
}
\examples{
What is the binomial distribution with prob of success = 0.5 in 5 trials?
bin_distribution(trials = 5, prob = 0.5)
}
|
\name{OrderByLocus}
\alias{OrderByLocus}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ A function to order the fragment size vector for a given locus }
\description{
Used by other functions to sort the fragment sizes
}
\usage{
OrderByLocus(DataBase, marker)
}
\arguments{
\item{DataBase}{ A data base with loci and fragment size information }
\item{marker}{ The locus to be sorted }
}
\value{
An ordered fragment size vector for a given locus
}
\author{ Filipe Alberto}
\references{
Alberto F. MsatAllele_1.0: an R package to visualize the binning of microsatellite alleles Journal of Heredity. 100(3):394,397
}
\seealso{ \code{\link{subdataBase}}, \code{\link{AlleleHist}} }
\examples{data(DBase)
OrderByLocus(DBase,"BC-4")
}
\keyword{ manip }
| /man/OrderByLocus.Rd | no_license | kkeenan02/MsatAllele | R | false | false | 810 | rd | \name{OrderByLocus}
\alias{OrderByLocus}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ A function to order the fragment size vector for a given locus }
\description{
Used by other functions to sort the fragment sizes
}
\usage{
OrderByLocus(DataBase, marker)
}
\arguments{
\item{DataBase}{ A data base with loci and fragment size information }
\item{marker}{ The locus to be sorted }
}
\value{
An ordered fragment size vector for a given locus
}
\author{ Filipe Alberto}
\references{
Alberto F. MsatAllele_1.0: an R package to visualize the binning of microsatellite alleles Journal of Heredity. 100(3):394,397
}
\seealso{ \code{\link{subdataBase}}, \code{\link{AlleleHist}} }
\examples{data(DBase)
OrderByLocus(DBase,"BC-4")
}
\keyword{ manip }
|
myargument <- 3
setwd("/zhome/6e/9/133731/Desktop/Thesis/Thesis/Code")
wd <- getwd()
source(paste(wd,"/Coefficient_optimization/Multi_step_predictions/meta_optim_functions/meta_optim.r", sep = ""))
source(paste(wd,"/Coefficient_optimization/Multi_step_predictions/meta_optim_functions/evaluationPart_of_meta_optim.r", sep = ""))
load("Coefficient_optimization/Multi_step_predictions/Job_splitting/ARIMA/orders.RData")
##################### Reading in training data #####################
ts <- read.csv("../Data/Training_data/s2_training.txt", header = TRUE, sep = "\t")$Value
wwIndex <- read.csv("../Data/Training_data/s2_WW_training.txt", header = TRUE, sep = "\t")$Flag
d_validation <- read.csv("../Data/Validation_data/d_validation.txt", header = TRUE, sep = "\t")$Value
ts_validation <- read.csv("../Data/Validation_data/s2_validation.txt", header = TRUE, sep = "\t")$Value
timestamp_validation <- read.csv("../Data/Validation_data/s2_validation.txt", header = TRUE, sep = "\t")$Timestamp
order <- as.vector(orders[myargument,], mode = "numeric")
print(order)
orders <- order
External_Regressor = FALSE
## Constants used to de-normalize the data
norm_const_d <- 23.8588
norm_const_station <- 26776.54 ## For station 2 I assume??
##################### Optimization for Dammning (station 1) #####################
start_time <- Sys.time()
results <- meta_optim(order, External_Regressor = FALSE)
end_time <- Sys.time()
results$Time <- end_time - start_time ## We want to see time as well in the list
| /Code/Coefficient_optimization/neldermead_optimization/Multi_step_predictions/meta_optim_functions/Old/test_meta_optim.r | no_license | arijoh/DataDrivenForecastModels | R | false | false | 1,521 | r |
myargument <- 3
setwd("/zhome/6e/9/133731/Desktop/Thesis/Thesis/Code")
wd <- getwd()
source(paste(wd,"/Coefficient_optimization/Multi_step_predictions/meta_optim_functions/meta_optim.r", sep = ""))
source(paste(wd,"/Coefficient_optimization/Multi_step_predictions/meta_optim_functions/evaluationPart_of_meta_optim.r", sep = ""))
load("Coefficient_optimization/Multi_step_predictions/Job_splitting/ARIMA/orders.RData")
##################### Reading in training data #####################
ts <- read.csv("../Data/Training_data/s2_training.txt", header = TRUE, sep = "\t")$Value
wwIndex <- read.csv("../Data/Training_data/s2_WW_training.txt", header = TRUE, sep = "\t")$Flag
d_validation <- read.csv("../Data/Validation_data/d_validation.txt", header = TRUE, sep = "\t")$Value
ts_validation <- read.csv("../Data/Validation_data/s2_validation.txt", header = TRUE, sep = "\t")$Value
timestamp_validation <- read.csv("../Data/Validation_data/s2_validation.txt", header = TRUE, sep = "\t")$Timestamp
order <- as.vector(orders[myargument,], mode = "numeric")
print(order)
orders <- order
External_Regressor = FALSE
## Constants used to de-normalize the data
norm_const_d <- 23.8588
norm_const_station <- 26776.54 ## For station 2 I assume??
##################### Optimization for Dammning (station 1) #####################
start_time <- Sys.time()
results <- meta_optim(order, External_Regressor = FALSE)
end_time <- Sys.time()
results$Time <- end_time - start_time ## We want to see time as well in the list
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lisrel.R
\name{lisrel_xy}
\alias{lisrel_xy}
\title{LISREL Structural Equations with Latent Variables (xy).}
\usage{
lisrel_xy(LY, LX, inv, GA, PH)
}
\arguments{
\item{LY}{\eqn{\boldsymbol{\Lambda}_{\mathbf{y}}}
\eqn{p \times m}
matrix of factor loadings
(\eqn{\boldsymbol{\lambda}}).
\eqn{p}
is the number of observed indicators
(\eqn{\mathbf{y}})
and
\eqn{m}
is the number of latent endogenous variables
(\eqn{\boldsymbol{\eta}}).}
\item{LX}{\eqn{\boldsymbol{\Lambda}_{\mathbf{x}}}
\eqn{q \times n}
matrix of factor loadings
(\eqn{\boldsymbol{\lambda}}).
\eqn{q}
is the number of observed indicators
(\eqn{\mathbf{x}})
and
\eqn{n}
is the number of latent exogenous variables
(\eqn{\boldsymbol{\xi}}).}
\item{inv}{The inverse of \code{I} minus \code{BE}
(\eqn{\left( \mathbf{I} - \mathbf{B} \right)^{-1}}).}
\item{GA}{\eqn{\boldsymbol{\Gamma}_{m \times n}}
coefficient matrix
for exogenous variables.}
\item{PH}{\eqn{\boldsymbol{\Phi}_{n \times n}}
variance-covariance matrix of
\eqn{\boldsymbol{\xi}}.}
}
\value{
Returns the model-implied variance-covariance matrix for
\eqn{\mathbf{xy}}
(\eqn{\boldsymbol{\Sigma}_{\mathbf{xy}} \left( \boldsymbol{\theta} \right)})
derived from the
\eqn{\boldsymbol{\Lambda}_{\mathbf{y}}}
(\code{LY}),
\eqn{\boldsymbol{\Lambda}_{\mathbf{x}}}
(\code{LX}),
\eqn{\mathbf{B}}
(\code{BE}),
\eqn{\mathbf{I}}
(\code{I}),
\eqn{\boldsymbol{\Gamma}}
(\code{GA}),
and
\eqn{\boldsymbol{\Phi}}
(\code{PH})
matrices.
}
\description{
Model-implied variance-covariance matrix for \eqn{\mathbf{xy}} variables
(\eqn{\boldsymbol{\Sigma}_{\mathbf{xy}} \left( \boldsymbol{\theta} \right)})
using the LISREL notation
for structural equations with latent variables.
}
\details{
\deqn{\boldsymbol{\Sigma}_{\mathbf{xy}} \left( \boldsymbol{\theta} \right)
=
\boldsymbol{\Lambda}_{\mathbf{x}}
\boldsymbol{\Phi}
\boldsymbol{\Gamma}^{T}
\left[ \left( \mathbf{I} - \mathbf{B} \right)^{-1} \right]^{T}
\boldsymbol{\Lambda}_{\mathbf{y}}^{T}
}
}
\references{
Bollen, K. A. (1989).
\emph{Structural equations with latent variables}.
New York: Wiley.
Jöreskog, K. G., & Sörbom, D. (1996).
\emph{Lisrel 8: User's reference guide} (2nd ed.).
Scientific Software.
}
\seealso{
Other SEM notation functions:
\code{\link{eqs_mu}()},
\code{\link{eqs}()},
\code{\link{lisrel_fa}()},
\code{\link{lisrel_obs_xy}()},
\code{\link{lisrel_obs_yx}()},
\code{\link{lisrel_obs_yy}()},
\code{\link{lisrel_obs}()},
\code{\link{lisrel_xx}()},
\code{\link{lisrel_yx}()},
\code{\link{lisrel_yy}()},
\code{\link{lisrel}()},
\code{\link{ram_mu}()},
\code{\link{ram_m}()},
\code{\link{ram_s}()},
\code{\link{ram}()},
\code{\link{sem_fa}()},
\code{\link{sem_lat}()},
\code{\link{sem_obs}()}
}
\author{
Ivan Jacob Agaloos Pesigan
}
\concept{SEM notation functions}
\keyword{lisrel}
\keyword{matrix}
| /man/lisrel_xy.Rd | permissive | jeksterslabds/jeksterslabRds | R | false | true | 2,869 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lisrel.R
\name{lisrel_xy}
\alias{lisrel_xy}
\title{LISREL Structural Equations with Latent Variables (xy).}
\usage{
lisrel_xy(LY, LX, inv, GA, PH)
}
\arguments{
\item{LY}{\eqn{\boldsymbol{\Lambda}_{\mathbf{y}}}
\eqn{p \times m}
matrix of factor loadings
(\eqn{\boldsymbol{\lambda}}).
\eqn{p}
is the number of observed indicators
(\eqn{\mathbf{y}})
and
\eqn{m}
is the number of latent endogenous variables
(\eqn{\boldsymbol{\eta}}).}
\item{LX}{\eqn{\boldsymbol{\Lambda}_{\mathbf{x}}}
\eqn{q \times n}
matrix of factor loadings
(\eqn{\boldsymbol{\lambda}}).
\eqn{q}
is the number of observed indicators
(\eqn{\mathbf{x}})
and
\eqn{n}
is the number of latent exogenous variables
(\eqn{\boldsymbol{\xi}}).}
\item{inv}{The inverse of \code{I} minus \code{BE}
(\eqn{\left( \mathbf{I} - \mathbf{B} \right)^{-1}}).}
\item{GA}{\eqn{\boldsymbol{\Gamma}_{m \times n}}
coefficient matrix
for exogenous variables.}
\item{PH}{\eqn{\boldsymbol{\Phi}_{n \times n}}
variance-covariance matrix of
\eqn{\boldsymbol{\xi}}.}
}
\value{
Returns the model-implied variance-covariance matrix for
\eqn{\mathbf{xy}}
(\eqn{\boldsymbol{\Sigma}_{\mathbf{xy}} \left( \boldsymbol{\theta} \right)})
derived from the
\eqn{\boldsymbol{\Lambda}_{\mathbf{y}}}
(\code{LY}),
\eqn{\boldsymbol{\Lambda}_{\mathbf{x}}}
(\code{LX}),
\eqn{\mathbf{B}}
(\code{BE}),
\eqn{\mathbf{I}}
(\code{I}),
\eqn{\boldsymbol{\Gamma}}
(\code{GA}),
and
\eqn{\boldsymbol{\Phi}}
(\code{PH})
matrices.
}
\description{
Model-implied variance-covariance matrix for \eqn{\mathbf{xy}} variables
(\eqn{\boldsymbol{\Sigma}_{\mathbf{xy}} \left( \boldsymbol{\theta} \right)})
using the LISREL notation
for structural equations with latent variables.
}
\details{
\deqn{\boldsymbol{\Sigma}_{\mathbf{xy}} \left( \boldsymbol{\theta} \right)
=
\boldsymbol{\Lambda}_{\mathbf{x}}
\boldsymbol{\Phi}
\boldsymbol{\Gamma}^{T}
\left[ \left( \mathbf{I} - \mathbf{B} \right)^{-1} \right]^{T}
\boldsymbol{\Lambda}_{\mathbf{y}}^{T}
}
}
\references{
Bollen, K. A. (1989).
\emph{Structural equations with latent variables}.
New York: Wiley.
Jöreskog, K. G., & Sörbom, D. (1996).
\emph{Lisrel 8: User's reference guide} (2nd ed.).
Scientific Software.
}
\seealso{
Other SEM notation functions:
\code{\link{eqs_mu}()},
\code{\link{eqs}()},
\code{\link{lisrel_fa}()},
\code{\link{lisrel_obs_xy}()},
\code{\link{lisrel_obs_yx}()},
\code{\link{lisrel_obs_yy}()},
\code{\link{lisrel_obs}()},
\code{\link{lisrel_xx}()},
\code{\link{lisrel_yx}()},
\code{\link{lisrel_yy}()},
\code{\link{lisrel}()},
\code{\link{ram_mu}()},
\code{\link{ram_m}()},
\code{\link{ram_s}()},
\code{\link{ram}()},
\code{\link{sem_fa}()},
\code{\link{sem_lat}()},
\code{\link{sem_obs}()}
}
\author{
Ivan Jacob Agaloos Pesigan
}
\concept{SEM notation functions}
\keyword{lisrel}
\keyword{matrix}
|
library(geoR)
# to get lat and lon from Google
library(ggmap)
library(rgdal)
library(tidyverse)
library(DT)
library(knitr)
library(sp)
library(rgeos)
library(ggplot2)
library(ggthemes)
library(outliers)
library(maptools)
getCurrentFileLocation <- function()
{
this_file <- commandArgs() %>%
tibble::enframe(name = NULL) %>%
tidyr::separate(col=value, into=c("key", "value"), sep="=", fill='right') %>%
dplyr::filter(key == "--file") %>%
dplyr::pull(value)
if (length(this_file)==0)
{
this_file <- rstudioapi::getSourceEditorContext()$path
}
return(dirname(this_file))
}
path = getCurrentFileLocation()
setwd(path)
db = read.csv('db.csv')
#converting date format (min = 2016-09-01, max=2017-08-31)
db$SALE.DATE = format(as.Date(db$SALE.DATE))
#db = subset(db,SALE.DATE <= as.Date('2016/12/31'))
#cleaning the data: replacing '-' in SALE.PRICE
db$SALE.PRICE <- as.character(db$SALE.PRICE)
db$SALE.PRICE[db$SALE.PRICE == "-"] = ""
db$SALE.PRICE <- as.integer(db$SALE.PRICE)
#cleaning the data: replacing '-' in GROSS.SQUARE.FEET
db$GROSS.SQUARE.FEET <- as.character(db$GROSS.SQUARE.FEET)
db$GROSS.SQUARE.FEET[db$GROSS.SQUARE.FEET == "-"] = ""
db$GROSS.SQUARE.FEET <- as.integer(db$GROSS.SQUARE.FEET)
#cleaning the data: replacing '-' in LAND.SQUARE.FEET
db$LAND.SQUARE.FEET <- as.character(db$LAND.SQUARE.FEET)
db$LAND.SQUARE.FEET[db$LAND.SQUARE.FEET == "-"] = ""
db$LAND.SQUARE.FEET <- as.integer(db$LAND.SQUARE.FEET)
#cleaning the data: removing lines for which SALES.PRICE is too low
#db = subset(db,is.na(SALE.PRICE)==FALSE)
db = subset(db,SALE.PRICE>10)
#cleaning the data: removing lines for which there is no information on GROSS.SQUARE.FEET
db = subset(db,is.na(GROSS.SQUARE.FEET)==FALSE)
db = subset(db,GROSS.SQUARE.FEET>0)
#we add price per square feet:
db$PRICE.SQUARE.FEET = db$SALE.PRICE/db$GROSS.SQUARE.FEET
#we create a function that returns the borough's name, and add it to the database
borough_name_fun = function(code){
if(code == 1){
b_name = 'Manhattan'
} else if (code == 2){
b_name = 'Bronx'
} else if (code == 3){
b_name = 'Brooklyn'
} else if (code == 4){
b_name = 'Queens'
} else if (code == 5){
b_name = 'Staten Island'
} else {
b_name = 'N/I'
}
return(b_name)
}
db$BOROUGHNAME = lapply(db$BOROUGH, borough_name_fun)
#we create a full address column
db$location = paste0(db$ADDRESS, ", ", db$BOROUGHNAME, ", ", db$ZIP.CODE , " - New York")
# the sample is too big for Google API, so for now, we take just 2500 entries, selected randomly
# and we set the seed to make our partition reproductible
db$ID <- seq.int(nrow(db))
db1 = subset(db,ID<=2499)
db2 = subset(db,ID>2499 & ID<=4999)
db3 = subset(db,ID>4999 & ID<=7499)
db4 = subset(db,ID>7499 & ID<=9999)
db5 = subset(db,ID>9999)
###
smp_size = floor(2500/12949 * nrow(db))
set.seed(123)
reduced_sample = sample(seq_len(nrow(db)), size = smp_size)
reduced_sample = db[reduced_sample, ]
db = reduced_sample
#We get longitude and latitude from google API
geo = geocode(location = db5$location, output="latlon", source="google")
db5$lon = geo$lon
db5$lat = geo$lat
x = rbind(db1,db2,db3,db4,db5)
x = subset(x,is.na(lon)==FALSE)
x <- apply(x,2,as.character)
#Creating final db
db = rbind(db1,db2,db3,db4,db5)
db = subset(db,is.na(lon)==FALSE)
#Saving into a new file
write.csv(x, file = 'db.csv')
| /data-prep.R | no_license | jbdatascience/kriging | R | false | false | 3,361 | r | library(geoR)
# to get lat and lon from Google
library(ggmap)
library(rgdal)
library(tidyverse)
library(DT)
library(knitr)
library(sp)
library(rgeos)
library(ggplot2)
library(ggthemes)
library(outliers)
library(maptools)
getCurrentFileLocation <- function()
{
this_file <- commandArgs() %>%
tibble::enframe(name = NULL) %>%
tidyr::separate(col=value, into=c("key", "value"), sep="=", fill='right') %>%
dplyr::filter(key == "--file") %>%
dplyr::pull(value)
if (length(this_file)==0)
{
this_file <- rstudioapi::getSourceEditorContext()$path
}
return(dirname(this_file))
}
path = getCurrentFileLocation()
setwd(path)
db = read.csv('db.csv')
#converting date format (min = 2016-09-01, max=2017-08-31)
db$SALE.DATE = format(as.Date(db$SALE.DATE))
#db = subset(db,SALE.DATE <= as.Date('2016/12/31'))
#cleaning the data: replacing '-' in SALE.PRICE
db$SALE.PRICE <- as.character(db$SALE.PRICE)
db$SALE.PRICE[db$SALE.PRICE == "-"] = ""
db$SALE.PRICE <- as.integer(db$SALE.PRICE)
#cleaning the data: replacing '-' in GROSS.SQUARE.FEET
db$GROSS.SQUARE.FEET <- as.character(db$GROSS.SQUARE.FEET)
db$GROSS.SQUARE.FEET[db$GROSS.SQUARE.FEET == "-"] = ""
db$GROSS.SQUARE.FEET <- as.integer(db$GROSS.SQUARE.FEET)
#cleaning the data: replacing '-' in LAND.SQUARE.FEET
db$LAND.SQUARE.FEET <- as.character(db$LAND.SQUARE.FEET)
db$LAND.SQUARE.FEET[db$LAND.SQUARE.FEET == "-"] = ""
db$LAND.SQUARE.FEET <- as.integer(db$LAND.SQUARE.FEET)
#cleaning the data: removing lines for which SALES.PRICE is too low
#db = subset(db,is.na(SALE.PRICE)==FALSE)
db = subset(db,SALE.PRICE>10)
#cleaning the data: removing lines for which there is no information on GROSS.SQUARE.FEET
db = subset(db,is.na(GROSS.SQUARE.FEET)==FALSE)
db = subset(db,GROSS.SQUARE.FEET>0)
#we add price per square feet:
db$PRICE.SQUARE.FEET = db$SALE.PRICE/db$GROSS.SQUARE.FEET
#we create a function that returns the borough's name, and add it to the database
borough_name_fun = function(code){
if(code == 1){
b_name = 'Manhattan'
} else if (code == 2){
b_name = 'Bronx'
} else if (code == 3){
b_name = 'Brooklyn'
} else if (code == 4){
b_name = 'Queens'
} else if (code == 5){
b_name = 'Staten Island'
} else {
b_name = 'N/I'
}
return(b_name)
}
db$BOROUGHNAME = lapply(db$BOROUGH, borough_name_fun)
#we create a full address column
db$location = paste0(db$ADDRESS, ", ", db$BOROUGHNAME, ", ", db$ZIP.CODE , " - New York")
# the sample is too big for Google API, so for now, we take just 2500 entries, selected randomly
# and we set the seed to make our partition reproductible
db$ID <- seq.int(nrow(db))
db1 = subset(db,ID<=2499)
db2 = subset(db,ID>2499 & ID<=4999)
db3 = subset(db,ID>4999 & ID<=7499)
db4 = subset(db,ID>7499 & ID<=9999)
db5 = subset(db,ID>9999)
###
smp_size = floor(2500/12949 * nrow(db))
set.seed(123)
reduced_sample = sample(seq_len(nrow(db)), size = smp_size)
reduced_sample = db[reduced_sample, ]
db = reduced_sample
#We get longitude and latitude from google API
geo = geocode(location = db5$location, output="latlon", source="google")
db5$lon = geo$lon
db5$lat = geo$lat
x = rbind(db1,db2,db3,db4,db5)
x = subset(x,is.na(lon)==FALSE)
x <- apply(x,2,as.character)
#Creating final db
db = rbind(db1,db2,db3,db4,db5)
db = subset(db,is.na(lon)==FALSE)
#Saving into a new file
write.csv(x, file = 'db.csv')
|
#Bootcamp modeling exercises
#Assignment 2 - Ioana Anghel
#Ricker Model
###(a) Scratch setup: model parameters and a preallocated trajectory container.
# NOTE: these globals are superseded by the arguments of RickerFun() below;
# they are kept only as the worked steps of part (a).
tt <- 0                    # current time step
N0 <- 100                  # initial population size
rr <- 1.05                 # intrinsic growth rate
pop.size <- numeric(11)    # placeholder trajectory of 11 time points (use <-, not =)
KK <- 300                  # carrying capacity
ttmax <- 10                # number of time steps
NN <- matrix(NA, nrow = 1, ncol = ttmax + 1)
NN[1] <- N0                # sets value at first point in vector (time 0)
# Simulate the Ricker map: N[t+1] = N[t] * exp(rr * (1 - N[t]/KK)).
#
# Args:
#   rr       intrinsic growth rate
#   N0       initial population size (population at time 0)
#   KK       carrying capacity
#   ttMax    number of time steps to simulate
#   PLOTFLAG if 1, plot the trajectory; any other value (default 0) suppresses it
#
# Returns:
#   numeric vector of length ttMax + 1 with the population at times 0..ttMax
RickerFun <- function(rr, N0, KK, ttMax, PLOTFLAG = 0) {
  NN <- rep(NA_real_, ttMax + 1)
  NN[1] <- N0
  # seq_len() rather than 1:ttMax so ttMax = 0 gives an empty loop
  # (1:0 would iterate over c(1, 0) and fail on NN[0])
  for (tt in seq_len(ttMax)) {
    NN[tt + 1] <- NN[tt] * exp(rr * (1 - (NN[tt] / KK)))
  }
  if (PLOTFLAG == 1) {
    plot(seq_len(ttMax + 1), NN,
         xlab = "Time", ylab = "Population size", col = "blue", type = "l")
  }
  return(NN)
}
# The plot is drawn only when PLOTFLAG == 1; the default (PLOTFLAG = 0)
# already suppresses plotting, so pass PLOTFLAG = 1 explicitly to see the trajectory.
# ex: RickerFun(1.05, 100, 400, 50, 1)
#########################################################################################
#(b) Qualitative regimes of the Ricker map as the growth rate rr varies.
# Argument order: (rr, N0, KK, ttMax, PLOTFLAG); pass PLOTFLAG = 1 to draw each trajectory.
# Population decreases to n = 0:
# to reach zero, the population must have negative growth (decline)
RickerFun(-0.25,10,400,50)
# Population approaches a stable equilibrium at n* = K, without oscillations:
RickerFun(0.15,10,400,50)
# Decaying oscillations around n* = K:
RickerFun(1.5,100,400,50)
# Persistent, regular oscillations:
RickerFun(2.3,100,400,50)
# Crazy, random-looking fluctuations (chaos):
# growth rate is so high that boom-and-bust cycles are not consistent
RickerFun(3,100,400,50)
# growth rate is the key driver of these patterns
#(c)
#run for loop with 6 different values
# tt for time
# iterating over
#par(mfrow=c(3,2)) plots multiple plots on one page
par(mfrow=c(3,2))
rr <- c(-0.25,0.15,1,1.5,2.3,3)
for (ii in 1:length(rr)){
RickerFun(rr[ii],100,400,50)
}
#########################################################################################
#(d) First time index at which the population reaches half of K (here K/2 = 500).
# Argument order: (rr, N0, KK, ttMax, PLOTFLAG); PLOTFLAG = 0 suppresses the plot.
nvec <- RickerFun(1.05, 20, 1000, 50, 0)
# which() returns every index meeting the condition; take the first hit.
half_k_hits <- which(nvec >= 500)
half_k_hits[1]
#########################################################################################
#(e)
KK <- 1000
N0 <- 100
ttMax <- 50
aa <- 0.1
rr <- seq(0.1,0.9,aa)
duration <- seq(0,ttMax,1)
TimeToHalfK <- vector('numeric',length(rr))
#vector of half time to carrying capacity for each of the growth rates in "duration"
for (jj in 1:length(rr)){
nvec <- RickerFun(rr[jj],N0,KK,ttMax)
vec <- which(nvec >= KK/2)[1]
TimeToHalfK[jj] <- duration[vec]
}
par(mfrow=c(1,1))
plot(rr,TimeToHalfK, xlab = "r",ylab="Time to Half K", col='blue',type='l')
#########################################################################################
#(f)
#
#initialize inputs for new vectors KK & rr; also initalize N0 & ttMax
# create blank vector for population number outputs with length equal to number of combinations of KK x rr
# initalize Ricker function at 0, so that you can increment it inside of the loop
#output a matrix of the population at each combination of KK & rr
# write a nested for loop to run through all of the possibilies of rr, with all of the possibilities of KK
# inside of the nested loop increment Ricker function
# run RickerFun function with the variables at rr[jj] and KK[ii], returns NN
# place outputs of RickerFun into the popMatrix created earlier
#########################################################################################
#(g)
rr <- c(0.5,1.0,1.5)
KK <- c(100,200,300)
N0 <- 2
ttMax <- 10
#nested loop
#run Ricker last because NN will be the output
#cycle to all other functions first
popMatrix <- matrix(nrow = length(KK),ncol = length(rr))
for(ii in 1:length(KK)){
for(jj in 1:length(rr)){
nvec <- RickerFun(rr[jj],N0,KK[ii],ttMax)
popMatrix[ii,jj] <- nvec[ttMax]
#popMatrix[Rickers] = RickerFun(rr[jj],N0,KK[ii],ttMax)
#how to have RickerFun output save into popMatrix, and increment popMatrix index?
# popMatrix[Rickers] = RickerFun()
}
}
#outputs the matrix
popMatrix
# check thar Ricker fun is running correctly
RickerFun(rr[3],N0,KK[2],10)
contour(x = seq(min(rr), max(rr), length.out = nrow(popMatrix)),
y = seq(min(KK), max(KK), length.out = ncol(popMatrix)),
z = popMatrix)
#OR
library("lattice")
levelplot(popMatrix)
| /Assignment2/Assignment 2 Jamie.R | no_license | ioanaanghel/Ioana-RBootcamp | R | false | false | 4,234 | r | #Bootcamp modeling exercises
#Assignment 2 - Ioana Anghel
#Ricker Model
###(a)
tt <- 0
N0 <- 100
rr <- 1.05
pop.size = numeric(11)
KK = 300
ttmax = 10
NN <-matrix(NA, nrow=1, ncol=ttmax+1)
NN[1] <- N0
#sets value at first point in vector
RickerFun <- function(rr,N0,KK,ttMax,PLOTFLAG=0){
  # Iterate the Ricker map N[t+1] = N[t] * exp(rr * (1 - N[t]/KK)).
  #
  # Args:
  #   rr:       intrinsic growth rate.
  #   N0:       initial population size (stored in NN[1]).
  #   KK:       carrying capacity.
  #   ttMax:    number of time steps to simulate (trajectory has ttMax+1 points).
  #   PLOTFLAG: if 1, also draw the trajectory; any other value suppresses it.
  #
  # Returns: numeric vector of length ttMax+1 with the population at each step.
  NN <- rep(NA_real_, ttMax + 1)  # NA_real_ keeps the vector numeric throughout
  NN[1] <- N0
  # seq_len() (rather than 1:ttMax) is safe when ttMax == 0: the loop simply
  # does not run instead of iterating over c(1, 0).
  for (tt in seq_len(ttMax)) {
    NN[tt + 1] <- NN[tt] * exp(rr * (1 - (NN[tt] / KK)))
  }
  if (PLOTFLAG == 1) {
    plot(1:(ttMax + 1), NN, xlab = "Time", ylab = "Population size",
         col = 'blue', type = 'l')
  }
  return(NN)
}
#to supress plot from generating whenever running the function RickerFun,
# PLOTFLAG=number other than the default, which in this case is 1
# ex: RickerFun(1.05,100,400,50,0)
#########################################################################################
#(b)
## (rr,N0,KK,ttMax,PLOTFLAG=1)
#Population decreases to n = 0.
# to reach zero population must have negative growth (decline)
RickerFun(-0.25,10,400,50)
#Population approaches stable equilibrium at n* = K, without oscillations
RickerFun(0.15,10,400,50)
#Decaying oscillations around n* = K.
RickerFun(1.5,100,400,50)
#Persistent, regular oscillations.
RickerFun(2.3,100,400,50)
#Crazy, random-looking fluctuations (chaos).
# growth rate is so high that boom and bust cycles are not consistent
RickerFun(3,100,400,50)
#growth rate is the key driver of these patterns
#########################################################################################
#(c)
#run for loop with 6 different values
# tt for time
# iterating over
#par(mfrow=c(3,2)) plots multiple plots on one page
par(mfrow=c(3,2))
rr <- c(-0.25,0.15,1,1.5,2.3,3)
for (ii in 1:length(rr)){
RickerFun(rr[ii],100,400,50)
}
#########################################################################################
#(d)
##(rr,N0,KK,ttMax,PLOTFLAG=1)
nvec <- RickerFun(1.05,20,1000,50,0)
which(nvec >= 500)[1]
#when calling a specific position, put square brackets with position number after the vector
#########################################################################################
#(e)
KK <- 1000
N0 <- 100
ttMax <- 50
aa <- 0.1
rr <- seq(0.1,0.9,aa)
duration <- seq(0,ttMax,1)
TimeToHalfK <- vector('numeric',length(rr))
#vector of half time to carrying capacity for each of the growth rates in "duration"
for (jj in 1:length(rr)){
nvec <- RickerFun(rr[jj],N0,KK,ttMax)
vec <- which(nvec >= KK/2)[1]
TimeToHalfK[jj] <- duration[vec]
}
par(mfrow=c(1,1))
plot(rr,TimeToHalfK, xlab = "r",ylab="Time to Half K", col='blue',type='l')
#########################################################################################
#(f)
#
#initialize inputs for new vectors KK & rr; also initalize N0 & ttMax
# create blank vector for population number outputs with length equal to number of combinations of KK x rr
# initalize Ricker function at 0, so that you can increment it inside of the loop
#output a matrix of the population at each combination of KK & rr
# write a nested for loop to run through all of the possibilies of rr, with all of the possibilities of KK
# inside of the nested loop increment Ricker function
# run RickerFun function with the variables at rr[jj] and KK[ii], returns NN
# place outputs of RickerFun into the popMatrix created earlier
#########################################################################################
#(g)
rr <- c(0.5,1.0,1.5)
KK <- c(100,200,300)
N0 <- 2
ttMax <- 10
#nested loop
#run Ricker last because NN will be the output
#cycle to all other functions first
popMatrix <- matrix(nrow = length(KK),ncol = length(rr))
for(ii in 1:length(KK)){
for(jj in 1:length(rr)){
nvec <- RickerFun(rr[jj],N0,KK[ii],ttMax)
popMatrix[ii,jj] <- nvec[ttMax]
#popMatrix[Rickers] = RickerFun(rr[jj],N0,KK[ii],ttMax)
#how to have RickerFun output save into popMatrix, and increment popMatrix index?
# popMatrix[Rickers] = RickerFun()
}
}
#outputs the matrix
popMatrix
# check thar Ricker fun is running correctly
RickerFun(rr[3],N0,KK[2],10)
contour(x = seq(min(rr), max(rr), length.out = nrow(popMatrix)),
y = seq(min(KK), max(KK), length.out = ncol(popMatrix)),
z = popMatrix)
#OR
library("lattice")
levelplot(popMatrix)
|
theta_estimation<-function( m, N, u, x, theta, L1, L2, L3=NULL, L4=NULL, L5=NULL ){
  # Weighted negative log-likelihood over up to m component densities.
  # u[, k] holds the weights for component k, and Lk(x, theta[k]) returns that
  # component's likelihood values for the observations x.
  # Components 3..5 are optional (their density functions default to NULL) and
  # are only evaluated when m says they exist.
  # (N is accepted for interface compatibility but is not used here.)
  loglik <- u[, 1] %*% log(L1(x, theta[1])) + u[, 2] %*% log(L2(x, theta[2]))
  if (m > 2) loglik <- loglik + u[, 3] %*% log(L3(x, theta[3]))
  if (m > 3) loglik <- loglik + u[, 4] %*% log(L4(x, theta[4]))
  if (m > 4) loglik <- loglik + u[, 5] %*% log(L5(x, theta[5]))
  # Flip the sign so the value can be handed to a minimiser.
  -loglik
}
| /R/theta_estimation.R | no_license | MrLehna/HMM | R | false | false | 513 | r | theta_estimation<-function( m, N, u, x, theta, L1, L2, L3=NULL, L4=NULL, L5=NULL ){
# Accumulator for the weighted log-likelihood.
l3<-0
#For the first Likelihood
l3 <- l3 + u[,1]%*%(log(L1(x,theta[1])))
#For the second Likelihood
l3 <- l3 +u[,2]%*%(log(L2(x,theta[2])))
#If we have more than two Likelihoods we can use this algorithm
# Components 3..5 are optional; their density functions (L3..L5) default to
# NULL, so each is only evaluated when m says it exists.
if (m>2){
l3 <- l3 +u[,3]%*%(log(L3(x,theta[3])))
}
if (m>3) {
l3 <- l3 +u[,4]%*%(log(L4(x,theta[4])))
}
if (m>4){
l3 <- l3 +u[,5]%*%(log(L5(x,theta[5])))
}
# Sign is flipped — presumably so the value can be minimised; confirm with
# the optimiser that calls this function.
return(l3*-1)
}
|
require(stringr)
require(tidyr)
require(dplyr)
require(sqldf)
# read in the revised data frame from midterm part 1
taxdata <- read.csv("/home/havb/Dropbox/MSUI/Big Data for Cities - PPUA 5262 - 01/R/data/Tax Assessor/TAdata.csv", stringsAsFactors = FALSE)
# Calculate the Fire Risk for Residential Properties
# First, get a subset of residential buildings using the LU type
resSub<-taxdata[which(taxdata$LU %in% c('R1','R2','R3','R4','A','CM')),]
resSub$R_KITCH<-ifelse(is.na(resSub$R_KITCH),0,resSub$R_KITCH)
# Run the calculation
resSub$R_FIRE_RISK<-(1 * as.integer(resSub$YR_BUILT<1940 & is.na(resSub$YR_REMOD)))+
(0.5 * as.integer(resSub$R_KITCH>0)) +
(1 * as.integer(resSub$R_KITCH>1)) +
(1 * as.integer(resSub$R_KITCH>2)) +
(1 * as.integer(resSub$R_KITCH>3)) +
(1 * as.integer(resSub$R_HEAT_TYP=="S"))
# Merge the fire risk back into the taxdata data frame
colnames(taxdata)[1]<-"X1"
colnames(resSub)[1]<-"X1"
taxdata<-sqldf("select t.*, r.R_FIRE_RISK from
taxdata t
LEFT OUTER JOIN resSub r on r.X1 = t.X1")
colnames(taxdata)[1]<-"X.1"
## create split function that creates a new field from the first three digits of YR_BUILT
split_decade <- function(df, col = 'DecadeBuilt'){
  # Split a 4-digit year column into its first three digits ('Decade') and its
  # last digit ('Year') using tidyr::extract().
  #
  # Args:
  #   df:  data frame containing `col`.
  #   col: name of the column to split; the default 'DecadeBuilt' preserves
  #        the original hard-coded behaviour, and parameterising it removes
  #        the need to redefine this function for other columns.
  #
  # Returns the transformed data frame (explicitly, not invisibly).
  df <- extract(df, col, into = c('Decade', 'Year'), '(.{3})(.{1})')
  df
}
## create new data frame after running split function
taxdata$DecadeBuilt <- taxdata$YR_BUILT
taxdata <- split_decade(taxdata)
## create new field that includes a "0s" at the end and remove "split" year field
taxdata$DecadeBuilt <- paste0(taxdata$Decade,"0s")
taxdata$Year<-NULL
taxdata$Decade<-NULL
## create split function that creates a new field from the first three digits of YR_BUILT
split_decade <- function(df, col = 'DecadeRemod'){
  # Split a 4-digit year column into its first three digits ('Decade') and its
  # last digit ('Year') using tidyr::extract().
  # NOTE: this redefinition replaces the earlier split_decade (which targeted
  # 'DecadeBuilt'); with the column now a parameter, only the default differs.
  #
  # Args:
  #   df:  data frame containing `col`.
  #   col: name of the column to split (default 'DecadeRemod' preserves the
  #        original hard-coded behaviour).
  #
  # Returns the transformed data frame (explicitly, not invisibly).
  df <- extract(df, col, into = c('Decade', 'Year'), '(.{3})(.{1})')
  df
}
## create new data frame after running split function
# BUG FIX: the original read `taxdata$YR_Remod` (wrong capitalisation).
# R names are case sensitive and the column is referenced as YR_REMOD
# everywhere else in this script, so the old code copied NULL and never
# populated DecadeRemod.
taxdata$DecadeRemod <- taxdata$YR_REMOD
taxdata <- split_decade(taxdata)
## create new field that includes a "0s" at the end and remove "split" year field
taxdata$DecadeRemod <- paste0(taxdata$Decade,"0s")
taxdata$Year<-NULL
taxdata$Decade<-NULL
## Residential subset
taxdata_R_EE<- taxdata[taxdata$LU =='R1'| taxdata$LU == 'R2'| taxdata$LU == 'R3'| taxdata$LU == 'R4'| taxdata$LU == 'A'| taxdata$LU == 'RC',]
## Allocating Residential Heat Type Energy Efficiency Score
taxdata_R_EE$HEAT_SCORE <- NA
taxdata_R_EE$HEAT_SCORE <- ifelse(taxdata_R_EE$R_HEAT_TYP == 'S', 0, taxdata_R_EE$HEAT_SCORE)
taxdata_R_EE$HEAT_SCORE <- ifelse(taxdata_R_EE$R_HEAT_TYP == 'W', 1, taxdata_R_EE$HEAT_SCORE)
taxdata_R_EE$HEAT_SCORE <- ifelse(taxdata_R_EE$R_HEAT_TYP == 'P', 2, taxdata_R_EE$HEAT_SCORE)
taxdata_R_EE$HEAT_SCORE <- ifelse(taxdata_R_EE$R_HEAT_TYP == 'F', 3, taxdata_R_EE$HEAT_SCORE)
taxdata_R_EE$HEAT_SCORE <- ifelse(taxdata_R_EE$R_HEAT_TYP == 'E', 4, taxdata_R_EE$HEAT_SCORE)
## Allocating age to Residential buildings
taxdata_R_EE$YR_BUILT <- ifelse(taxdata_R_EE$YR_BUILT == '',NA,taxdata_R_EE$YR_BUILT)
taxdata_R_EE$YR_BUILT <- ifelse(taxdata_R_EE$YR_BUILT == 0,NA,taxdata_R_EE$YR_BUILT)
taxdata_R_EE$YR_REMOD <- ifelse(taxdata_R_EE$YR_REMOD == '',NA,taxdata_R_EE$YR_REMOD)
taxdata_R_EE$YR_REMOD <- ifelse(taxdata_R_EE$YR_REMOD == 0,NA,taxdata_R_EE$YR_REMOD)
taxdata_R_EE$BLDG_AGE <- ifelse(is.na(taxdata_R_EE$YR_REMOD), (2015 - taxdata_R_EE$YR_BUILT), (2015 - taxdata_R_EE$YR_REMOD))
taxdata_R_EE$BLDG_AGE <- ifelse(taxdata_R_EE$BLDG_AGE <=0,NA,taxdata_R_EE$BLDG_AGE)
##Allocating Building Age Score
taxdata_R_EE$AGE_SCORE <- NA
taxdata_R_EE$AGE_SCORE <- ifelse(taxdata_R_EE$BLDG_AGE < 50,4,taxdata_R_EE$AGE_SCORE)
taxdata_R_EE$AGE_SCORE <- ifelse(taxdata_R_EE$BLDG_AGE >= 50,3,taxdata_R_EE$AGE_SCORE)
taxdata_R_EE$AGE_SCORE <- ifelse(taxdata_R_EE$BLDG_AGE >= 100,2,taxdata_R_EE$AGE_SCORE)
taxdata_R_EE$AGE_SCORE <- ifelse(taxdata_R_EE$BLDG_AGE >= 150,1,taxdata_R_EE$AGE_SCORE)
taxdata_R_EE$AGE_SCORE <- ifelse(taxdata_R_EE$BLDG_AGE >= 200,0,taxdata_R_EE$AGE_SCORE)
## Allocating Residential Air Conditioner Energy Efficiency Score
taxdata_R_EE$COOL_SCORE <- NA
taxdata_R_EE$COOL_SCORE <- ifelse(taxdata_R_EE$R_AC == 'C', 1, taxdata_R_EE$COOL_SCORE)
taxdata_R_EE$COOL_SCORE <- ifelse(taxdata_R_EE$R_AC == 'D', 2, taxdata_R_EE$COOL_SCORE)
taxdata_R_EE$COOL_SCORE <- ifelse(taxdata_R_EE$R_AC == 'N', 3, taxdata_R_EE$COOL_SCORE)
## Aggregate Energy Efficiency score at building level
taxdata_R_EE$EE_SCORE <- taxdata_R_EE$AGE_SCORE+0.75*taxdata_R_EE$HEAT_SCORE+0.75*taxdata_R_EE$COOL_SCORE
#Add a column with a 1 to 10 ranking (decile) for the parcel's sq foot value
taxdata <- mutate(taxdata,
BLDG_RANK = ceiling(rank(AV_BLDG_PER_SF,na.last="keep")/
length(which(!is.na(AV_BLDG_PER_SF)))*10) )
# fix a single parcel in the entire dataset with LU = "XX",
#a land use code that is not registered in the datasat dictionary.
#Since the parcel is owned by a church, it's assumed a Tax Exempt parcel.
taxdata[taxdata$LU == 'XX',]$LU <- "E"
# Reduce land use categories
simplify_LU <- function(LU) {
  # Collapse the assessor's detailed land-use codes into a handful of broad
  # categories; any code not listed below maps to NA.
  groups <- list(
    RESIDENTIAL    = c("R1", "R2", "R3", "R4", "RL", "A"),
    CONDO          = c("CM", "CP"),
    CONDO_UNIT     = "CD",
    MIX_RC         = "RC",
    COMMERCIAL     = c("CC", "C", "CL"),
    AGRICULTURAL   = "AH",
    INDUSTRIAL     = "I",
    TAX_EXEMPT     = "E",
    TAX_EXEMPT_BRA = "EA"
  )
  for (label in names(groups)) {
    if (LU %in% groups[[label]]) {
      return(label)
    }
  }
  NA
}
#Create a new column by applying the simplifyLU function
taxdata <- transform(taxdata, SIMPLIFIED_LU = sapply(LU, simplify_LU))
# Identify homes
isHome <- function(SIMPLIFIED_LU) {
  # 1 when the simplified land-use category represents a home, else 0.
  # Vectorised via %in%: the original if/else only accepted a scalar (and a
  # vector condition errors in R >= 4.2); scalar behaviour is unchanged.
  as.numeric(SIMPLIFIED_LU %in% c("RESIDENTIAL", "CONDO_UNIT", "MIX_RC"))
}
# Create a new column by applying the isHome function
taxdata <- transform(taxdata, HOME = sapply(SIMPLIFIED_LU, isHome)) | /Assignments/Final/Final_Record_Level_Syntax.R | no_license | bitsandbricks/Boston-Tax-Assesor-Dataset-2015 | R | false | false | 5,966 | r | require(stringr)
require(tidyr)
require(dplyr)
require(sqldf)
# read in the revised data frame from midterm part 1
taxdata <- read.csv("/home/havb/Dropbox/MSUI/Big Data for Cities - PPUA 5262 - 01/R/data/Tax Assessor/TAdata.csv", stringsAsFactors = FALSE)
# Calculate the Fire Risk for Residential Properties
# First, get a subset of residential buildings using the LU type
resSub<-taxdata[which(taxdata$LU %in% c('R1','R2','R3','R4','A','CM')),]
resSub$R_KITCH<-ifelse(is.na(resSub$R_KITCH),0,resSub$R_KITCH)
# Run the calculation
resSub$R_FIRE_RISK<-(1 * as.integer(resSub$YR_BUILT<1940 & is.na(resSub$YR_REMOD)))+
(0.5 * as.integer(resSub$R_KITCH>0)) +
(1 * as.integer(resSub$R_KITCH>1)) +
(1 * as.integer(resSub$R_KITCH>2)) +
(1 * as.integer(resSub$R_KITCH>3)) +
(1 * as.integer(resSub$R_HEAT_TYP=="S"))
# Merge the fire risk back into the taxdata data frame
colnames(taxdata)[1]<-"X1"
colnames(resSub)[1]<-"X1"
taxdata<-sqldf("select t.*, r.R_FIRE_RISK from
taxdata t
LEFT OUTER JOIN resSub r on r.X1 = t.X1")
colnames(taxdata)[1]<-"X.1"
## create split function that create's new field of first three digits of YR_BUILT
split_decade <- function(df, col = 'DecadeBuilt'){
  # Split a 4-digit year column into its first three digits ('Decade') and its
  # last digit ('Year') using tidyr::extract().
  #
  # Args:
  #   df:  data frame containing `col`.
  #   col: name of the column to split; the default 'DecadeBuilt' preserves
  #        the original hard-coded behaviour, and parameterising it removes
  #        the need to redefine this function for other columns.
  #
  # Returns the transformed data frame (explicitly, not invisibly).
  df <- extract(df, col, into = c('Decade', 'Year'), '(.{3})(.{1})')
  df
}
## create new data frame after running split function
taxdata$DecadeBuilt <- taxdata$YR_BUILT
taxdata <- split_decade(taxdata)
## create new field that includes a "0s" at the end and remove "split" year field
taxdata$DecadeBuilt <- paste0(taxdata$Decade,"0s")
taxdata$Year<-NULL
taxdata$Decade<-NULL
## create split function that create's new field of first three digits of YR_BUILT
split_decade <- function(df, col = 'DecadeRemod'){
  # Split a 4-digit year column into its first three digits ('Decade') and its
  # last digit ('Year') using tidyr::extract().
  # NOTE: this redefinition replaces the earlier split_decade (which targeted
  # 'DecadeBuilt'); with the column now a parameter, only the default differs.
  #
  # Args:
  #   df:  data frame containing `col`.
  #   col: name of the column to split (default 'DecadeRemod' preserves the
  #        original hard-coded behaviour).
  #
  # Returns the transformed data frame (explicitly, not invisibly).
  df <- extract(df, col, into = c('Decade', 'Year'), '(.{3})(.{1})')
  df
}
## create new data frame after running split function
# BUG FIX: the original read `taxdata$YR_Remod` (wrong capitalisation).
# R names are case sensitive and the column is referenced as YR_REMOD
# everywhere else in this script, so the old code copied NULL and never
# populated DecadeRemod.
taxdata$DecadeRemod <- taxdata$YR_REMOD
taxdata <- split_decade(taxdata)
## create new field that includes a "0s" at the end and remove "split" year field
taxdata$DecadeRemod <- paste0(taxdata$Decade,"0s")
taxdata$Year<-NULL
taxdata$Decade<-NULL
## Residential subset
taxdata_R_EE<- taxdata[taxdata$LU =='R1'| taxdata$LU == 'R2'| taxdata$LU == 'R3'| taxdata$LU == 'R4'| taxdata$LU == 'A'| taxdata$LU == 'RC',]
## Allocating Residential Heat Type Energy Efficiency Score
taxdata_R_EE$HEAT_SCORE <- NA
taxdata_R_EE$HEAT_SCORE <- ifelse(taxdata_R_EE$R_HEAT_TYP == 'S', 0, taxdata_R_EE$HEAT_SCORE)
taxdata_R_EE$HEAT_SCORE <- ifelse(taxdata_R_EE$R_HEAT_TYP == 'W', 1, taxdata_R_EE$HEAT_SCORE)
taxdata_R_EE$HEAT_SCORE <- ifelse(taxdata_R_EE$R_HEAT_TYP == 'P', 2, taxdata_R_EE$HEAT_SCORE)
taxdata_R_EE$HEAT_SCORE <- ifelse(taxdata_R_EE$R_HEAT_TYP == 'F', 3, taxdata_R_EE$HEAT_SCORE)
taxdata_R_EE$HEAT_SCORE <- ifelse(taxdata_R_EE$R_HEAT_TYP == 'E', 4, taxdata_R_EE$HEAT_SCORE)
## Allocating age to Residential buildings
taxdata_R_EE$YR_BUILT <- ifelse(taxdata_R_EE$YR_BUILT == '',NA,taxdata_R_EE$YR_BUILT)
taxdata_R_EE$YR_BUILT <- ifelse(taxdata_R_EE$YR_BUILT == 0,NA,taxdata_R_EE$YR_BUILT)
taxdata_R_EE$YR_REMOD <- ifelse(taxdata_R_EE$YR_REMOD == '',NA,taxdata_R_EE$YR_REMOD)
taxdata_R_EE$YR_REMOD <- ifelse(taxdata_R_EE$YR_REMOD == 0,NA,taxdata_R_EE$YR_REMOD)
taxdata_R_EE$BLDG_AGE <- ifelse(is.na(taxdata_R_EE$YR_REMOD), (2015 - taxdata_R_EE$YR_BUILT), (2015 - taxdata_R_EE$YR_REMOD))
taxdata_R_EE$BLDG_AGE <- ifelse(taxdata_R_EE$BLDG_AGE <=0,NA,taxdata_R_EE$BLDG_AGE)
##Allocating Building Age Score
taxdata_R_EE$AGE_SCORE <- NA
taxdata_R_EE$AGE_SCORE <- ifelse(taxdata_R_EE$BLDG_AGE < 50,4,taxdata_R_EE$AGE_SCORE)
taxdata_R_EE$AGE_SCORE <- ifelse(taxdata_R_EE$BLDG_AGE >= 50,3,taxdata_R_EE$AGE_SCORE)
taxdata_R_EE$AGE_SCORE <- ifelse(taxdata_R_EE$BLDG_AGE >= 100,2,taxdata_R_EE$AGE_SCORE)
taxdata_R_EE$AGE_SCORE <- ifelse(taxdata_R_EE$BLDG_AGE >= 150,1,taxdata_R_EE$AGE_SCORE)
taxdata_R_EE$AGE_SCORE <- ifelse(taxdata_R_EE$BLDG_AGE >= 200,0,taxdata_R_EE$AGE_SCORE)
## Allocating Residential Air Conditioner Energy Efficiency Score
taxdata_R_EE$COOL_SCORE <- NA
taxdata_R_EE$COOL_SCORE <- ifelse(taxdata_R_EE$R_AC == 'C', 1, taxdata_R_EE$COOL_SCORE)
taxdata_R_EE$COOL_SCORE <- ifelse(taxdata_R_EE$R_AC == 'D', 2, taxdata_R_EE$COOL_SCORE)
taxdata_R_EE$COOL_SCORE <- ifelse(taxdata_R_EE$R_AC == 'N', 3, taxdata_R_EE$COOL_SCORE)
## Aggregate Energy Efficiency score at building level
taxdata_R_EE$EE_SCORE <- taxdata_R_EE$AGE_SCORE+0.75*taxdata_R_EE$HEAT_SCORE+0.75*taxdata_R_EE$COOL_SCORE
#Add a column with a 1 to 10 ranking (decile) for the parcel's sq foot value
taxdata <- mutate(taxdata,
BLDG_RANK = ceiling(rank(AV_BLDG_PER_SF,na.last="keep")/
length(which(!is.na(AV_BLDG_PER_SF)))*10) )
# fix a single parcel in the entire dataset with LU = "XX",
#a land use code that is not registered in the datasat dictionary.
#Since the parcel is owned by a church, it's assumed a Tax Exempt parcel.
taxdata[taxdata$LU == 'XX',]$LU <- "E"
# Reduce land use categories
simplify_LU <- function(LU) {
  # Collapse the assessor's detailed land-use codes into a handful of broad
  # categories; any code not listed below maps to NA.
  groups <- list(
    RESIDENTIAL    = c("R1", "R2", "R3", "R4", "RL", "A"),
    CONDO          = c("CM", "CP"),
    CONDO_UNIT     = "CD",
    MIX_RC         = "RC",
    COMMERCIAL     = c("CC", "C", "CL"),
    AGRICULTURAL   = "AH",
    INDUSTRIAL     = "I",
    TAX_EXEMPT     = "E",
    TAX_EXEMPT_BRA = "EA"
  )
  for (label in names(groups)) {
    if (LU %in% groups[[label]]) {
      return(label)
    }
  }
  NA
}
#Create a new column by applying the simplifyLU function
taxdata <- transform(taxdata, SIMPLIFIED_LU = sapply(LU, simplify_LU))
# Identify homes
isHome <- function(SIMPLIFIED_LU) {
  # 1 when the simplified land-use category represents a home, else 0.
  # Vectorised via %in%: the original if/else only accepted a scalar (and a
  # vector condition errors in R >= 4.2); scalar behaviour is unchanged.
  as.numeric(SIMPLIFIED_LU %in% c("RESIDENTIAL", "CONDO_UNIT", "MIX_RC"))
}
# Create a new column by applying the isHome function
taxdata <- transform(taxdata, HOME = sapply(SIMPLIFIED_LU, isHome)) |
rm(list=ls())
library(ggplot2)
library(ggmap)
statesMap = map_data("state")
str(statesMap)
table(statesMap$group)
ggplot(statesMap, aes(x=long, y=lat, group=group))+geom_polygon(fill="white", color="blue")+coord_map('mercator')
?mapproject
polling=read.csv("C:/users/zahid/downloads/PollingImputed.csv")
Train=subset(polling, Year==2004 | Year==2008)
Test=subset(polling, Year ==2012)
mod2 = glm(Republican~SurveyUSA+DiffCount, data=Train, family="binomial")
TestPrediction = predict(mod2, newdata=Test, type="response")
TestPredictionBinary = as.numeric(TestPrediction > 0.5)
predictionDataFrame = data.frame(TestPrediction, TestPredictionBinary, Test$State)
table(TestPredictionBinary)
table(Test$Republican, TestPrediction>0.5)
mean(TestPrediction)
predictionDataFrame$region = tolower(predictionDataFrame$Test.State)
predictionMap = merge(statesMap, predictionDataFrame, by = "region")
predictionMap = predictionMap[order(predictionMap$order),] #so map will be drawn properly
str(predictionMap)
str(statesMap)
?merge
ggplot(predictionMap, aes(x = long, y = lat, group = group, fill = TestPredictionBinary)) + geom_polygon(color = "black")
ggplot(predictionMap, aes(x = long, y = lat, group = group, fill = TestPredictionBinary))+ geom_polygon(color = "black") + scale_fill_gradient(low = "blue", high = "red", guide = "legend", breaks= c(0,1), labels = c("Democrat", "Republican"), name = "Prediction 2012")
ggplot(predictionMap, aes(x = long, y = lat, group = group, fill = TestPrediction))+ geom_polygon(color = "black") + scale_fill_gradient(low = "blue", high = "red", guide = "legend", breaks= c(0,1), labels = c("Democrat", "Republican"), name = "Prediction 2012")
table(Test$State=="florida", TestPrediction)
str(TestPrediction)
str(Test)
temp=cbind(Test$State, TestPrediction)
Test$Republican[6]
?geom_polygon
ggplot(predictionMap, aes(x = long, y = lat, group = group, fill = TestPredictionBinary))+ geom_polygon(color = "black", alpha=0.3) + scale_fill_gradient(low = "blue", high = "red", guide = "legend", breaks= c(0,1), labels = c("Democrat", "Republican"), name = "Prediction 2012")
?alpha
| /Visualization(US Presidential Election).R | no_license | zahidmak/dataAnalysis | R | false | false | 2,111 | r | rm(list=ls())
library(ggplot2)
library(ggmap)
statesMap = map_data("state")
str(statesMap)
table(statesMap$group)
ggplot(statesMap, aes(x=long, y=lat, group=group))+geom_polygon(fill="white", color="blue")+coord_map('mercator')
?mapproject
polling=read.csv("C:/users/zahid/downloads/PollingImputed.csv")
Train=subset(polling, Year==2004 | Year==2008)
Test=subset(polling, Year ==2012)
mod2 = glm(Republican~SurveyUSA+DiffCount, data=Train, family="binomial")
TestPrediction = predict(mod2, newdata=Test, type="response")
TestPredictionBinary = as.numeric(TestPrediction > 0.5)
predictionDataFrame = data.frame(TestPrediction, TestPredictionBinary, Test$State)
table(TestPredictionBinary)
table(Test$Republican, TestPrediction>0.5)
mean(TestPrediction)
predictionDataFrame$region = tolower(predictionDataFrame$Test.State)
predictionMap = merge(statesMap, predictionDataFrame, by = "region")
predictionMap = predictionMap[order(predictionMap$order),] #so map will be drawn properly
str(predictionMap)
str(statesMap)
?merge
ggplot(predictionMap, aes(x = long, y = lat, group = group, fill = TestPredictionBinary)) + geom_polygon(color = "black")
ggplot(predictionMap, aes(x = long, y = lat, group = group, fill = TestPredictionBinary))+ geom_polygon(color = "black") + scale_fill_gradient(low = "blue", high = "red", guide = "legend", breaks= c(0,1), labels = c("Democrat", "Republican"), name = "Prediction 2012")
ggplot(predictionMap, aes(x = long, y = lat, group = group, fill = TestPrediction))+ geom_polygon(color = "black") + scale_fill_gradient(low = "blue", high = "red", guide = "legend", breaks= c(0,1), labels = c("Democrat", "Republican"), name = "Prediction 2012")
table(Test$State=="florida", TestPrediction)
str(TestPrediction)
str(Test)
temp=cbind(Test$State, TestPrediction)
Test$Republican[6]
?geom_polygon
ggplot(predictionMap, aes(x = long, y = lat, group = group, fill = TestPredictionBinary))+ geom_polygon(color = "black", alpha=0.3) + scale_fill_gradient(low = "blue", high = "red", guide = "legend", breaks= c(0,1), labels = c("Democrat", "Republican"), name = "Prediction 2012")
?alpha
|
# Library ====
library(tidyverse)
# Data loading ====
# Not needed in this case because the data comes from R's built-in datasets
# who <- read_csv("data/raw/who.csv")
?who
# Pivot_longer
# Reshape the wide WHO case columns into long form; the values column is
# named 'casos' ("cases").
who1 <- who %>% pivot_longer(new_sp_m014:newrel_f65, names_to = "aux", values_to = 'casos',
values_drop_na = TRUE)
# Normalise the inconsistent "newrel" prefix so every code splits into 3 parts.
who2 <- who1 %>% mutate(aux = str_replace(aux, "newrel", "new_rel"))
who3 <- who2 %>% separate(aux, c("new", "tipo", "genero_edad")) %>%
select(-new)
# NOTE(review): this line discards its result and references the `new` column,
# which was removed from who3 on the previous step, so it would fail if run —
# it looks like a leftover exploratory check; confirm and remove.
who3 %>% filter(!str_detect(new, 'new'))
# First character of genero_edad is the sex code; the rest is the age band.
who4 <- who3 %>% separate(genero_edad, into = c("genero", "edad"), sep = 1)
# Last two characters are the upper age bound; convert both parts to integers.
who5 <- who4 %>% separate(edad, into = c("edad_low", "edad_high"), sep = -2,
convert = TRUE)
# Join population counts and compute the per-capita rate, then export.
dataset <- left_join(who5, population, by = c("country", "year")) %>%
mutate(rate = casos / population) %>%
# BUG FIX: the original `select(-iso2, iso3)` mixed negative and positive
# selection; the intent (consistent with -iso2) is to drop BOTH ISO code
# columns.
select(-iso2, -iso3)
write_csv(dataset, "data/interim/who_clean.csv")
| /src/04-who.R | no_license | graf-anapaula/curso_econ | R | false | false | 919 | r | # Library ====
library(tidyverse)
# Lectura de datos ====
# En este caso no aplica porque voy a sacar los datos de las bases de R
# who <- read_csv("data/raw/who.csv")
?who
# Pivot_longer
who1 <- who %>% pivot_longer(new_sp_m014:newrel_f65, names_to = "aux", values_to = 'casos',
values_drop_na = TRUE)
who2 <- who1 %>% mutate(aux = str_replace(aux, "newrel", "new_rel"))
who3 <- who2 %>% separate(aux, c("new", "tipo", "genero_edad")) %>%
select(-new)
who3 %>% filter(!str_detect(new, 'new'))
who4 <- who3 %>% separate(genero_edad, into = c("genero", "edad"), sep = 1)
who5 <- who4 %>% separate(edad, into = c("edad_low", "edad_high"), sep = -2,
convert = TRUE)
dataset <- left_join(who5, population, by = c("country", "year")) %>%
mutate(rate = casos / population) %>%
select(-iso2, iso3)
write_csv(dataset, "data/interim/who_clean.csv")
|
#21
# Normal(100, sd = 16) density, then overlay the density with sd = 16/sqrt(n)
# for n = 4 and n = 8 (the sd shrinks by sqrt(n)).
x <- seq(60,140,length = 1000)
plot(x,dnorm(x,100,16),type="l",ylim = c(0,0.1),ylab ="Normal Density")
n=4
curve(dnorm(x,100,16/sqrt(n)),col="red", add=TRUE)
n=8
curve(dnorm(x,100,16/sqrt(n)),col="blue", add=TRUE)
#22
# Chi-squared densities for 2-4 degrees of freedom plus their 95th percentiles.
x <- seq(0,10,length = 1000)
plot(x,dchisq(x,df=2),type="l",ylim=c(0,0.5))
curve(dchisq(x,df=3),col="red",add=TRUE)
curve(dchisq(x,df=4),col="blue",add=TRUE)
qchisq(0.95,df=2) #5.991465
qchisq(0.95,df=3) #7.814728
qchisq(0.95,df=4) #9.487
#23
# Test statistic (n-1)*s^2/sigma^2 with n = 8, s^2 = 400, sigma^2 = 256,
# and its upper-tail p-value on 7 degrees of freedom.
chi <- ((8-1)*400)/256
1-pchisq(chi,7)
1-pchisq(20,7)
#24
# Sample mean and standard deviation of the ten observations.
x<-c(82.03,75.89,88.39,79.59,70.4,88.14,85.13,80.40,90.27,74.17)
mean(x)
sd(x)
| /Review.R | no_license | isaacattuah/MTH224-R | R | false | false | 645 | r | #21
x <- seq(60,140,length = 1000)
plot(x,dnorm(x,100,16),type="l",ylim = c(0,0.1),ylab ="Normal Density")
n=4
curve(dnorm(x,100,16/sqrt(n)),col="red", add=TRUE)
n=8
curve(dnorm(x,100,16/sqrt(n)),col="blue", add=TRUE)
#22
x <- seq(0,10,length = 1000)
plot(x,dchisq(x,df=2),type="l",ylim=c(0,0.5))
curve(dchisq(x,df=3),col="red",add=TRUE)
curve(dchisq(x,df=4),col="blue",add=TRUE)
qchisq(0.95,df=2) #5.991465
qchisq(0.95,df=3) #7.814728
qchisq(0.95,df=4) #9.487
#23
chi <- ((8-1)*400)/256
1-pchisq(chi,7)
1-pchisq(20,7)
#24
x<-c(82.03,75.89,88.39,79.59,70.4,88.14,85.13,80.40,90.27,74.17)
mean(x)
sd(x)
|
#Clustering - Simple dataset - Marks in 2 subjects
A=c(1,1.5,3,5,3.5,4.5,3.5)
B=c(1,2,4,7,5,5,4.5)
marks=data.frame(A,B)
marks
(c1 <- kmeans(marks, 2)) #kmeans algorithm specifies no of clusters
cbind(marks, c1$cluster)
plot(marks, col = c1$cluster)
#cluster works only on numerical values
points(c1$centers, col = 1:2, pch = 8, cex = 2) #centre values of that particular cluster, 1:2 specifies the colour
#pch is plotting symbol
c1$iter #iteration
#C1- 1, 2 : (1.3, 1.5)
#C2- 3, 4, 5, 6, 7 : (3.9, 5.1)
#
#Specify Coordinates for Centers
mcenters = marks[c(1,4),] #1st and 4th row is selected, become the centre point, around this the 2 clusters are created
mcenters
(c2a <- kmeans(marks, centers=mcenters))
c2a
matrix(c(1,1,5,7), ncol=2)
?matrix
(c2b <- kmeans(marks, centers=matrix(c(1,1,5,7), ncol=2)))
c2a
cbind(marks,c2a$cluster)
c2a$centers
aggregate(marks,by=list(c2a$cluster),FUN=mean)
c2a
c2a$iter
library(dplyr)
marks %>% group_by(c2a$cluster) %>% summarise_all(funs(sum, mean, median, n()))
# Distances
x1=marks[1,]; x2=marks[2,]
x1;x2
sqrt(sum((x1-x2)^2))
sqrt(1.25)
dist(rbind(x1,x2))
# Euclidean distance between two equal-length numeric vectors (or
# single-row slices of a data frame, as used below).
euc.dist <- function(x1, x2) {
  diffs <- x1 - x2
  sqrt(sum(diffs^2))
}
for (i in 1:7)
print(paste(i, round(euc.dist(marks[i,], marks[1,]),2),sep='-'))
ref1 = marks[1,]; ref1
ref2 = marks[4,]; ref2
(d1= apply(marks,1,function(x)sqrt(sum((x-ref1)^2))))
(d2= apply(marks,1,function(x)sqrt(sum((x-ref2)^2))))
df=cbind(marks, d1,d2)
df
apply(df, 1, function(x) max(which(x == min(x, na.rm = TRUE))))
df
apply(df[,c(3,4)],1, min)
df3 <-transform(df, mind1d2=apply(df[,c(3,4)],1, min, na.rm = TRUE))
df3
| /cluster.r | no_license | Nihali/MA | R | false | false | 1,595 | r | #Clustering - Simple dataset - Marks in 2 subjects
A=c(1,1.5,3,5,3.5,4.5,3.5)
B=c(1,2,4,7,5,5,4.5)
marks=data.frame(A,B)
marks
(c1 <- kmeans(marks, 2)) #kmeans algorithm specifies no of clusters
cbind(marks, c1$cluster)
plot(marks, col = c1$cluster)
#cluster works only on numerical values
points(c1$centers, col = 1:2, pch = 8, cex = 2) #centre values of that particular cluster, 1:2 specifies the colour
#pch is plotting symbol
c1$iter #iteration
#C1- 1, 2 : (1.3, 1.5)
#C2- 3, 4, 5, 6, 7 : (3.9, 5.1)
#
#Specify Coordinates for Centers
mcenters = marks[c(1,4),] #1st and 4th row is selected, become the centre point, around this the 2 clusters are created
mcenters
(c2a <- kmeans(marks, centers=mcenters))
c2a
matrix(c(1,1,5,7), ncol=2)
?matrix
(c2b <- kmeans(marks, centers=matrix(c(1,1,5,7), ncol=2)))
c2a
cbind(marks,c2a$cluster)
c2a$centers
aggregate(marks,by=list(c2a$cluster),FUN=mean)
c2a
c2a$iter
library(dplyr)
marks %>% group_by(c2a$cluster) %>% summarise_all(funs(sum, mean, median, n()))
# Distances
x1=marks[1,]; x2=marks[2,]
x1;x2
sqrt(sum((x1-x2)^2))
sqrt(1.25)
dist(rbind(x1,x2))
# Euclidean distance between two equal-length numeric vectors (or
# single-row slices of a data frame, as used below).
euc.dist <- function(x1, x2) {
  diffs <- x1 - x2
  sqrt(sum(diffs^2))
}
for (i in 1:7)
print(paste(i, round(euc.dist(marks[i,], marks[1,]),2),sep='-'))
ref1 = marks[1,]; ref1
ref2 = marks[4,]; ref2
(d1= apply(marks,1,function(x)sqrt(sum((x-ref1)^2))))
(d2= apply(marks,1,function(x)sqrt(sum((x-ref2)^2))))
df=cbind(marks, d1,d2)
df
apply(df, 1, function(x) max(which(x == min(x, na.rm = TRUE))))
df
apply(df[,c(3,4)],1, min)
df3 <-transform(df, mind1d2=apply(df[,c(3,4)],1, min, na.rm = TRUE))
df3
|
\name{adjacency_matrix}
\alias{adjacency_matrix}
\alias{adjmat}
\title{Takes a Matrix and Generates an Adjacency Matrix}
\usage{
adjacency_matrix(matrix.obj)
adjmat(matrix.obj)
}
\arguments{
\item{matrix.obj}{A matrix object, preferably, of the
class "termco" generated from \code{\link[qdap]{termco}},
\code{\link[qdap]{termco.d}} or
\code{\link[qdap]{termco.c}}.}
}
\value{
Returns list: \item{boolean}{A Boolean matrix}
\item{adjacency}{An adjacency matrix. Diagonals are the
total (sum) number of occurrences a variable had}
\item{shared}{An adjacency matrix with no diagonal and
the upper triangle replaced with NA} \item{sum}{The
diagonal of the adjacency matrix; the total (sum) number
of occurrences a variable had}
}
\description{
Takes a matrix (wfm) or termco object and generates an
adjacency matrix for use with the
\href{http://igraph.sourceforge.net/}{igraph} package.
}
\examples{
\dontrun{
words <- c(" you", " the", "it", "oo")
Terms <- with(DATA, termco(state, list(sex, adult), words))
Terms
adjacency_matrix(Terms)
wordLIST <- c(" montague", " capulet", " court", " marry")
raj.termco <- with(raj.act.1, termco(dialogue, person, wordLIST))
raj.adjmat <- adjmat(raj.termco)
names(raj.adjmat) #see what's available from the adjacency_matrix object
library(igraph)
g <- graph.adjacency(raj.adjmat$adjacency, weighted=TRUE, mode ="undirected")
g <- simplify(g)
V(g)$label <- V(g)$name
V(g)$degree <- degree(g)
plot(g, layout=layout.auto(g))
}
}
\seealso{
\code{\link[stats]{dist}}
}
\keyword{adjacency-matrix,}
\keyword{Boolean-matrix}
| /man/adjacency_matrix.Rd | no_license | abresler/qdap | R | false | false | 1,592 | rd | \name{adjacency_matrix}
\alias{adjacency_matrix}
\alias{adjmat}
\title{Takes a Matrix and Generates an Adjacency Matrix}
\usage{
adjacency_matrix(matrix.obj)
adjmat(matrix.obj)
}
\arguments{
\item{matrix.obj}{A matrix object, preferably, of the
class "termco" generated from \code{\link[qdap]{termco}},
\code{\link[qdap]{termco.d}} or
\code{\link[qdap]{termco.c}}.}
}
\value{
Returns list: \item{boolean}{A Boolean matrix}
\item{adjacency}{An adjacency matrix. Diagonals are the
total (sum) number of occurrences a variable had}
\item{shared}{An adjacency matrix with no diagonal and
the upper triangle replaced with NA} \item{sum}{The
diagonal of the adjacency matrix; the total (sum) number
of occurrences a variable had}
}
\description{
Takes a matrix (wfm) or termco object and generates an
adjacency matrix for use with the
\href{http://igraph.sourceforge.net/}{igraph} package.
}
\examples{
\dontrun{
words <- c(" you", " the", "it", "oo")
Terms <- with(DATA, termco(state, list(sex, adult), words))
Terms
adjacency_matrix(Terms)
wordLIST <- c(" montague", " capulet", " court", " marry")
raj.termco <- with(raj.act.1, termco(dialogue, person, wordLIST))
raj.adjmat <- adjmat(raj.termco)
names(raj.adjmat) #see what's available from the adjacency_matrix object
library(igraph)
g <- graph.adjacency(raj.adjmat$adjacency, weighted=TRUE, mode ="undirected")
g <- simplify(g)
V(g)$label <- V(g)$name
V(g)$degree <- degree(g)
plot(g, layout=layout.auto(g))
}
}
\seealso{
\code{\link[stats]{dist}}
}
\keyword{adjacency-matrix,}
\keyword{Boolean-matrix}
|
# regression_log.R
# Logistic regression of "order carries a cheap add-on item" on order total,
# run for one customer cluster over four day/time segments:
# weekday lunch, weekday dinner, weekend lunch, weekend dinner.
# (Original run label: c2, weekday1234_lunch.)
#
# NOTE(review): the original filters used hourofday < 15 for "lunch" and
# hourofday > 15 for "dinner", so hour 15 itself appears in no segment.
# That behaviour is preserved exactly here -- confirm whether it is intended.
# The original also derived is_weekend / is_dinner / delivery vectors that fed
# no model; they are dropped.

divisor <- 5                 # modulus used to flag totals by price residue
cluster_numb <- 3            # cluster analysed
credit_1_cash_0_set <- 1     # keep only orders with credit_1_cash_0 == 1
start <- 11                  # lower bound on total net of the add-on item
end <- 21                    # upper bound

ref_dir <- "/Users/alan/Desktop/data_reference"

# Reference tables. The original re-read all four files before every segment;
# their contents never change between segments, so they are loaded once here.
cluster_total <- read.csv(file.path(ref_dir, "cluster_data_in.csv"), header = TRUE)
order_multi <- read.csv(file.path(ref_dir, "cut_cdf.csv"), header = TRUE)
numb_item_no_restrict_table <- read.csv(
  file.path(ref_dir, "numb_item_no_restrict_table_withmin.csv"), header = TRUE)
large_pizza_orders <- read.csv(
  file.path(ref_dir, "large_pizza_orders.csv"), header = TRUE)

# Assemble the per-order table shared by all four segments: join the reference
# tables onto the cluster data and apply the segment-independent filters.
build_base <- function() {
  base <- merge(cluster_total, order_multi, by = "orderid", all.x = TRUE)
  base <- merge(base, numb_item_no_restrict_table, by = "orderid")
  base$orderitem[is.na(base$orderitem)] <- 0
  # An order's min-priced item only counts as an "add-on" when it is cheap
  # (<= 3) and the order has more than one unrestricted item.
  base$min_price_item[base$min_price_item > 3 | base$numb_item_no_restrict == 1] <- 0
  base <- merge(base, large_pizza_orders, by = "orderid", all.x = TRUE)
  base$large_pizza[is.na(base$large_pizza)] <- 0
  subset(base,
         credit_1_cash_0 == credit_1_cash_0_set &
           orderitem == 0 &
           large_pizza == 0 &
           customerid_0_visitor != 0 &
           discount == 0 &
           cluster == cluster_numb)
}

# Fit the add-on model for one segment.
#   weekend = TRUE keeps dayofweek >= 5, FALSE keeps dayofweek < 5
#   dinner  = TRUE keeps hourofday >  15, FALSE keeps hourofday < 15
fit_addon_model <- function(base, weekend, dinner) {
  seg <- if (weekend) subset(base, dayofweek >= 5) else subset(base, dayofweek < 5)
  seg <- if (dinner) subset(seg, hourofday > 15) else subset(seg, hourofday < 15)

  # Order total excluding the candidate add-on item, restricted to the
  # price window [start, end].
  seg$total_min_item <- seg$total - seg$min_price_item
  seg <- subset(seg, total_min_item >= start & total_min_item <= end)

  # Response: 1 when the order carries a qualifying add-on item (NA -> 0,
  # matching the original's which()/is.na() construction).
  is_addon <- ifelse(!is.na(seg$min_price_item) & seg$min_price_item > 0, 1, 0)
  # Flag: 1 when floor(net total) mod `divisor` is 0, 1 or 2 (NA -> 0).
  residue <- floor(seg$total_min_item) %% divisor
  focus_flag_1 <- ifelse(!is.na(residue) & residue <= 2, 1, 0)
  focus_total <- seg$total

  glm(is_addon ~ focus_total + focus_flag_1, family = binomial)
}

# Run the four segments in the original order and print each fit.
base <- build_base()
for (weekend in c(FALSE, TRUE)) {
  for (dinner in c(FALSE, TRUE)) {
    model1 <- fit_addon_model(base, weekend, dinner)
    print(summary(model1))
  }
}
| /regression_log.R | no_license | yaoxiang1/Machine-Learning-and-Data-Analytics-Research | R | false | false | 14,342 | r | divisor <- 5
# c2, weekday1234_lunch
cluster_numb = 3
credit_1_cash_0_set = 1
# start <- 6
# end <- 11
# start <- 10
# end <- 15
start <- 11
end <- 21
cluster_total<-read.csv(file=paste("/Users/alan/Desktop/data_reference/cluster_data_in.csv",sep=""),head=TRUE,sep=",")
#########################################################################################################
order_multi<-read.csv(file="/Users/alan/Desktop/data_reference/cut_cdf.csv",head=TRUE,sep=",")
# cluster1<-read.csv(file=paste("/Users/alan/Desktop/data_reference/cluster",cluster_numb,'.csv',sep=""),head=TRUE,sep=",")
cluster1<-cluster_total
cluster1<-merge(cluster1, order_multi, by="orderid" , all.x=TRUE)
numb_item_no_restrict_table <- read.csv(file="/Users/alan/Desktop/data_reference/numb_item_no_restrict_table_withmin.csv",head=TRUE,sep=",")
cluster1<-merge(cluster1, numb_item_no_restrict_table, by="orderid")
cluster1$orderitem[is.na(cluster1$orderitem)] <- 0
#################################################################################################
cluster1$min_price_item[cluster1$min_price_item > 3 | cluster1$numb_item_no_restrict == 1] <- 0
#################################################################################################
large_pizza_orders<-read.csv(file="/Users/alan/Desktop/data_reference/large_pizza_orders.csv",head=TRUE,sep=",")
cluster1<-merge(cluster1, large_pizza_orders, by="orderid" , all.x=TRUE)
cluster1$large_pizza[is.na(cluster1$large_pizza)] <- 0
data_location<-cluster1
data_location<-subset(data_location,credit_1_cash_0 == credit_1_cash_0_set)
data_location<-subset(data_location,orderitem == 0)
data_location<-subset(data_location,large_pizza == 0)
data_location<-subset(data_location,customerid_0_visitor!=0)
data_location<-subset(data_location,discount == 0)
#################################################################################################
# red < <
# blue < >
# green >= <
# brown >= >
data_location<-subset(data_location,dayofweek < 5 )
data_location<-subset(data_location,hourofday < 15)
#################################################################################################
data_location<-subset(data_location,cluster == cluster_numb)
cluster1 <- data_location
cluster1$total_min_item <- cluster1$total - cluster1$min_price_item
cluster1$is_weekend <- floor(cluster1$dayofweek/4)
cluster1$is_dinner <- floor(cluster1$hourofday/15)
###################################################################################################
###################################################################################################
# The location to decide price interval
cluster1<-subset(cluster1,total_min_item <= end)
cluster1<-subset(cluster1,total_min_item >= start)
###################################################################################################
###################################################################################################
cluster1["is_addon"] <- NA
cluster1$is_addon[which(cluster1$min_price_item > 0 )]<-1
cluster1$is_addon[is.na(cluster1$is_addon)] <- 0
cluster1["numb_item_1"] <- NA
cluster1$numb_item_1[which(floor(cluster1$total_min_item)%%divisor==0 | floor(cluster1$total_min_item)%%divisor==1 | floor(cluster1$total_min_item)%%divisor==2 )]<-1
cluster1$numb_item_1[is.na(cluster1$numb_item_1)] <- 0
# cluster1$numb_item_1
focus_flag_1 <- cluster1$numb_item_1
focus_total <- cluster1$total
delivery <- cluster1$deli_1_takeout_0
dinner <- cluster1$is_dinner
weekend <- cluster1$is_weekend
is_addon <- cluster1$is_addon
model1<-glm(is_addon ~ focus_total + focus_flag_1, family=binomial)
print (summary(model1))
#########################################################################################################
order_multi<-read.csv(file="/Users/alan/Desktop/data_reference/cut_cdf.csv",head=TRUE,sep=",")
cluster1<-cluster_total
cluster1<-merge(cluster1, order_multi, by="orderid" , all.x=TRUE)
numb_item_no_restrict_table <- read.csv(file="/Users/alan/Desktop/data_reference/numb_item_no_restrict_table_withmin.csv",head=TRUE,sep=",")
cluster1<-merge(cluster1, numb_item_no_restrict_table, by="orderid")
cluster1$orderitem[is.na(cluster1$orderitem)] <- 0
#################################################################################################
cluster1$min_price_item[cluster1$min_price_item > 3 | cluster1$numb_item_no_restrict == 1] <- 0
#################################################################################################
large_pizza_orders<-read.csv(file="/Users/alan/Desktop/data_reference/large_pizza_orders.csv",head=TRUE,sep=",")
cluster1<-merge(cluster1, large_pizza_orders, by="orderid" , all.x=TRUE)
cluster1$large_pizza[is.na(cluster1$large_pizza)] <- 0
data_location<-cluster1
data_location<-subset(data_location,credit_1_cash_0 == credit_1_cash_0_set)
data_location<-subset(data_location,orderitem == 0)
data_location<-subset(data_location,large_pizza == 0)
data_location<-subset(data_location,customerid_0_visitor!=0)
data_location<-subset(data_location,discount == 0)
#################################################################################################
# red < <
# blue < >
# green >= <
# brown >= >
data_location<-subset(data_location,dayofweek < 5 )
data_location<-subset(data_location,hourofday > 15)
#################################################################################################
data_location<-subset(data_location,cluster == cluster_numb)
cluster1 <- data_location
cluster1$total_min_item <- cluster1$total - cluster1$min_price_item
cluster1$is_weekend <- floor(cluster1$dayofweek/4)
cluster1$is_dinner <- floor(cluster1$hourofday/15)
###################################################################################################
###################################################################################################
# The location to decide price interval
cluster1<-subset(cluster1,total_min_item <= end)
cluster1<-subset(cluster1,total_min_item >= start)
###################################################################################################
###################################################################################################
cluster1["is_addon"] <- NA
cluster1$is_addon[which(cluster1$min_price_item > 0 )]<-1
cluster1$is_addon[is.na(cluster1$is_addon)] <- 0
cluster1["numb_item_1"] <- NA
cluster1$numb_item_1[which(floor(cluster1$total_min_item)%%divisor==0 | floor(cluster1$total_min_item)%%divisor==1 | floor(cluster1$total_min_item)%%divisor==2 )]<-1
cluster1$numb_item_1[is.na(cluster1$numb_item_1)] <- 0
# cluster1$numb_item_1
focus_flag_1 <- cluster1$numb_item_1
focus_total <- cluster1$total
delivery <- cluster1$deli_1_takeout_0
dinner <- cluster1$is_dinner
weekend <- cluster1$is_weekend
is_addon <- cluster1$is_addon
model1<-glm(is_addon ~ focus_total + focus_flag_1, family=binomial)
print (summary(model1))
#########################################################################################################
order_multi<-read.csv(file="/Users/alan/Desktop/data_reference/cut_cdf.csv",head=TRUE,sep=",")
cluster1<-cluster_total
cluster1<-merge(cluster1, order_multi, by="orderid" , all.x=TRUE)
numb_item_no_restrict_table <- read.csv(file="/Users/alan/Desktop/data_reference/numb_item_no_restrict_table_withmin.csv",head=TRUE,sep=",")
cluster1<-merge(cluster1, numb_item_no_restrict_table, by="orderid")
cluster1$orderitem[is.na(cluster1$orderitem)] <- 0
#################################################################################################
cluster1$min_price_item[cluster1$min_price_item > 3 | cluster1$numb_item_no_restrict == 1] <- 0
#################################################################################################
large_pizza_orders<-read.csv(file="/Users/alan/Desktop/data_reference/large_pizza_orders.csv",head=TRUE,sep=",")
cluster1<-merge(cluster1, large_pizza_orders, by="orderid" , all.x=TRUE)
cluster1$large_pizza[is.na(cluster1$large_pizza)] <- 0
data_location<-cluster1
data_location<-subset(data_location,credit_1_cash_0 == credit_1_cash_0_set)
data_location<-subset(data_location,orderitem == 0)
data_location<-subset(data_location,large_pizza == 0)
data_location<-subset(data_location,customerid_0_visitor!=0)
data_location<-subset(data_location,discount == 0)
#################################################################################################
# red < <
# blue < >
# green >= <
# brown >= >
data_location<-subset(data_location,dayofweek >= 5 )
data_location<-subset(data_location,hourofday < 15)
#################################################################################################
data_location<-subset(data_location,cluster == cluster_numb)
cluster1 <- data_location
cluster1$total_min_item <- cluster1$total - cluster1$min_price_item
cluster1$is_weekend <- floor(cluster1$dayofweek/4)
cluster1$is_dinner <- floor(cluster1$hourofday/15)
###################################################################################################
###################################################################################################
# The location to decide price interval
cluster1<-subset(cluster1,total_min_item <= end)
cluster1<-subset(cluster1,total_min_item >= start)
###################################################################################################
###################################################################################################
cluster1["is_addon"] <- NA
cluster1$is_addon[which(cluster1$min_price_item > 0 )]<-1
cluster1$is_addon[is.na(cluster1$is_addon)] <- 0
cluster1["numb_item_1"] <- NA
cluster1$numb_item_1[which(floor(cluster1$total_min_item)%%divisor==0 | floor(cluster1$total_min_item)%%divisor==1 | floor(cluster1$total_min_item)%%divisor==2 )]<-1
cluster1$numb_item_1[is.na(cluster1$numb_item_1)] <- 0
# cluster1$numb_item_1
focus_flag_1 <- cluster1$numb_item_1
focus_total <- cluster1$total
delivery <- cluster1$deli_1_takeout_0
dinner <- cluster1$is_dinner
weekend <- cluster1$is_weekend
is_addon <- cluster1$is_addon
model1<-glm(is_addon ~ focus_total + focus_flag_1, family=binomial)
print (summary(model1))
#########################################################################################################
order_multi<-read.csv(file="/Users/alan/Desktop/data_reference/cut_cdf.csv",head=TRUE,sep=",")
cluster1<-cluster_total
cluster1<-merge(cluster1, order_multi, by="orderid" , all.x=TRUE)
numb_item_no_restrict_table <- read.csv(file="/Users/alan/Desktop/data_reference/numb_item_no_restrict_table_withmin.csv",head=TRUE,sep=",")
cluster1<-merge(cluster1, numb_item_no_restrict_table, by="orderid")
cluster1$orderitem[is.na(cluster1$orderitem)] <- 0
#################################################################################################
cluster1$min_price_item[cluster1$min_price_item > 3 | cluster1$numb_item_no_restrict == 1] <- 0
#################################################################################################
large_pizza_orders<-read.csv(file="/Users/alan/Desktop/data_reference/large_pizza_orders.csv",head=TRUE,sep=",")
cluster1<-merge(cluster1, large_pizza_orders, by="orderid" , all.x=TRUE)
cluster1$large_pizza[is.na(cluster1$large_pizza)] <- 0
data_location<-cluster1
data_location<-subset(data_location,credit_1_cash_0 == credit_1_cash_0_set)
data_location<-subset(data_location,orderitem == 0)
data_location<-subset(data_location,large_pizza == 0)
data_location<-subset(data_location,customerid_0_visitor!=0)
data_location<-subset(data_location,discount == 0)
#################################################################################################
# red < <
# blue < >
# green >= <
# brown >= >
data_location<-subset(data_location,dayofweek >= 5 )
data_location<-subset(data_location,hourofday > 15)
#################################################################################################
data_location<-subset(data_location,cluster == cluster_numb)
cluster1 <- data_location
cluster1$total_min_item <- cluster1$total - cluster1$min_price_item
cluster1$is_weekend <- floor(cluster1$dayofweek/4)
cluster1$is_dinner <- floor(cluster1$hourofday/15)
###################################################################################################
###################################################################################################
# The location to decide price interval
cluster1<-subset(cluster1,total_min_item <= end)
cluster1<-subset(cluster1,total_min_item >= start)
###################################################################################################
###################################################################################################
cluster1["is_addon"] <- NA
cluster1$is_addon[which(cluster1$min_price_item > 0 )]<-1
cluster1$is_addon[is.na(cluster1$is_addon)] <- 0
cluster1["numb_item_1"] <- NA
cluster1$numb_item_1[which(floor(cluster1$total_min_item)%%divisor==0 | floor(cluster1$total_min_item)%%divisor==1 | floor(cluster1$total_min_item)%%divisor==2 )]<-1
cluster1$numb_item_1[is.na(cluster1$numb_item_1)] <- 0
# cluster1$numb_item_1
focus_flag_1 <- cluster1$numb_item_1
focus_total <- cluster1$total
delivery <- cluster1$deli_1_takeout_0
dinner <- cluster1$is_dinner
weekend <- cluster1$is_weekend
is_addon <- cluster1$is_addon
model1<-glm(is_addon ~ focus_total + focus_flag_1, family=binomial)
print (summary(model1))
|
# getannotations.R
# Look up Ensembl/biomaRt annotations for every protein ID used as a row name
# in the counts table, then write the annotation table to CSV.

library(biomaRt)

# Combined table: real count rows plus design-factor rows flagged with
# Ensembl == "Empty".
counts_design <- read.csv("../../Ensembl_species_counts_designfactors.csv",
                          stringsAsFactors = FALSE)

# Design-factor rows only.
design <- counts_design[counts_design$Ensembl == "Empty", ]
#design$type <- c("species","native_salinity","clade","group","condition")

# Count rows, keyed by their Ensembl ID.
counts <- counts_design[counts_design$Ensembl != "Empty", ]
rownames(counts) <- counts$Ensembl

# Drop bookkeeping columns from both tables.
drops <- c("X", "Ensembl")
design <- design[, !(names(design) %in% drops)]
counts <- counts[, !(names(counts) %in% drops)]
dim(design)
dim(counts)

ensembl_proteinID <- rownames(counts)

# F. heteroclitus dataset on the main Ensembl mart.
ensembl <- useMart("ENSEMBL_MART_ENSEMBL")
ensembl <- useDataset("fheteroclitus_gene_ensembl", mart = ensembl)
length(ensembl_proteinID)

query <- getBM(
  attributes = c("ensembl_peptide_id", "ensembl_transcript_id",
                 "ensembl_gene_id", "gene_biotype", "external_gene_name",
                 "go_id", "description", "entrezgene"),
  filters = "ensembl_peptide_id",
  values = ensembl_proteinID,
  mart = ensembl
)

# Blank Entrez IDs become the literal string "NA".
# NOTE(review): this coerces the column to character; write.csv would render a
# real NA identically -- confirm downstream readers expect the string form
# before changing it.
query$entrezgene[query$entrezgene == ""] <- "NA"

# write.csv() returns NULL invisibly; the original captured it in `ann`,
# which was always NULL and is dropped here.
write.csv(query, "~/Documents/UCDavis/Whitehead/Ensembl_annotations.csv")
| /DE_scripts/getannotations.R | no_license | WhiteheadLab/RNAseq_17killifish | R | false | false | 1,033 | r | library(biomaRt)
# make a
counts_design <- read.csv("../../Ensembl_species_counts_designfactors.csv",stringsAsFactors = FALSE)
design <- counts_design[counts_design$Ensembl == 'Empty',]
#design$type <- c("species","native_salinity","clade","group","condition")
drops <- c("X","Ensembl")
counts<-counts_design[!counts_design$Ensembl == 'Empty',]
rownames(counts)<-counts$Ensembl
design <- design[ , !(names(design) %in% drops)]
counts <- counts[ , !(names(counts) %in% drops)]
dim(design)
dim(counts)
ensembl_proteinID <- rownames(counts)
ensembl=useMart("ENSEMBL_MART_ENSEMBL")
ensembl = useDataset("fheteroclitus_gene_ensembl",mart=ensembl)
length(ensembl_proteinID)
query<-getBM(attributes=c('ensembl_peptide_id','ensembl_transcript_id','ensembl_gene_id','gene_biotype','external_gene_name','go_id','description','entrezgene'), filters = 'ensembl_peptide_id', values = ensembl_proteinID, mart=ensembl)
query$entrezgene[query$entrezgene==""] <- "NA"
ann<-write.csv(query,"~/Documents/UCDavis/Whitehead/Ensembl_annotations.csv")
|
# Wrap a matrix together with a cache slot for its inverse.
#
# Returns a list of four closures sharing one environment:
#   setMatrix(y)    -- replace the stored matrix and drop any cached inverse
#   getMatrix()     -- return the stored matrix
#   setInverse(inv) -- store a computed inverse in the cache
#   getInverse()    -- return the cached inverse, or NULL if none is stored
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    setMatrix = function(y) {
      x <<- y
      cached_inverse <<- NULL  # old inverse no longer matches the new matrix
    },
    getMatrix = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## Return the inverse of the matrix held in `x` (a makeCacheMatrix object).
##
## The first call computes the inverse with solve() and stores it via
## x$setInverse(); later calls report "getting cached data" and return the
## stored value without recomputing. Extra arguments in `...` are forwarded
## to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getInverse()
  if (is.null(inv)) {
    inv <- solve(x$getMatrix(), ...)
    x$setInverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
#Test [2*2 Matrix]#
# Smoke test of the caching round trip on an invertible 2x2 matrix.
SampleMatrix <- matrix(1:4,2,2)
SampleMatrix
CacheMatrix <- makeCacheMatrix(SampleMatrix)
CacheMatrix$getMatrix() # echoes SampleMatrix
CacheMatrix$getInverse() # NULL: nothing cached yet
cacheSolve(CacheMatrix) # first call computes and caches the inverse
cacheSolve(CacheMatrix) # second call prints "getting cached data"
| /cachematrix.R | no_license | kelvinchee7/Peer-graded-Assignment-Programming-Assignment-2-Lexical-Scoping | R | false | false | 2,215 | r |
#Name:Kelvin
#Function makeCacheMatrix will helps transform user input to a matrix format, before setting the value of the matrix,
#get the value of the matrix, set the inverse Matrix and get the inverse Matrix.
#The matrix object will than cache its own object.
## Wrap a matrix in an object that can memoise its inverse.
## The returned list exposes setMatrix/getMatrix for the matrix itself
## and setInverse/getInverse for the cached inverse (NULL until set).
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  set_mat <- function(y) {
    x <<- y
    inv_cache <<- NULL # a new matrix invalidates the old inverse
  }
  get_mat <- function() x
  set_inv <- function(value) inv_cache <<- value
  get_inv <- function() inv_cache
  # Expose the four closures under the public names callers expect.
  list(setMatrix = set_mat, getMatrix = get_mat,
       setInverse = set_inv, getInverse = get_inv)
}
## The function cacheSolve will brings the output of the previous function as an
# input and checks whether the inverse for of the matrix from the previous function have any value or not.
# In case inverse matrix from makeCacheMatrix((matrix) is empty, it will gets the original matrix data from
# and set the inversible matrix by using the solve function.
# For cases where the inverse matrix from the previous function has some value in it,
#it will returns a message "getting cached data"
#and the cached object
# While for cases where the inverse matrix from the previous function has no value it will gets the original matrix data from
# it will retrieve its original matrix value and this value will be set as the inversible matrix by using the solve function.
## Fetch (or compute and cache) the inverse of a makeCacheMatrix() object.
## A cache hit prints "getting cached data" and returns early; a miss
## solves the stored matrix, caches the result, and returns it.
## `...` is passed straight through to solve().
cacheSolve <- function(x, ...) {
  hit <- x$getInverse()
  if (!is.null(hit)) {
    message("getting cached data")
    return(hit)
  }
  fresh <- solve(x$getMatrix(), ...)
  x$setInverse(fresh)
  fresh
}
#Test [2*2 Matrix]#
# Smoke test: exercise the full cache round trip on a 2x2 matrix.
SampleMatrix <- matrix(1:4,2,2)
SampleMatrix
CacheMatrix <- makeCacheMatrix(SampleMatrix)
CacheMatrix$getMatrix() # echoes SampleMatrix
CacheMatrix$getInverse() # NULL: inverse not cached yet
cacheSolve(CacheMatrix) # computes and caches the inverse
cacheSolve(CacheMatrix) # served from cache ("getting cached data")
|
# R01 GSH DO Mapping Code
# Updated September 2020
# Becca Gould
#LIVER GLUTATHIONE + NAD MAPPING - Allele Plots
#Creates Heat Maps that shows the individual CC founder allele contribution at a specific SNP from the Giga MUGA
#Created by Becca Gould with Sue McClatchy on 9/24/2020
#Based on Greg Keele's heat map code
#Load in Liver-GSH-NAD-RankZ-SexGen.Rdata
#load the command line tools
library(qtl2)
library (tidyverse)
library (readxl)
library(yaml)
library(devtools)
library(RSQLite)
library(jsonlite)
library (data.table)
library (RcppEigen)
library (RSQLite)
library (writexl)
library (pander)
####################################################
## Finding and sorting through all of the QTL data
####################################################
#tells you all of the qtlscans that you have
ls(pattern = "qtl")
#use cbind to combine all of the qtlscans + take those 2D tables and combining them with another table over and over again
## scans is an R object containing your genome scans from scan1() that are loaded in to the R environment
scans <- cbind(qtlscan_LiverGSH, qtlscan_LiverGSSG, qtlscan_LiverTotalGSH, qtlscan_LiverGSH_GSSGRatio, qtlscan_LiverGSH_GSSGcovar, qtlscan_LiverRedoxPotentialGSSG2GSH, qtlscan_LiverNADH, qtlscan_LiverNADP, qtlscan_LiverNADPH, qtlscan_LiverNADP_NADPHRatio)
#thresholds <- cbind(threshold_LiverGSH, threshold_LiverGSSG, threshold_LiverTotalGSH, threshold_LiverGSH_GSSGRatio, threshold_LiverGSH_GSSGcovar, threshold_LiverNADH, threshold_LiverNADP, threshold_LiverNADPH, threshold_LiverNADP_NADPHRatio, threshold_ALT, threshold_AST, threshold_BUN)
#head(thresholds)
#scans variable created from "Exporting QTL Results" -- for RankZ transformed pheno
head(scans)
####################################################
## Create the probability plotting function (made by Greg Keele)
####################################################
# Heat-map of founder allele probability at one QTL marker (by Greg Keele).
# Each column is one individual, ordered left-to-right by increasing
# phenotype value; each row is one of the 8 DO/CC founder strains, so a
# founder haplotype that tracks the phenotype shows up as an intensity
# gradient along its row.
#   pheno_vec  - named numeric phenotype vector; names must match the
#                rownames of the genoprobs array
#   pheno_name - optional phenotype label for the x-axis caption
#   genoprobs  - qtl2-style genotype-probability object, indexed by chr
#   qtl_chr    - chromosome containing the marker
#   qtl_marker - marker id within that chromosome
#   cols       - colour ramp for the probability image (grey by default)
#   label_col  - per-founder label colours (qtl2 CC colours)
#   founders   - founder strain labels in A..H order
#   main       - plot title
prob_plot <- function(pheno_vec,
                      pheno_name = NULL,
                      genoprobs,
                      qtl_chr,
                      qtl_marker,
                      cols = gray(10000:1/10000),
                      label_col = as.character(qtl2::CCcolors),
                      founders = c("AJ", "B6", "129", "NOD", "NZO", "CAST", "PWK", "WSB"),
                      main = "") {
  sorted_pheno <- sort(pheno_vec)
  # NOTE(review): "* 2" presumably rescales haplotype probabilities to
  # expected allele dosage (0-2) -- confirm against the genoprobs scale.
  # rev(LETTERS[1:8]) places founder A on the top row of the image.
  image(genoprobs[[qtl_chr]][names(sorted_pheno), rev(LETTERS[1:8]), qtl_marker] * 2,
        yaxt = "n", xaxt = "n", col = cols)
  # Tick scaffold only (labels = FALSE); names are drawn with mtext below.
  axis(2, at = seq(0, 8, 1 + 1/8)/8, labels = FALSE,
       lty = 0, srt = 90, las = 2)
  mtext(text = main, side = 3, padj = -1, cex = 1.25)
  # Founder names, one per row, coloured with the matching CC colour.
  mtext(text = rev(founders), side = 2, col = rev(label_col), at = seq(0, 1, length.out = 8),
        las = 1, cex = 1.25, adj = 1.25, font = 2)
  mtext(text = paste("lower", "<--", ifelse(is.null(pheno_name), "phenotype", pheno_name), "-->", "higher"), side = 1, padj = 1.25, cex = 1.25)
}
####################################################
## Alter the pheno file accordingly
## pheno file needs to be a data frame for QTL analysis, but for these allele probability plots, it must be a matrix
####################################################
#need to make the pheno file a matrix so that it runs in the code (currently a data frame)
#first need to identify what specifically to make part of the phenotype matrix (only need transformed data!)
names(pheno)
#from this, I've identified I only need columns 24-34
#pheno_mat is the matrix of outcomes (phenotypes)
pheno_mat <- as.matrix(pheno[c(24:34)])
#check rownames to make sure they are already set as the write row names (they are)
rownames(pheno[c(24:34)])
####################################################
## Review and print the QTL peaks from all of the QTL scans
####################################################
## I just set threshold to 6 (tells you all of the important qtl peaks with a LOD score > 6)
## map is the qtl2 map you want to use (gmap or pmap)
qtl_gmap <- find_peaks(scans, map = R01_GSH_DO_QTLdata$gmap, threshold = 6, peakdrop = 1.8, drop = 1.5, expand2markers = FALSE)
qtl_gmap
qtl_pmap <- find_peaks(scans, map = R01_GSH_DO_QTLdata$pmap, threshold = 6, peakdrop = 1.8, drop = 1.5, expand2markers = FALSE)
qtl_pmap
#Add marker information
qtl_gmap$marker.id <- find_marker(map = R01_GSH_DO_QTLdata$gmap, chr = qtl_gmap$chr, pos = qtl_gmap$pos)
qtl_gmap$marker.id
qtl_gmap
qtl_pmap$marker.id <- find_marker(map = R01_GSH_DO_QTLdata$pmap, chr = qtl_pmap$chr, pos = qtl_pmap$pos)
qtl_pmap$marker.id
qtl_pmap
setwd("~/OneDrive - University of Georgia/Pazdro Lab/R01 Redox/Analysis and Results/QTL Mapping - Liver/RankZ/RankZ - sexgen/Investigating Allele Plots")
write_xlsx(list("QTL List RankZ SexGen - cM" = qtl_gmap,
"QTL List RankZ SexGen - Mbp" = qtl_pmap),
"QTL List - RankZ sexgen.xlsx")
#gives print out of all LOD peaks > 6
#later edited by Becca --> "Final QTL results - RankZ"
####################################################
## ALLELE PLOTS CODE - LOOP
####################################################
#set working directory to store the plots
pdf(file = "allele-plots_cM - RankZ sexgen.pdf") # create a file called allele-plots.pdf
# loop through all qtl_gmap above lod threshold of 6 and create an individual plot
for (i in 1:dim(qtl_gmap)[1]) {
prob_plot(pheno_vec = pheno_mat[,qtl_gmap$lodcolumn[i]],
genoprobs = probs,
qtl_chr = qtl_gmap$chr[i],
qtl_marker = qtl_gmap$marker.id[i],
main = paste("lodindex", qtl_gmap$lodindex[i], "Chr", qtl_gmap$chr[i], qtl_gmap$marker.id[i], qtl_gmap$pos[i], qtl_gmap$lodcolumn[i]))
}
# be sure to turn the graphics output off at the end!
dev.off()
#set working directory to store the plots
pdf(file = "allele-plots_Mbp - RankZ sexgen.pdf") # create a file called allele-plots.pdf
# loop through all qtl_gmap above lod threshold of 6 and create an individual plot
for (i in 1:dim(qtl_pmap)[1]) {
prob_plot(pheno_vec = pheno_mat[,qtl_pmap$lodcolumn[i]],
genoprobs = probs,
qtl_chr = qtl_pmap$chr[i],
qtl_marker = qtl_pmap$marker.id[i],
main = paste(qtl_pmap$lodindex[i], "Chr", qtl_pmap$chr[i], qtl_pmap$marker.id[i], qtl_pmap$pos[i], qtl_pmap$lodcolumn[i]))
}
# be sure to turn the graphics output off at the end!
dev.off()
####################################################
## Printing out the results
####################################################
#INDIVIDUAL PLOTS CODE:
prob_plot(pheno_vec = pheno_mat[,qtl$lodcolumn[1]],
genoprobs = probs,
qtl_chr = qtl$chr[1],
qtl_marker = qtl$marker.id[1])
#shows you the individual column for each lodindex
qtl$lodcolumn[1]
#zLiverGSH
#[1] is a vector of values for lod index -- in this case, [1] refers to the first peak of zLiverGSH
| /Liver-GSH-NAD/RankZ/RankZ-sexgen/Allele plots/Allele Plots - RankZ SexGen.R | no_license | rebeccalgould/Rqtl2-Glutathione-Genetics | R | false | false | 6,990 | r | # R01 GSH DO Mapping Code
# Updated September 2020
# Becca Gould
#LIVER GLUTATHIONE + NAD MAPPING - Allele Plots
#Creates Heat Maps that shows the individual CC founder allele contribution at a specific SNP from the Giga MUGA
#Created by Becca Gould with Sue McClatchy on 9/24/2020
#Based on Greg Keele's heat map code
#Load in Liver-GSH-NAD-RankZ-SexGen.Rdata
#load the command line tools
library(qtl2)
library (tidyverse)
library (readxl)
library(yaml)
library(devtools)
library(RSQLite)
library(jsonlite)
library (data.table)
library (RcppEigen)
library (RSQLite)
library (writexl)
library (pander)
####################################################
## Finding and sorting through all of the QTL data
####################################################
#tells you all of the qtlscans that you have
ls(pattern = "qtl")
#use cbind to combine all of the qtlscans + take those 2D tables and combining them with another table over and over again
## scans is an R object containing your genome scans from scan1() that are loaded in to the R environment
scans <- cbind(qtlscan_LiverGSH, qtlscan_LiverGSSG, qtlscan_LiverTotalGSH, qtlscan_LiverGSH_GSSGRatio, qtlscan_LiverGSH_GSSGcovar, qtlscan_LiverRedoxPotentialGSSG2GSH, qtlscan_LiverNADH, qtlscan_LiverNADP, qtlscan_LiverNADPH, qtlscan_LiverNADP_NADPHRatio)
#thresholds <- cbind(threshold_LiverGSH, threshold_LiverGSSG, threshold_LiverTotalGSH, threshold_LiverGSH_GSSGRatio, threshold_LiverGSH_GSSGcovar, threshold_LiverNADH, threshold_LiverNADP, threshold_LiverNADPH, threshold_LiverNADP_NADPHRatio, threshold_ALT, threshold_AST, threshold_BUN)
#head(thresholds)
#scans variable created from "Exporting QTL Results" -- for RankZ transformed pheno
head(scans)
####################################################
## Create the probability plotting function (made by Greg Keele)
####################################################
# Allele-probability heat map for a single QTL marker (by Greg Keele).
# Columns = individuals sorted by phenotype; rows = the 8 DO/CC founders.
#   pheno_vec  - named numeric phenotype vector; names match genoprobs rows
#   pheno_name - optional phenotype label for the x-axis caption
#   genoprobs  - qtl2-style genotype-probability object, indexed by chr
#   qtl_chr    - chromosome containing the marker
#   qtl_marker - marker id within that chromosome
#   cols       - colour ramp for the image
#   label_col  - per-founder label colours (qtl2 CC colours)
#   founders   - founder strain labels in A..H order
#   main       - plot title
prob_plot <- function(pheno_vec,
                      pheno_name = NULL,
                      genoprobs,
                      qtl_chr,
                      qtl_marker,
                      cols = gray(10000:1/10000),
                      label_col = as.character(qtl2::CCcolors),
                      founders = c("AJ", "B6", "129", "NOD", "NZO", "CAST", "PWK", "WSB"),
                      main = "") {
  sorted_pheno <- sort(pheno_vec)
  # NOTE(review): "* 2" presumably converts probabilities to expected
  # allele dosage (0-2) -- confirm. rev() puts founder A on the top row.
  image(genoprobs[[qtl_chr]][names(sorted_pheno), rev(LETTERS[1:8]), qtl_marker] * 2,
        yaxt = "n", xaxt = "n", col = cols)
  # Invisible axis scaffold; founder labels are drawn with mtext below.
  axis(2, at = seq(0, 8, 1 + 1/8)/8, labels = FALSE,
       lty = 0, srt = 90, las = 2)
  mtext(text = main, side = 3, padj = -1, cex = 1.25)
  mtext(text = rev(founders), side = 2, col = rev(label_col), at = seq(0, 1, length.out = 8),
        las = 1, cex = 1.25, adj = 1.25, font = 2)
  mtext(text = paste("lower", "<--", ifelse(is.null(pheno_name), "phenotype", pheno_name), "-->", "higher"), side = 1, padj = 1.25, cex = 1.25)
}
####################################################
## Alter the pheno file accordingly
## pheno file needs to be a data frame for QTL analysis, but for these allele probability plots, it must be a matrix
####################################################
#need to make the pheno file a matrix so that it runs in the code (currently a data frame)
#first need to identify what specifically to make part of the phenotype matrix (only need transformed data!)
names(pheno)
#from this, I've identified I only need columns 24-34
#pheno_mat is the matrix of outcomes (phenotypes)
pheno_mat <- as.matrix(pheno[c(24:34)])
#check rownames to make sure they are already set as the write row names (they are)
rownames(pheno[c(24:34)])
####################################################
## Review and print the QTL peaks from all of the QTL scans
####################################################
## I just set threshold to 6 (tells you all of the important qtl peaks with a LOD score > 6)
## map is the qtl2 map you want to use (gmap or pmap)
qtl_gmap <- find_peaks(scans, map = R01_GSH_DO_QTLdata$gmap, threshold = 6, peakdrop = 1.8, drop = 1.5, expand2markers = FALSE)
qtl_gmap
qtl_pmap <- find_peaks(scans, map = R01_GSH_DO_QTLdata$pmap, threshold = 6, peakdrop = 1.8, drop = 1.5, expand2markers = FALSE)
qtl_pmap
#Add marker information
qtl_gmap$marker.id <- find_marker(map = R01_GSH_DO_QTLdata$gmap, chr = qtl_gmap$chr, pos = qtl_gmap$pos)
qtl_gmap$marker.id
qtl_gmap
qtl_pmap$marker.id <- find_marker(map = R01_GSH_DO_QTLdata$pmap, chr = qtl_pmap$chr, pos = qtl_pmap$pos)
qtl_pmap$marker.id
qtl_pmap
setwd("~/OneDrive - University of Georgia/Pazdro Lab/R01 Redox/Analysis and Results/QTL Mapping - Liver/RankZ/RankZ - sexgen/Investigating Allele Plots")
write_xlsx(list("QTL List RankZ SexGen - cM" = qtl_gmap,
"QTL List RankZ SexGen - Mbp" = qtl_pmap),
"QTL List - RankZ sexgen.xlsx")
#gives print out of all LOD peaks > 6
#later edited by Becca --> "Final QTL results - RankZ"
####################################################
## ALLELE PLOTS CODE - LOOP
####################################################
#set working directory to store the plots
pdf(file = "allele-plots_cM - RankZ sexgen.pdf") # create a file called allele-plots.pdf
# loop through all qtl_gmap above lod threshold of 6 and create an individual plot
for (i in 1:dim(qtl_gmap)[1]) {
prob_plot(pheno_vec = pheno_mat[,qtl_gmap$lodcolumn[i]],
genoprobs = probs,
qtl_chr = qtl_gmap$chr[i],
qtl_marker = qtl_gmap$marker.id[i],
main = paste("lodindex", qtl_gmap$lodindex[i], "Chr", qtl_gmap$chr[i], qtl_gmap$marker.id[i], qtl_gmap$pos[i], qtl_gmap$lodcolumn[i]))
}
# be sure to turn the graphics output off at the end!
dev.off()
#set working directory to store the plots
pdf(file = "allele-plots_Mbp - RankZ sexgen.pdf") # create a file called allele-plots.pdf
# loop through all qtl_gmap above lod threshold of 6 and create an individual plot
for (i in 1:dim(qtl_pmap)[1]) {
prob_plot(pheno_vec = pheno_mat[,qtl_pmap$lodcolumn[i]],
genoprobs = probs,
qtl_chr = qtl_pmap$chr[i],
qtl_marker = qtl_pmap$marker.id[i],
main = paste(qtl_pmap$lodindex[i], "Chr", qtl_pmap$chr[i], qtl_pmap$marker.id[i], qtl_pmap$pos[i], qtl_pmap$lodcolumn[i]))
}
# be sure to turn the graphics output off at the end!
dev.off()
####################################################
## Printing out the results
####################################################
#INDIVIDUAL PLOTS CODE:
prob_plot(pheno_vec = pheno_mat[,qtl$lodcolumn[1]],
genoprobs = probs,
qtl_chr = qtl$chr[1],
qtl_marker = qtl$marker.id[1])
#shows you the individual column for each lodindex
qtl$lodcolumn[1]
#zLiverGSH
#[1] is a vector of values for lod index -- in this case, [1] refers to the first peak of zLiverGSH
|
library(tidyverse)
library(stringr)
# The source CSV carries a two-row header (category row + measure row).
# Read each header row on its own, blank out the NA cells, and paste the
# two rows together into a single set of column names.
header_1 <- read_csv2("../HW_data/exp_betyg_ak6_kommun_2018_19.csv", skip =5, n_max = 1, col_names = FALSE) %>%
  replace(is.na(.),"")
header_2 <- read_csv2("../HW_data/exp_betyg_ak6_kommun_2018_19.csv", skip =6, n_max = 1, col_names = FALSE) %>%
  replace(is.na(.),"")
# Read the data itself; the auto-detected header is wrong and is replaced
# below. NOTE(review): skip = 6 overlaps the header_2 row -- with
# col_names = TRUE that row becomes the (discarded) header; verify.
betyg_wrong_header <- read_csv2("../HW_data/exp_betyg_ak6_kommun_2018_19.csv", skip =6, col_names = TRUE)
old_header = c(colnames(betyg_wrong_header)) # kept for reference; unused below
new_header = paste(header_1, header_2)
# Drop the leading space left when a category header cell was blank.
colnames(betyg_wrong_header) <- str_replace(new_header,"(^ )","") # strip leading spaces and rename header
# Clean the raw grade table:
#  * drop the trailing empty column created by the trailing ';'
#  * keep only "Samtliga" (all providers) rows - no public/private split
#  * normalise the missing-value marker "." to NA and decimal comma to '.'
#  * strip the whitespace thousands separator from the pupil counts
betyg1 <- betyg_wrong_header %>%
  select(-"") %>% # strip last column (empty)
  filter(`Typ av huvudman` == "Samtliga") %>% # No reason to keep data on public/private school.
  replace(. == ".", NA) %>% # "." marks a missing value in the source data
  # funs() is deprecated/defunct in current dplyr; the ~ lambda is the
  # supported equivalent with identical behaviour.
  mutate_all(~ str_replace(., ",", ".")) %>% # replace , with .
  # gsub (not sub) so *every* whitespace run is removed; sub() only
  # removed the first separator, breaking counts of 1,000,000 or more.
  mutate(`Antal elever Totalt` = gsub("\\s+", "", `Antal elever Totalt`)) %>% # remove 1000's delimiter
  mutate(`Antal elever Pojkar` = gsub("\\s+", "", `Antal elever Pojkar`)) %>% # remove 1000's delimiter
  mutate(`Antal elever Flickor` = gsub("\\s+", "", `Antal elever Flickor`)) # remove 1000's delimiter
betyg2 <- betyg1 %>%
mutate(`Antal elever Pojkar` =
case_when(
`Antal elever Pojkar` == ".." &
is.numeric(as.numeric(`Antal elever Flickor`)) &
is.numeric(as.numeric(`Antal elever Totalt`))
~ as.numeric(`Antal elever Totalt`) - as.numeric(`Antal elever Flickor`),
`Antal elever Pojkar` == ".." &
`Antal elever Flickor` == ".." &
is.numeric(as.numeric(`Antal elever Totalt`))
~ as.numeric(`Antal elever Totalt`) / 2,
TRUE ~ as.numeric(`Antal elever Pojkar`)
))
betyg3 <- betyg2 %>%
mutate(`Antal elever Flickor` =
case_when(
`Antal elever Flickor` == ".." &
is.numeric(as.numeric(`Antal elever Pojkar`)) &
is.numeric(as.numeric(`Antal elever Totalt`))
~ as.numeric(`Antal elever Totalt`) - as.numeric(`Antal elever Pojkar`),
`Antal elever Flickor` == ".." &
`Antal elever Pojkar` == ".." &
is.numeric(as.numeric(`Antal elever Totalt`))
~ as.numeric(`Antal elever Totalt`) / 2,
TRUE ~ as.numeric(`Antal elever Flickor`)
))
betyg8 <- betyg %>% #fixa genomsnittlig betygspoäng
mutate(`Genomsnittlig betygspoäng Flickor` = case_when(
`Genomsnittlig betygspoäng Flickor` == 5 &
!is.na(`Genomsnittlig betygspoäng Pojkar`) &
!is.na(`Genomsnittlig betygspoäng Totalt`)
~ (as.numeric(`Genomsnittlig betygspoäng Pojkar`)+ as.numeric(`Genomsnittlig betygspoäng Totalt`) )/2,
TRUE ~ as.numeric(`Genomsnittlig betygspoäng Flickor`)
)) %>%
mutate(`Genomsnittlig betygspoäng Pojkar` = case_when(
`Genomsnittlig betygspoäng Pojkar` == 5 &
!is.na(`Genomsnittlig betygspoäng Flickor`) &
!is.na(`Genomsnittlig betygspoäng Totalt`)
~ (as.numeric(`Genomsnittlig betygspoäng Flickor`)+ as.numeric(`Genomsnittlig betygspoäng Totalt`) )/2,
TRUE ~ as.numeric(`Genomsnittlig betygspoäng Pojkar`)
))
betyg <- betyg %>%
mutate(`Andel (%) med A-E Flickor` = # Fix .. in the "Andel" columns
case_when(
as.numeric(`Andel (%) med A-E Flickor`) == 5 ~ 0,
is.na(`Andel (%) med A-E Flickor`) ~ as.numeric(NA),
TRUE ~ as.numeric(`Andel (%) med A-E Flickor`))
) %>%
mutate(`Andel (%) med A-E Pojkar` = # Fix .. in the "Andel" columns
case_when(
as.numeric(`Andel (%) med A-E Pojkar`) == 5 ~ 0,
is.na(`Andel (%) med A-E Pojkar`) ~ as.numeric(NA),
TRUE ~ as.numeric(`Andel (%) med A-E Pojkar`))
)
# Names of the columns holding grade statistics.
# NOTE: the original used bare backquoted names inside c(), which R
# evaluates as objects and fails with "object ... not found"; column
# names passed around as data must be character strings.
grade_cols <- c(
  "Antal elever Totalt", "Antal elever Flickor", "Antal elever Pojkar",
  "Andel (%) med A-E Totalt", "Andel (%) med A-E Flickor",
  "Andel (%) med A-E Pojkar", "Genomsnittlig betygspoäng Totalt",
  "Genomsnittlig betygspoäng Flickor", "Genomsnittlig betygspoäng Pojkar"
)
count_NA <- betyg %>%
mutate(NAs = rowSums(is.na(betyg[7:15]))) %>%
mutate(non_NAs = rowSums(!is.na(betyg[7:15]))) %>%
group_by(`Ämne`) %>%
summarize(
count_NAs = sum(NAs),
count_non_NAs = sum(non_NAs),
percentage_missing = count_NAs / (count_NAs + count_non_NAs)*100
)
reshaped_betyg <- betyg_sub_imputed[c(2,6,13)] %>%
spread(key = Ämne, value = `Genomsnittlig betygspoäng Totalt`)
KNN <- kmeans(reshaped_betyg,centers = 2)
| /HW4/HW4.R | no_license | Sjoeborg/MT5013 | R | false | false | 4,661 | r | library(tidyverse)
library(stringr)
header_1 <- read_csv2("../HW_data/exp_betyg_ak6_kommun_2018_19.csv", skip =5, n_max = 1, col_names = FALSE) %>%
replace(is.na(.),"")
header_2 <- read_csv2("../HW_data/exp_betyg_ak6_kommun_2018_19.csv", skip =6, n_max = 1, col_names = FALSE) %>%
replace(is.na(.),"")
betyg_wrong_header <- read_csv2("../HW_data/exp_betyg_ak6_kommun_2018_19.csv", skip =6, col_names = TRUE)
old_header = c(colnames(betyg_wrong_header))
new_header = paste(header_1, header_2)
colnames(betyg_wrong_header) <- str_replace(new_header,"(^ )","") # fix trailing spaces and rename header
# Clean the raw grade table: drop the trailing empty column, keep only
# the "Samtliga" rows, turn the "." missing-value marker into NA, switch
# decimal commas to points, and strip the whitespace thousands separator
# from the pupil-count columns.
betyg1 <- betyg_wrong_header %>%
  select(-"") %>% # strip last column (empty)
  filter(`Typ av huvudman` == "Samtliga") %>% # No reason to keep data on public/private school.
  replace(. == ".", NA) %>% # rename . to NA
  # funs() is deprecated/defunct in current dplyr; the ~ lambda is the
  # supported equivalent with identical behaviour.
  mutate_all(~ str_replace(., ",", ".")) %>% # replace , with .
  # gsub (not sub) so *every* whitespace run is removed; sub() only
  # removed the first separator, breaking counts of 1,000,000 or more.
  mutate(`Antal elever Totalt` = gsub("\\s+", "", `Antal elever Totalt`)) %>% # remove 1000's delimiter
  mutate(`Antal elever Pojkar` = gsub("\\s+", "", `Antal elever Pojkar`)) %>% # remove 1000's delimiter
  mutate(`Antal elever Flickor` = gsub("\\s+", "", `Antal elever Flickor`)) # remove 1000's delimiter
betyg2 <- betyg1 %>%
mutate(`Antal elever Pojkar` =
case_when(
`Antal elever Pojkar` == ".." &
is.numeric(as.numeric(`Antal elever Flickor`)) &
is.numeric(as.numeric(`Antal elever Totalt`))
~ as.numeric(`Antal elever Totalt`) - as.numeric(`Antal elever Flickor`),
`Antal elever Pojkar` == ".." &
`Antal elever Flickor` == ".." &
is.numeric(as.numeric(`Antal elever Totalt`))
~ as.numeric(`Antal elever Totalt`) / 2,
TRUE ~ as.numeric(`Antal elever Pojkar`)
))
betyg3 <- betyg2 %>%
mutate(`Antal elever Flickor` =
case_when(
`Antal elever Flickor` == ".." &
is.numeric(as.numeric(`Antal elever Pojkar`)) &
is.numeric(as.numeric(`Antal elever Totalt`))
~ as.numeric(`Antal elever Totalt`) - as.numeric(`Antal elever Pojkar`),
`Antal elever Flickor` == ".." &
`Antal elever Pojkar` == ".." &
is.numeric(as.numeric(`Antal elever Totalt`))
~ as.numeric(`Antal elever Totalt`) / 2,
TRUE ~ as.numeric(`Antal elever Flickor`)
))
betyg8 <- betyg %>% #fixa genomsnittlig betygspoäng
mutate(`Genomsnittlig betygspoäng Flickor` = case_when(
`Genomsnittlig betygspoäng Flickor` == 5 &
!is.na(`Genomsnittlig betygspoäng Pojkar`) &
!is.na(`Genomsnittlig betygspoäng Totalt`)
~ (as.numeric(`Genomsnittlig betygspoäng Pojkar`)+ as.numeric(`Genomsnittlig betygspoäng Totalt`) )/2,
TRUE ~ as.numeric(`Genomsnittlig betygspoäng Flickor`)
)) %>%
mutate(`Genomsnittlig betygspoäng Pojkar` = case_when(
`Genomsnittlig betygspoäng Pojkar` == 5 &
!is.na(`Genomsnittlig betygspoäng Flickor`) &
!is.na(`Genomsnittlig betygspoäng Totalt`)
~ (as.numeric(`Genomsnittlig betygspoäng Flickor`)+ as.numeric(`Genomsnittlig betygspoäng Totalt`) )/2,
TRUE ~ as.numeric(`Genomsnittlig betygspoäng Pojkar`)
))
betyg <- betyg %>%
mutate(`Andel (%) med A-E Flickor` = # Fix .. in the "Andel" columns
case_when(
as.numeric(`Andel (%) med A-E Flickor`) == 5 ~ 0,
is.na(`Andel (%) med A-E Flickor`) ~ as.numeric(NA),
TRUE ~ as.numeric(`Andel (%) med A-E Flickor`))
) %>%
mutate(`Andel (%) med A-E Pojkar` = # Fix .. in the "Andel" columns
case_when(
as.numeric(`Andel (%) med A-E Pojkar`) == 5 ~ 0,
is.na(`Andel (%) med A-E Pojkar`) ~ as.numeric(NA),
TRUE ~ as.numeric(`Andel (%) med A-E Pojkar`))
)
# Names of the columns holding grade statistics.
# NOTE: bare backquoted names inside c() are evaluated as objects and
# error with "object ... not found"; they must be character strings.
grade_cols <- c(
  "Antal elever Totalt", "Antal elever Flickor", "Antal elever Pojkar",
  "Andel (%) med A-E Totalt", "Andel (%) med A-E Flickor",
  "Andel (%) med A-E Pojkar", "Genomsnittlig betygspoäng Totalt",
  "Genomsnittlig betygspoäng Flickor", "Genomsnittlig betygspoäng Pojkar"
)
count_NA <- betyg %>%
mutate(NAs = rowSums(is.na(betyg[7:15]))) %>%
mutate(non_NAs = rowSums(!is.na(betyg[7:15]))) %>%
group_by(`Ämne`) %>%
summarize(
count_NAs = sum(NAs),
count_non_NAs = sum(non_NAs),
percentage_missing = count_NAs / (count_NAs + count_non_NAs)*100
)
reshaped_betyg <- betyg_sub_imputed[c(2,6,13)] %>%
spread(key = Ämne, value = `Genomsnittlig betygspoäng Totalt`)
KNN <- kmeans(reshaped_betyg,centers = 2)
|
#'Plot of efficacy probability density
#' Ridgeline ("joy") plot of the efficacy distribution: one density
#' ridge per level of \code{group}, each filled with an evenly spaced
#' hue. (The previous description mentioning \code{reshape2::dcast} was
#' a copy-paste from another function.)
#' @param ef efficacy dataframe from \code{efficacy.aggregate()}
#' @param type name (string) of the efficacy column plotted on the x axis
#' @param group name (string) of the grouping column; one ridge per level
#' @return A ggplot object.
#'
#' @examples
#' # ef_by_measure <- plot_ef_density(efficacy.df)
plot_ef_density <- function(ef,type, group){
  n_group <- length(unique(ef[,group]))
  # NOTE(review): aes_string() is deprecated in current ggplot2, and
  # geom_joy() / scale_fill_cyclical() come from the ggjoy/ggridges
  # packages, not ggplot2 -- the ggplot2:: prefix on them will fail
  # unless re-exported; verify. gg_color_hue() is a helper defined
  # elsewhere in this package.
  ggplot2::ggplot(ef,aes_string(x = type, y = group, fill = group)) +
    ggplot2::ggtitle("Efficacy Distribution") +
    ggplot2::geom_joy() +
    # x axis is pinned to start at -2 regardless of the data minimum --
    # presumably efficacy is bounded below; confirm against the caller.
    ggplot2::xlim(-2, max(ef[,type])) +
    ggplot2::scale_fill_cyclical(values = gg_color_hue(n_group)) +
    ggplot2::xlab("Efficacy")
}
| /R/plot_ef_density.R | no_license | faustovrz/bugcount | R | false | false | 741 | r | #'Plot of efficacy probability density
#' Ridgeline ("joy") plot of the efficacy distribution, one density
#' ridge per level of \code{group}. (The earlier "wrapper for
#' \code{reshape2::dcast}" description was a copy-paste error.)
#' @param ef efficacy dataframe from \code{efficacy.aggregate()}
#' @param type name (string) of the efficacy column plotted on the x axis
#' @param group name (string) of the grouping column; one ridge per level
#' @return A ggplot object.
#'
#' @examples
#' # ef_by_measure <- plot_ef_density(efficacy.df)
plot_ef_density <- function(ef,type, group){
  n_group <- length(unique(ef[,group]))
  # NOTE(review): aes_string() is deprecated, and geom_joy() /
  # scale_fill_cyclical() live in ggjoy/ggridges, not ggplot2 -- the
  # ggplot2:: prefix will fail unless re-exported; verify.
  # gg_color_hue() is a helper defined elsewhere in this package.
  ggplot2::ggplot(ef,aes_string(x = type, y = group, fill = group)) +
    ggplot2::ggtitle("Efficacy Distribution") +
    ggplot2::geom_joy() +
    # x axis pinned at -2; presumably efficacy is bounded below -- confirm.
    ggplot2::xlim(-2, max(ef[,type])) +
    ggplot2::scale_fill_cyclical(values = gg_color_hue(n_group)) +
    ggplot2::xlab("Efficacy")
}
|
################################################################################
## Coursera - Exploratory Data Analysis - Week 1 Assignment 1 ##
################################################################################
## Prerequisites
## 1. Ensure that file household_power_consumption.txt is already in current
## working directory (File can be downloaded from :
## http://archive.ics.uci.edu/ml/)
## 2. data.table package has been downloaded and installed. To install, type
## install.packages("data.table")
## Objective : To create 4 line graphs in a single plotfor the 1st and 2nd of
### Feb 2007 and save it in .png format.
## Requirements :
## Top left - 1. Title = <blank>
## 2. X-axis label = <blank>
## 3. Y-axis label = Global Active Power
##
## Bottom left - 1. Title = <blank>
## 2. X-axis label = <blank>
## 3. Y-axis label = Energy sub metering
## 4. Sub_metering_1 line color = Black
## 5. Sub_metering_2 line color = Red
## 6. Sub_metering_1 line color = Blue
## 7. Legend = Top right corner
##
## Top right - 1. Title = <blank>
## 2. X-axis label = datetime
## 3. Y-axis label = Voltage
##
## Bottom right - 1. Title = <blank>
## 2. X-axis label = datetime
## 3. Y-axis label = Voltage
##
## Overall - 1. Background color = Transparent
## 2. Size = 480 x 480 pixels
## Load data.table library
library(data.table)
## Read file into data table with default parameters; this will take a while
## with warning messages about the presence of ? character in some columns.
## This will be dealt with in later steps.
## 2,075,259 rows
data <- fread("household_power_consumption.txt")
## Subset the rows for 1st and 2nd Feb 2007; this might take a while
## 2,880 rows
## NOTE(review): strptime() returns POSIXlt; passing that to
## data.table::between() relies on its coercion behaviour -- converting
## Date with as.Date() would be the safer type. Confirm the subset
## really yields the expected 2,880 rows (2 days of minute data).
sub.data <- subset(data, between(strptime(Date, "%d/%m/%Y"),
                                 strptime("1/2/2007", "%d/%m/%Y"),
                                 strptime("2/2/2007", "%d/%m/%Y")))
## Uncomment the code below to remove the original data set to free up the space
## if necessary
## rm(data)
## Plot to png device, by default, width and height are at 480 pixels
png("plot4.png", bg = "transparent")
## Set layout of plot, 2 x 2, fill up column first
par(mfcol = c(2,2))
## Create top left line graph per requirements above
## Paste Date and Time column together to convert to datetime for plotting on
## the X-axis
## Global_active_power column is converted to number, supressing any warnings
## that ? characters bring
with(sub.data, plot(strptime(paste(Date, Time), "%d/%m/%Y %H:%M:%S"),
suppressWarnings(as.numeric(Global_active_power)),
ylab = "Global Active Power",xlab = "", type = "l"))
## create bottom left line graph per requirements above
## Paste Date and Time column together to convert to datetime for plotting on
## the X-axis
## Sub_metering_1 column is converted to number, supressing any warnings that ?
## characters bring
with(sub.data, plot(strptime(paste(Date, Time), "%d/%m/%Y %H:%M:%S"),
suppressWarnings(as.numeric(Sub_metering_1)),
ylab = "Energy sub metering", xlab = "", type = "l"))
## Add points for Sub_metering_2 column, conversion required, same as above
with(sub.data, points(strptime(paste(Date, Time), "%d/%m/%Y %H:%M:%S"),
suppressWarnings(as.numeric(Sub_metering_2)),
type = "l", col = "Red"))
## Add points for Sub_metering_3 column, conversion required, same as above
with(sub.data, points(strptime(paste(Date, Time), "%d/%m/%Y %H:%M:%S"),
suppressWarnings(as.numeric(Sub_metering_3)),
type = "l", col = "Blue"))
## Add legend on top right
## Use lwd instead pch to specify that lines should be drawn in the legend
## instead of point characters
## Use bty = "n" to indicate absence of box
## Use y.intersp and cex to reduce the contents
## Use negative insets to force the legend to the corner
legend("topright", col = c("Black", "Red", "Blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lwd = 1, bty = "n", y.intersp = 0.25, cex = 0.75, inset = c(-0.2, -0.1))
## Create top right line graph per requirements above
## Paste Date and Time column together to convert to datetime for plotting on
## the X-axis
## Voltage column is converted to number, supressing any warnings that ?
## characters bring
with(sub.data, plot(strptime(paste(Date, Time), "%d/%m/%Y %H:%M:%S"),
suppressWarnings(as.numeric(Voltage)),
ylab = "Voltage",xlab = "datetime", type = "l"))
## Create bottom right line graph per requirements above
## Paste Date and Time column together to convert to datetime for plotting on
## the X-axis
## Global_reactive_power column is converted to number, supressing any warnings
## that ? characters bring
with(sub.data, plot(strptime(paste(Date, Time), "%d/%m/%Y %H:%M:%S"),
suppressWarnings(as.numeric(Global_reactive_power)),
ylab = "Global_reactive_power", xlab = "datetime",
type = "l"))
## Remember to close device
dev.off()
| /plot4.R | no_license | y4nz1/ExData_Plotting1 | R | false | false | 5,346 | r | ################################################################################
## Coursera - Exploratory Data Analysis - Week 1 Assignment 1 ##
################################################################################
## Prerequisites
## 1. Ensure that file household_power_consumption.txt is already in current
## working directory (File can be downloaded from :
## http://archive.ics.uci.edu/ml/)
## 2. data.table package has been downloaded and installed. To install, type
## install.packages("data.table")
## Objective : To create 4 line graphs in a single plotfor the 1st and 2nd of
### Feb 2007 and save it in .png format.
## Requirements :
## Top left - 1. Title = <blank>
## 2. X-axis label = <blank>
## 3. Y-axis label = Global Active Power
##
## Bottom left - 1. Title = <blank>
## 2. X-axis label = <blank>
## 3. Y-axis label = Energy sub metering
## 4. Sub_metering_1 line color = Black
## 5. Sub_metering_2 line color = Red
## 6. Sub_metering_1 line color = Blue
## 7. Legend = Top right corner
##
## Top right - 1. Title = <blank>
## 2. X-axis label = datetime
## 3. Y-axis label = Voltage
##
## Bottom right - 1. Title = <blank>
##                2. X-axis label = datetime
##                3. Y-axis label = Voltage
##
## Overall      - 1. Background color = Transparent
##                2. Size = 480 x 480 pixels
## Load data.table library
library(data.table)
## Read file into data table with default parameters; this will take a while.
## Some columns contain the literal "?" as a missing-value marker, so those
## columns come in as character and are converted to numeric below.
## 2,075,259 rows
data <- fread("household_power_consumption.txt")
## Subset the rows for 1st and 2nd Feb 2007.
## BUGFIX: the previous between(strptime(Date), "1/2/2007", "2/2/2007") used
## midnight of 2 Feb as the upper bound, so it kept only the first minute of
## the second day.  Matching the raw Date strings keeps both full days.
## 2,880 rows (= 2 days x 1,440 minutes)
sub.data <- subset(data, Date %in% c("1/2/2007", "2/2/2007"))
## Uncomment the code below to remove the original data set to free up the space
## if necessary
## rm(data)
## Parse Date+Time once for the shared X axis (POSIXct, since data.table
## columns cannot hold POSIXlt); the original re-parsed it for every panel.
datetime <- as.POSIXct(paste(sub.data$Date, sub.data$Time),
                       format = "%d/%m/%Y %H:%M:%S")
## Convert a possibly-character measurement column to numeric, suppressing the
## coercion warnings that the "?" missing-value markers would raise.
num <- function(x) suppressWarnings(as.numeric(x))
## Plot to png device, by default, width and height are at 480 pixels
png("plot4.png", bg = "transparent")
## Set layout of plot, 2 x 2, fill up column first
par(mfcol = c(2, 2))
## Top left: Global active power over time
plot(datetime, num(sub.data$Global_active_power),
     ylab = "Global Active Power", xlab = "", type = "l")
## Bottom left: the three sub-metering series overlaid on one panel
plot(datetime, num(sub.data$Sub_metering_1),
     ylab = "Energy sub metering", xlab = "", type = "l")
points(datetime, num(sub.data$Sub_metering_2), type = "l", col = "Red")
points(datetime, num(sub.data$Sub_metering_3), type = "l", col = "Blue")
## Add legend on top right.
## Use lwd instead of pch so lines (not point characters) are drawn;
## bty = "n" removes the box; y.intersp and cex shrink the contents;
## negative insets force the legend into the corner.
legend("topright", col = c("Black", "Red", "Blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lwd = 1, bty = "n", y.intersp = 0.25, cex = 0.75, inset = c(-0.2, -0.1))
## Top right: Voltage over time
plot(datetime, num(sub.data$Voltage),
     ylab = "Voltage", xlab = "datetime", type = "l")
## Bottom right: Global reactive power over time
plot(datetime, num(sub.data$Global_reactive_power),
     ylab = "Global_reactive_power", xlab = "datetime",
     type = "l")
## Remember to close device
dev.off()
|
#EXPERIMENT 1
setwd("")
## Read and row-bind every result file for one data set: the Bandari baseline
## plus each learner fitted with each resampling strategy.  File-name pattern:
##   <dataset>_bandari.csv  and  <dataset>_<learner>_<strategy>.csv
## The rbind order (bandari, then svm/mars/rf each with none, UNDER, OVER,
## SMOTE, IS) reproduces the original hand-written script exactly.
load_exp1 <- function(dataset,
                      learners = c("svm", "mars", "rf"),
                      strategies = c("none", "UNDER", "OVER", "SMOTE", "IS")) {
  files <- c(paste0(dataset, "_bandari.csv"),
             paste0(dataset, "_",
                    rep(learners, each = length(strategies)), "_",
                    strategies, ".csv"))
  do.call(rbind, lapply(files, read.csv))
}
## Average every column per model.  Non-numeric columns (e.g. the model name
## itself) yield NA with a warning, exactly as in the original aggregate calls.
mean_by_model <- function(results) {
  aggregate(results, by = list(results$model), FUN = mean, na.rm = TRUE)
}
## The per-file intermediate variables of the original script are no longer
## kept; only the combined frames and their per-model summaries remain.
economy      <- load_exp1("economy")
economy.tb   <- mean_by_model(economy)
microsoft    <- load_exp1("microsoft")
microsoft.tb <- mean_by_model(microsoft)
obama        <- load_exp1("obama")
obama.tb     <- mean_by_model(obama)
palestine    <- load_exp1("palestine")
palestine.tb <- mean_by_model(palestine)
#EXPERIMENT 2
setwd("")
## Read the six per-model result files ("exp2_<dataset>_<model>.csv") for one
## data set and row-bind them in the original order:
## baseline, bandari, lm, svm, mars, rf.
load_exp2 <- function(dataset) {
  models <- c("baseline", "bandari", "lm", "svm", "mars", "rf")
  do.call(rbind, lapply(paste0("exp2_", dataset, "_", models, ".csv"),
                        read.csv))
}
## Per-model means; non-numeric columns yield NA with a warning, as before.
economy      <- load_exp2("economy")
economy.tb   <- aggregate(economy, by = list(economy$model),
                          FUN = mean, na.rm = TRUE)
microsoft    <- load_exp2("microsoft")
microsoft.tb <- aggregate(microsoft, by = list(microsoft$model),
                          FUN = mean, na.rm = TRUE)
obama        <- load_exp2("obama")
obama.tb     <- aggregate(obama, by = list(obama$model),
                          FUN = mean, na.rm = TRUE)
palestine    <- load_exp2("palestine")
palestine.tb <- aggregate(palestine, by = list(palestine$model),
                          FUN = mean, na.rm = TRUE)
#EXPERIMENT 3
setwd("")
## Read the six per-model result files ("exp3_<dataset>_<model>.csv") for one
## data set and row-bind them in the original order:
## baseline, bandari, lm, svm, mars, rf.
load_exp3 <- function(dataset) {
  models <- c("baseline", "bandari", "lm", "svm", "mars", "rf")
  do.call(rbind, lapply(paste0("exp3_", dataset, "_", models, ".csv"),
                        read.csv))
}
## Per-model means; non-numeric columns yield NA with a warning, as before.
economy      <- load_exp3("economy")
economy.tb   <- aggregate(economy, by = list(economy$model),
                          FUN = mean, na.rm = TRUE)
microsoft    <- load_exp3("microsoft")
microsoft.tb <- aggregate(microsoft, by = list(microsoft$model),
                          FUN = mean, na.rm = TRUE)
obama        <- load_exp3("obama")
obama.tb     <- aggregate(obama, by = list(obama$model),
                          FUN = mean, na.rm = TRUE)
palestine    <- load_exp3("palestine")
palestine.tb <- aggregate(palestine, by = list(palestine$model),
                          FUN = mean, na.rm = TRUE)
setwd("")
economy_bandari <- read.csv("economy_bandari.csv")
economy_svm <- read.csv("economy_svm_none.csv")
economy_svm_UNDER <- read.csv("economy_svm_UNDER.csv")
economy_svm_OVER <- read.csv("economy_svm_OVER.csv")
economy_svm_SMOTE <- read.csv("economy_svm_SMOTE.csv")
economy_svm_IS <- read.csv("economy_svm_IS.csv")
economy_mars <- read.csv("economy_mars_none.csv")
economy_mars_UNDER <- read.csv("economy_mars_UNDER.csv")
economy_mars_OVER <- read.csv("economy_mars_OVER.csv")
economy_mars_SMOTE <- read.csv("economy_mars_SMOTE.csv")
economy_mars_IS <- read.csv("economy_mars_IS.csv")
economy_rf <- read.csv("economy_rf_none.csv")
economy_rf_UNDER <- read.csv("economy_rf_UNDER.csv")
economy_rf_OVER <- read.csv("economy_rf_OVER.csv")
economy_rf_SMOTE <- read.csv("economy_rf_SMOTE.csv")
economy_rf_IS <- read.csv("economy_rf_IS.csv")
economy <- rbind(economy_bandari,
economy_svm, economy_svm_UNDER, economy_svm_OVER, economy_svm_SMOTE, economy_svm_IS,
economy_mars, economy_mars_UNDER, economy_mars_OVER, economy_mars_SMOTE, economy_mars_IS,
economy_rf, economy_rf_UNDER, economy_rf_OVER, economy_rf_SMOTE, economy_rf_IS)
economy.tb <- aggregate(economy,by=list(economy$model),FUN=mean,na.rm=TRUE)
microsoft_bandari <- read.csv("microsoft_bandari.csv")
microsoft_svm <- read.csv("microsoft_svm_none.csv")
microsoft_svm_UNDER <- read.csv("microsoft_svm_UNDER.csv")
microsoft_svm_OVER <- read.csv("microsoft_svm_OVER.csv")
microsoft_svm_SMOTE <- read.csv("microsoft_svm_SMOTE.csv")
microsoft_svm_IS <- read.csv("microsoft_svm_IS.csv")
microsoft_mars <- read.csv("microsoft_mars_none.csv")
microsoft_mars_UNDER <- read.csv("microsoft_mars_UNDER.csv")
microsoft_mars_OVER <- read.csv("microsoft_mars_OVER.csv")
microsoft_mars_SMOTE <- read.csv("microsoft_mars_SMOTE.csv")
microsoft_mars_IS <- read.csv("microsoft_mars_IS.csv")
microsoft_rf <- read.csv("microsoft_rf_none.csv")
microsoft_rf_UNDER <- read.csv("microsoft_rf_UNDER.csv")
microsoft_rf_OVER <- read.csv("microsoft_rf_OVER.csv")
microsoft_rf_SMOTE <- read.csv("microsoft_rf_SMOTE.csv")
microsoft_rf_IS <- read.csv("microsoft_rf_IS.csv")
microsoft <- rbind(microsoft_bandari,
microsoft_svm, microsoft_svm_UNDER, microsoft_svm_OVER, microsoft_svm_SMOTE, microsoft_svm_IS,
microsoft_mars, microsoft_mars_UNDER, microsoft_mars_OVER, microsoft_mars_SMOTE, microsoft_mars_IS,
microsoft_rf, microsoft_rf_UNDER, microsoft_rf_OVER, microsoft_rf_SMOTE, microsoft_rf_IS)
microsoft.tb <- aggregate(microsoft,by=list(microsoft$model),FUN=mean,na.rm=TRUE)
obama_bandari <- read.csv("obama_bandari.csv")
obama_svm <- read.csv("obama_svm_none.csv")
obama_svm_UNDER <- read.csv("obama_svm_UNDER.csv")
obama_svm_OVER <- read.csv("obama_svm_OVER.csv")
obama_svm_SMOTE <- read.csv("obama_svm_SMOTE.csv")
obama_svm_IS <- read.csv("obama_svm_IS.csv")
obama_mars <- read.csv("obama_mars_none.csv")
obama_mars_UNDER <- read.csv("obama_mars_UNDER.csv")
obama_mars_OVER <- read.csv("obama_mars_OVER.csv")
obama_mars_SMOTE <- read.csv("obama_mars_SMOTE.csv")
obama_mars_IS <- read.csv("obama_mars_IS.csv")
obama_rf <- read.csv("obama_rf_none.csv")
obama_rf_UNDER <- read.csv("obama_rf_UNDER.csv")
obama_rf_OVER <- read.csv("obama_rf_OVER.csv")
obama_rf_SMOTE <- read.csv("obama_rf_SMOTE.csv")
obama_rf_IS <- read.csv("obama_rf_IS.csv")
obama <- rbind(obama_bandari,
obama_svm, obama_svm_UNDER, obama_svm_OVER, obama_svm_SMOTE, obama_svm_IS,
obama_mars, obama_mars_UNDER, obama_mars_OVER, obama_mars_SMOTE, obama_mars_IS,
obama_rf, obama_rf_UNDER, obama_rf_OVER, obama_rf_SMOTE, obama_rf_IS)
obama.tb <- aggregate(obama,by=list(obama$model),FUN=mean,na.rm=TRUE)
palestine_bandari <- read.csv("palestine_bandari.csv")
palestine_svm <- read.csv("palestine_svm_none.csv")
palestine_svm_UNDER <- read.csv("palestine_svm_UNDER.csv")
palestine_svm_OVER <- read.csv("palestine_svm_OVER.csv")
palestine_svm_SMOTE <- read.csv("palestine_svm_SMOTE.csv")
palestine_svm_IS <- read.csv("palestine_svm_IS.csv")
palestine_mars <- read.csv("palestine_mars_none.csv")
palestine_mars_UNDER <- read.csv("palestine_mars_UNDER.csv")
palestine_mars_OVER <- read.csv("palestine_mars_OVER.csv")
palestine_mars_SMOTE <- read.csv("palestine_mars_SMOTE.csv")
palestine_mars_IS <- read.csv("palestine_mars_IS.csv")
palestine_rf <- read.csv("palestine_rf_none.csv")
palestine_rf_UNDER <- read.csv("palestine_rf_UNDER.csv")
palestine_rf_OVER <- read.csv("palestine_rf_OVER.csv")
palestine_rf_SMOTE <- read.csv("palestine_rf_SMOTE.csv")
palestine_rf_IS <- read.csv("palestine_rf_IS.csv")
palestine <- rbind(palestine_bandari,
palestine_svm, palestine_svm_UNDER, palestine_svm_OVER, palestine_svm_SMOTE, palestine_svm_IS,
palestine_mars, palestine_mars_UNDER, palestine_mars_OVER, palestine_mars_SMOTE, palestine_mars_IS,
palestine_rf, palestine_rf_UNDER, palestine_rf_OVER, palestine_rf_SMOTE, palestine_rf_IS)
palestine.tb <- aggregate(palestine,by=list(palestine$model),FUN=mean,na.rm=TRUE)
#EXPERIMENT 2
setwd("")
## Read the six per-model result files ("exp2_<dataset>_<model>.csv") for one
## data set and row-bind them in the original order:
## baseline, bandari, lm, svm, mars, rf.
load_exp2 <- function(dataset) {
  models <- c("baseline", "bandari", "lm", "svm", "mars", "rf")
  do.call(rbind, lapply(paste0("exp2_", dataset, "_", models, ".csv"),
                        read.csv))
}
## Per-model means; non-numeric columns yield NA with a warning, as before.
economy      <- load_exp2("economy")
economy.tb   <- aggregate(economy, by = list(economy$model),
                          FUN = mean, na.rm = TRUE)
microsoft    <- load_exp2("microsoft")
microsoft.tb <- aggregate(microsoft, by = list(microsoft$model),
                          FUN = mean, na.rm = TRUE)
obama        <- load_exp2("obama")
obama.tb     <- aggregate(obama, by = list(obama$model),
                          FUN = mean, na.rm = TRUE)
palestine    <- load_exp2("palestine")
palestine.tb <- aggregate(palestine, by = list(palestine$model),
                          FUN = mean, na.rm = TRUE)
#EXPERIMENT 3
setwd("")
## Read the six per-model result files ("exp3_<dataset>_<model>.csv") for one
## data set and row-bind them in the original order:
## baseline, bandari, lm, svm, mars, rf.
load_exp3 <- function(dataset) {
  models <- c("baseline", "bandari", "lm", "svm", "mars", "rf")
  do.call(rbind, lapply(paste0("exp3_", dataset, "_", models, ".csv"),
                        read.csv))
}
## Per-model means; non-numeric columns yield NA with a warning, as before.
economy      <- load_exp3("economy")
economy.tb   <- aggregate(economy, by = list(economy$model),
                          FUN = mean, na.rm = TRUE)
microsoft    <- load_exp3("microsoft")
microsoft.tb <- aggregate(microsoft, by = list(microsoft$model),
                          FUN = mean, na.rm = TRUE)
obama        <- load_exp3("obama")
obama.tb     <- aggregate(obama, by = list(obama$model),
                          FUN = mean, na.rm = TRUE)
palestine    <- load_exp3("palestine")
palestine.tb <- aggregate(palestine, by = list(palestine$model),
                          FUN = mean, na.rm = TRUE)
## Analysis of the EDA (electrodermal activity) data with the L1-penalised
## P-spline mixed model from the psplinesl1 package; produces the residual,
## confidence-band and observed-vs-predicted figures for the paper/poster.
library(psplinesl1)
library(reshape2)
paperPath <- "../../paper/plots"
presentPath <- "../../presentation/plots"
posterPath <- "../../poster/plots"
path <- "/home/bsegal/Documents/Research/data_empatica"
# data prep -------------------------------------------------------------------
data <- read.csv(file.path(path, "groupA.csv"))
data <- data[with(data, order(id, x)), ]
# setup design matrices: a common smooth in x plus a "high" group offset smooth
X <- list(ps(x = "x", norder = 4, k = 1, data = data, width = 5),
          ps(x = "x", norder = 4, k = 1, data = data, width = 5,
             by = "high", center = FALSE))
# subject-specific random curves
rand <- re(x = "x", id = "id", data = data,
           randomCurves = TRUE, width = 5,
           norder = 4, derivOrder = 2)
# fit the model by ADMM (timing kept for reference)
system.time({
m1 <- admm(y = "y", X = X, rand = rand,
           id = "id",
           tau = 450,
           lambda = c(1, 10),
           rho = 5,
           epsilonAbs = 1e-4,
           epsilonRel = 1e-4,
           iterMax = 1e3,
           warm = NULL,
           data = data,
           forCV = FALSE,
           centerZ = FALSE)
})
#    user  system elapsed
#  10.792   0.052  10.858
m1$conv$iter
# [1] 300
signif(m1$fit$df, 3)
#                  Overall    F1   F2   Z
# Stein                185  9.98 1.97 172
# Restricted           194 10.00 2.00 181
# ADMM                 193  9.00 2.00 181
# Ridge                196 19.50 8.00 167
# Ridge restricted     216 21.10 13.50 181
# convergence diagnostics: primal and dual residual norms
plot(log(m1$conv$rNorm))
plot(log(m1$conv$sNorm))
m1$coefs$beta0
# [1] -0.9897339
#sigma2b
m1$fit$sigma2 / m1$params$tau
# [1] 4.852319e-05
m1$fit$sigma2
# [1] 0.02183543
m1$fit$sigma2Ridge
# [1] 0.02198702
# fitted values, subject-level random effects, and data with random effects removed
data$yHat <- m1$fit$yHat
data$randEff <- with(m1, as.vector(params$rand$Z %*% coefs$b))
data$bRem <- with(data, y - randEff)
# residuals
dev.new()
qplot(data$yHat, data$y - data$yHat)+
  theme_bw(22)+
  labs(x = expression(hat(y)), y = expression(paste("y - ",hat(y))))
ggsave(file.path(paperPath, "EDA_L1_resid_2.png"))
# confidence bands for the two fixed-effect smooths
CI <- ci(model = m1, alpha = 0.05)
plot(CI)
# smooth 1 (overall curve): band drawn as a polygon, estimate as a line
CIpoly <- data.frame(x = c(CI[[1]]$x, rev(CI[[1]]$x)),
                     y = c(CI[[1]]$lower, rev(CI[[1]]$upper)),
                     id = 0)
CIpoint <- data.frame(x= CI[[1]]$x, y = CI[[1]]$smooth, id = 0)
ggplot(aes(x = x, y = y, group = id), data = CIpoly)+
  geom_polygon(fill = "grey")+
  geom_line(data = CIpoint, size = 1, color = "red")+
  theme_bw(28)+
  labs(x = "Minute", y = expression(paste("lo",g[10], "(EDA)", sep = "")))+
  scale_y_continuous(lim = c(-3,1))
ggsave(file.path(paperPath, "EDA_L1_smooth1_poster.png"))
ggsave(file.path(presentPath, "EDA_L1_smooth1_poster.png"))
# smooth 2 (high-vigilance offset): dashed zero line marks "no group effect"
CIpoly <- data.frame(x = c(CI[[2]]$x, rev(CI[[2]]$x)),
                     y = c(CI[[2]]$lower, rev(CI[[2]]$upper)),
                     id = 0)
CIpoint <- data.frame(x= CI[[2]]$x, y = CI[[2]]$smooth, id = 0)
ggplot(aes(x = x, y = y, group = id), data = CIpoly)+
  geom_polygon(fill = "grey")+
  geom_line(data = CIpoint, size = 1, color = "purple")+
  theme_bw(28)+
  geom_hline(yintercept = 0, linetype = "dashed")+
  labs(x = "Minute", y = expression(paste("lo",g[10], "(EDA)", sep = "")))+
  scale_y_continuous(lim = c(-1, 1))
ggsave(file.path(paperPath, "EDA_L1_smooth2_poster.png"))
ggsave(file.path(presentPath, "EDA_L1_smooth2_poster.png"))
# predicted curves: long format with observed (y) and fitted (yHat) values
dataM <- melt(data, id.vars = c("x", "id", "type"),
              measure.vars = c("y", "yHat"))
dataM$type <- factor(dataM$type, levels = c("low", "high"))
levels(dataM$type) <- c("Low vigilance", "High vigilance")
ID <- unique(dataM$id)
gg <- ggplot() + theme_bw(20) +
      # scale_color_manual("", values = c("grey", "black"), labels = c("Observed", "Predicted"))+
      ## BUGFIX: the argument was misspelled "guid"; spelled correctly, the
      ## colour legend is suppressed as intended.
      scale_color_manual(guide = "none", "", values = c("blue", "red"), labels = c("High", "Low"))+
      scale_alpha_manual("", values = c(.3, 1), labels = c("Observed", "Predicted"))+
      scale_linetype_manual("", values = c("solid", "dashed"), labels = c("Observed", "Predicted"))+
      labs(y = expression(paste("lo", g[10], "(EDA)", sep = "")), x = "Minute")
# one observed + one predicted trace per subject
for (i in seq_along(ID)) {
  gg <- gg + geom_line(data = dataM[which(dataM$id == ID[i]), ],
                       aes(x = x, y = value,
                           alpha = variable,
                           color = type,
                           linetype = variable))
}
dev.new(width = 9.5, height = 6.5)
gg +
guides(alpha = guide_legend(
         keywidth=0.35,
         keyheight=0.35,
         default.unit="inch"),
       linetype = guide_legend(
         keywidth=0.35,
         keyheight=0.35,
         default.unit="inch")
       )+
theme(legend.position = "bottom")+
facet_wrap(~type)
ggsave(file.path(paperPath, "EDA_L1_obs_pred.png"))
ggsave(file.path(presentPath, "EDA_L1_obs_pred.png"))
# MSE
with(data, mean((y - yHat)^2))
# [1] 0.01945408
# [1] 0.01945408 | /application/analyze_EDA_l1.R | no_license | maj-biostat/code-for-psplinesl1-paper | R | false | false | 4,832 | r | library(psplinesl1)
## Analysis of the EDA (electrodermal activity) data with the L1-penalised
## P-spline mixed model from the psplinesl1 package (loaded above); produces
## the residual, confidence-band and observed-vs-predicted figures.
library(reshape2)
paperPath <- "../../paper/plots"
presentPath <- "../../presentation/plots"
posterPath <- "../../poster/plots"
path <- "/home/bsegal/Documents/Research/data_empatica"
# data prep -------------------------------------------------------------------
data <- read.csv(file.path(path, "groupA.csv"))
data <- data[with(data, order(id, x)), ]
# setup design matrices: a common smooth in x plus a "high" group offset smooth
X <- list(ps(x = "x", norder = 4, k = 1, data = data, width = 5),
          ps(x = "x", norder = 4, k = 1, data = data, width = 5,
             by = "high", center = FALSE))
# subject-specific random curves
rand <- re(x = "x", id = "id", data = data,
           randomCurves = TRUE, width = 5,
           norder = 4, derivOrder = 2)
# fit the model by ADMM (timing kept for reference)
system.time({
m1 <- admm(y = "y", X = X, rand = rand,
           id = "id",
           tau = 450,
           lambda = c(1, 10),
           rho = 5,
           epsilonAbs = 1e-4,
           epsilonRel = 1e-4,
           iterMax = 1e3,
           warm = NULL,
           data = data,
           forCV = FALSE,
           centerZ = FALSE)
})
#    user  system elapsed
#  10.792   0.052  10.858
m1$conv$iter
# [1] 300
signif(m1$fit$df, 3)
#                  Overall    F1   F2   Z
# Stein                185  9.98 1.97 172
# Restricted           194 10.00 2.00 181
# ADMM                 193  9.00 2.00 181
# Ridge                196 19.50 8.00 167
# Ridge restricted     216 21.10 13.50 181
# convergence diagnostics: primal and dual residual norms
plot(log(m1$conv$rNorm))
plot(log(m1$conv$sNorm))
m1$coefs$beta0
# [1] -0.9897339
#sigma2b
m1$fit$sigma2 / m1$params$tau
# [1] 4.852319e-05
m1$fit$sigma2
# [1] 0.02183543
m1$fit$sigma2Ridge
# [1] 0.02198702
# fitted values, subject-level random effects, and data with random effects removed
data$yHat <- m1$fit$yHat
data$randEff <- with(m1, as.vector(params$rand$Z %*% coefs$b))
data$bRem <- with(data, y - randEff)
# residuals
dev.new()
qplot(data$yHat, data$y - data$yHat)+
  theme_bw(22)+
  labs(x = expression(hat(y)), y = expression(paste("y - ",hat(y))))
ggsave(file.path(paperPath, "EDA_L1_resid_2.png"))
# confidence bands for the two fixed-effect smooths
CI <- ci(model = m1, alpha = 0.05)
plot(CI)
# smooth 1 (overall curve): band drawn as a polygon, estimate as a line
CIpoly <- data.frame(x = c(CI[[1]]$x, rev(CI[[1]]$x)),
                     y = c(CI[[1]]$lower, rev(CI[[1]]$upper)),
                     id = 0)
CIpoint <- data.frame(x= CI[[1]]$x, y = CI[[1]]$smooth, id = 0)
ggplot(aes(x = x, y = y, group = id), data = CIpoly)+
  geom_polygon(fill = "grey")+
  geom_line(data = CIpoint, size = 1, color = "red")+
  theme_bw(28)+
  labs(x = "Minute", y = expression(paste("lo",g[10], "(EDA)", sep = "")))+
  scale_y_continuous(lim = c(-3,1))
ggsave(file.path(paperPath, "EDA_L1_smooth1_poster.png"))
ggsave(file.path(presentPath, "EDA_L1_smooth1_poster.png"))
# smooth 2 (high-vigilance offset): dashed zero line marks "no group effect"
CIpoly <- data.frame(x = c(CI[[2]]$x, rev(CI[[2]]$x)),
                     y = c(CI[[2]]$lower, rev(CI[[2]]$upper)),
                     id = 0)
CIpoint <- data.frame(x= CI[[2]]$x, y = CI[[2]]$smooth, id = 0)
ggplot(aes(x = x, y = y, group = id), data = CIpoly)+
  geom_polygon(fill = "grey")+
  geom_line(data = CIpoint, size = 1, color = "purple")+
  theme_bw(28)+
  geom_hline(yintercept = 0, linetype = "dashed")+
  labs(x = "Minute", y = expression(paste("lo",g[10], "(EDA)", sep = "")))+
  scale_y_continuous(lim = c(-1, 1))
ggsave(file.path(paperPath, "EDA_L1_smooth2_poster.png"))
ggsave(file.path(presentPath, "EDA_L1_smooth2_poster.png"))
# predicted curves: long format with observed (y) and fitted (yHat) values
dataM <- melt(data, id.vars = c("x", "id", "type"),
              measure.vars = c("y", "yHat"))
dataM$type <- factor(dataM$type, levels = c("low", "high"))
levels(dataM$type) <- c("Low vigilance", "High vigilance")
ID <- unique(dataM$id)
gg <- ggplot() + theme_bw(20) +
      # scale_color_manual("", values = c("grey", "black"), labels = c("Observed", "Predicted"))+
      ## BUGFIX: the argument was misspelled "guid"; spelled correctly, the
      ## colour legend is suppressed as intended.
      scale_color_manual(guide = "none", "", values = c("blue", "red"), labels = c("High", "Low"))+
      scale_alpha_manual("", values = c(.3, 1), labels = c("Observed", "Predicted"))+
      scale_linetype_manual("", values = c("solid", "dashed"), labels = c("Observed", "Predicted"))+
      labs(y = expression(paste("lo", g[10], "(EDA)", sep = "")), x = "Minute")
# one observed + one predicted trace per subject
for (i in seq_along(ID)) {
  gg <- gg + geom_line(data = dataM[which(dataM$id == ID[i]), ],
                       aes(x = x, y = value,
                           alpha = variable,
                           color = type,
                           linetype = variable))
}
dev.new(width = 9.5, height = 6.5)
gg +
guides(alpha = guide_legend(
         keywidth=0.35,
         keyheight=0.35,
         default.unit="inch"),
       linetype = guide_legend(
         keywidth=0.35,
         keyheight=0.35,
         default.unit="inch")
       )+
theme(legend.position = "bottom")+
facet_wrap(~type)
ggsave(file.path(paperPath, "EDA_L1_obs_pred.png"))
ggsave(file.path(presentPath, "EDA_L1_obs_pred.png"))
# MSE
with(data, mean((y - yHat)^2))
# [1] 0.01945408 |
splinePlot <- function(eSetObject, df, reference, toPlot="all") {
    ## Plot, one PDF page per feature, the observed expression values of the
    ## two treatment groups over time, overlaid with the group-specific
    ## natural-cubic-spline (df degrees of freedom) fits from limma's lmFit.
    ## NOTE(review): legendComp below assumes exactly two treatment levels.
    ## --- input validation --------------------------------------------------
    if(!is(eSetObject, "ExpressionSet")) stop("eSetObject must be of class ExpressionSet")
    if(!(("SampleName" %in% names(pData(eSetObject))) & ("Time" %in% names(pData(eSetObject))) & ("Treatment" %in% names(pData(eSetObject))))) stop("eSetObject has to include SampleName, Time and Treatment columns in phenotypic data")
    if(!(is(df, "numeric") & (df%%1 == 0) & (df > 0))) stop("df must be integer > 0")
    if(!(reference %in% levels(factor(pData(eSetObject)$Treatment)))) stop("define valid reference group")
    if(all(toPlot == "all")) {
        toPlot = rownames(exprs(eSetObject))
    } else {
        if(!is(toPlot, "character")) stop("define row names of exprs(eSetObject) to plot")
    }
    if(!all(toPlot %in% rownames(exprs(eSetObject)))) stop("some of provided names for plotting are not included in eSetObject")
    ## --- model fit: expression ~ Treatment * ns(Time, df) ------------------
    ## The reference group is forced to be the first factor level so that the
    ## treatment coefficients are offsets relative to it.
    b_ <- ns(pData(eSetObject)$Time, df=df)
    d_ <- factor(pData(eSetObject)$Treatment, levels=c(reference, setdiff(levels(factor(pData(eSetObject)$Treatment)), reference)))
    design <- model.matrix(~d_*b_)
    fit <- lmFit(eSetObject, design)
    exprs.data <- exprs(eSetObject)
    factorTreatment <- levels(d_)
    ## --- spline bases evaluated on a fine grid for smooth curves -----------
    timePoints_C <- unique(pData(eSetObject)$Time[pData(eSetObject)$Treatment == factorTreatment[1]])
    timePoints_T <- unique(pData(eSetObject)$Time[pData(eSetObject)$Treatment == factorTreatment[2]])
    regressionMatrix_C <- ns(timePoints_C, df=df)
    regressionMatrix_T <- ns(timePoints_T, df=df)
    newTime <- seq(min(c(timePoints_C,timePoints_T)), max(c(timePoints_C,timePoints_T)), length.out=101)
    regressionMatrixEval_C <- predict(regressionMatrix_C, newTime)
    regressionMatrixEval_T <- predict(regressionMatrix_T, newTime)
    number = length(toPlot)
    legendComp = c(factorTreatment[1],factorTreatment[2])
    ## common y-range across all requested features, with a small margin
    ylim = c(min(exprs.data[toPlot,])-0.25, max(exprs.data[toPlot,])+0.25)
    pdf(paste("plots_df",df,"_spline.pdf",sep=""), width=6.5, height=6.5)
    for(i in 1:number)
    {
        ix <- which(toPlot[i] == row.names(exprs.data))
        data_C <- exprs.data[ix,pData(eSetObject)$Treatment == factorTreatment[1]]
        data_T <- exprs.data[ix,pData(eSetObject)$Treatment == factorTreatment[2]]
        timePoints_C = pData(eSetObject)$Time[pData(eSetObject)$Treatment == factorTreatment[1]]
        timePoints_T = pData(eSetObject)$Time[pData(eSetObject)$Treatment == factorTreatment[2]]
        plot(timePoints_C, data_C, ylim=ylim, col=4, pch=20, main=toPlot[i], xlab="time", ylab="expression")
        points(timePoints_T, data_T, col=2, pch=20)
        legend("topright", lty=c(1,1), lwd=c(1.5,1.5), legendComp, col=c(4,2))
        ## Design columns: 1 = intercept, 2 = treatment offset,
        ## 3..(df+2) = spline basis, (df+3)..(2*df+2) = treatment:basis.
        coeffs <- fit$coefficient[ix,]
        newY_C <- coeffs[1]
        newY_T <- coeffs[1]+coeffs[2]
        ## BUGFIX: the basis column for coefficient j is j-2.  The old index
        ## (j-df+1) was only correct for df = 3 and was out of range for any
        ## other df.  The loop variable is now j so it no longer shadows the
        ## outer feature index i.
        for(j in 3:(df+2)){
            newY_C <- newY_C + coeffs[j]*regressionMatrixEval_C[,(j-2)]
            newY_T <- newY_T + (coeffs[j]+coeffs[j+df])*regressionMatrixEval_T[,(j-2)]
        }
        lines(newTime, newY_C, col=4)
        lines(newTime, newY_T, col=2)
    }
    invisible(dev.off())
} | /R/splinePlot.R | no_license | ZytoHMGU/splineTimeR | R | false | false | 2,940 | r | splinePlot <- function(eSetObject, df, reference, toPlot="all") {
## Body of splinePlot: draw, one PDF page per feature, the observed values of
## both treatment groups over time plus their ns(Time, df) spline fits.
## NOTE(review): legendComp below assumes exactly two treatment levels.
## --- input validation ------------------------------------------------------
if(!is(eSetObject, "ExpressionSet")) stop("eSetObject must be of class ExpressionSet")
if(!(("SampleName" %in% names(pData(eSetObject))) & ("Time" %in% names(pData(eSetObject))) & ("Treatment" %in% names(pData(eSetObject))))) stop("eSetObject has to include SampleName, Time and Treatment columns in phenotypic data")
if(!(is(df, "numeric") & (df%%1 == 0) & (df > 0))) stop("df must be integer > 0")
if(!(reference %in% levels(factor(pData(eSetObject)$Treatment)))) stop("define valid reference group")
if(all(toPlot == "all")) {
	toPlot = rownames(exprs(eSetObject))
} else {
	if(!is(toPlot, "character")) stop("define row names of exprs(eSetObject) to plot")
}
if(!all(toPlot %in% rownames(exprs(eSetObject)))) stop("some of provided names for plotting are not included in eSetObject")
## --- model fit: expression ~ Treatment * ns(Time, df) ----------------------
## The reference group is forced to be the first factor level.
b_ <- ns(pData(eSetObject)$Time, df=df)
d_ <- factor(pData(eSetObject)$Treatment, levels=c(reference, setdiff(levels(factor(pData(eSetObject)$Treatment)), reference)))
design <- model.matrix(~d_*b_)
fit <- lmFit(eSetObject, design)
exprs.data <- exprs(eSetObject)
factorTreatment <- levels(d_)
## --- spline bases evaluated on a fine grid for smooth curves ---------------
timePoints_C <- unique(pData(eSetObject)$Time[pData(eSetObject)$Treatment == factorTreatment[1]])
timePoints_T <- unique(pData(eSetObject)$Time[pData(eSetObject)$Treatment == factorTreatment[2]])
regressionMatrix_C <- ns(timePoints_C, df=df)
regressionMatrix_T <- ns(timePoints_T, df=df)
newTime <- seq(min(c(timePoints_C,timePoints_T)), max(c(timePoints_C,timePoints_T)), length.out=101)
regressionMatrixEval_C <- predict(regressionMatrix_C, newTime)
regressionMatrixEval_T <- predict(regressionMatrix_T, newTime)
number = length(toPlot)
legendComp = c(factorTreatment[1],factorTreatment[2])
## common y-range across all requested features, with a small margin
ylim = c(min(exprs.data[toPlot,])-0.25, max(exprs.data[toPlot,])+0.25)
pdf(paste("plots_df",df,"_spline.pdf",sep=""), width=6.5, height=6.5)
for(i in 1:number)
{
	ix <- which(toPlot[i] == row.names(exprs.data))
	data_C <- exprs.data[ix,pData(eSetObject)$Treatment == factorTreatment[1]]
	data_T <- exprs.data[ix,pData(eSetObject)$Treatment == factorTreatment[2]]
	timePoints_C = pData(eSetObject)$Time[pData(eSetObject)$Treatment == factorTreatment[1]]
	timePoints_T = pData(eSetObject)$Time[pData(eSetObject)$Treatment == factorTreatment[2]]
	plot(timePoints_C, data_C, ylim=ylim, col=4, pch=20, main=toPlot[i], xlab="time", ylab="expression")
	points(timePoints_T, data_T, col=2, pch=20)
	legend("topright", lty=c(1,1), lwd=c(1.5,1.5), legendComp, col=c(4,2))
	## Design columns: 1 = intercept, 2 = treatment offset,
	## 3..(df+2) = spline basis, (df+3)..(2*df+2) = treatment:basis.
	coeffs <- fit$coefficient[ix,]
	newY_C <- coeffs[1]
	newY_T <- coeffs[1]+coeffs[2]
	## BUGFIX: the basis column for coefficient j is j-2.  The old index
	## (j-df+1) was only correct for df = 3 and was out of range otherwise.
	## The loop variable is now j so it no longer shadows the outer i.
	for(j in 3:(df+2)){
		newY_C <- newY_C + coeffs[j]*regressionMatrixEval_C[,(j-2)]
		newY_T <- newY_T + (coeffs[j]+coeffs[j+df])*regressionMatrixEval_T[,(j-2)]
	}
	lines(newTime, newY_C, col=4)
	lines(newTime, newY_T, col=2)
}
invisible(dev.off())
}
# generate some fake data
x <- 1:10
y <- 1 + 0.2*x + rnorm(10)
#print out x and y in a matrix (doesn't alter x or y)
cbind(x,y)
# default plot
plot(x,y)
# plot documentation gives some information about
# what can be customized
# we'll go through some of these but not all.
# it's important to learn to read the documentation
?plot
# plot is a function that behaves differently based
# on the type of arguments passed to it
# for example, if we put a linear model object in...
m1 <- lm( y ~ x )
plot(m1)
# R knows to call a different function plot.lm
?plot.lm
# there are many others
?plot.ts # argument is a time series object
?plot.function # argument is a function (like cos)
?plot.data.frame # data frame
?plot.density # density object
# you get the picture.
# Calling plot( object ) calls plot."cls"(object)
# where "cls" = class(object)
# back to simple things
# for things like plot(x,y), it calls plot.default
?plot.default
# let's customize our plot
# make axis labels with xlab and ylab
plot(x,y, xlab = "Effort", ylab = "Reward")
# title with main and subtitle with sub
plot(x,y, xlab = "Effort", ylab = "Reward", main = "Reward v. Effort")
plot(x,y, xlab = "Effort", ylab = "Reward", main = "Reward v. Effort", sub = "10 datapoints")
# change font family
plot(x,y,xlab = "Effort", ylab = "Reward", family = "sans") #Default
plot(x,y,xlab = "Effort", ylab = "Reward", family = "serif")
plot(x,y,xlab = "Effort", ylab = "Reward", family = "mono")
# change plotting symbol with pch
plot(x,y,xlab = "Effort", ylab = "Reward", pch = "+")
# can also put in a number between 0 and 25, each giving diff symbol
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 12)
plot(0:25,abs(0:25-12.5),pch=0:25)
# exercise, find the filled circle plotting symbol and change
# the plot to use filled circles
# to see all the parameters you can set:
?par
# change size with cex
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 16, cex = 1/2)
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 16, cex = 2)
# change axes with xlim and ylim
plot(x,y, xlab = "Effort", ylab = "Reward", pch = 16, xlim = c(-2,12), ylim = c(-3,6) )
# change color with col, named color, rgb value, or hex
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 16, col = "magenta")
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 16, col = rgb(1,1/2,0))
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 16, col = "#00CCFF")
# Exercise:
# Make a plot with blue points in three different ways
# hint: in rgb, blue is 100% blue
# hint: look up hex code for blue
# change plot type with type
plot(x,y,xlab = "Effort", ylab = "Reward", type = "p") # default
plot(x,y,xlab = "Effort", ylab = "Reward", type = "l") # lines
plot(x,y,xlab = "Effort", ylab = "Reward", type = "b") # both (non-overlap)
plot(x,y,xlab = "Effort", ylab = "Reward", type = "o") # both (overlap)
plot(x,y,xlab = "Effort", ylab = "Reward", type = "n") # nothing
# last one seems silly, but is useful for customized plots
# lwd and lty for line widths and types
plot(x,y,xlab = "Effort", ylab = "Reward", type = "l", lwd = 0.4)
plot(x,y,xlab = "Effort", ylab = "Reward", type = "l", lwd = 3.8)
plot(x,y,xlab = "Effort", ylab = "Reward", type = "l", lty = 2)
plot(x,y,xlab = "Effort", ylab = "Reward", type = "l", lty = 3)
# Exercise: make a plot with dot-dash line type
# use points and lines to add points or lines to a plot
# calling plot a second time overwrites the plots
y2 <- 3 - 0.2*x + rnorm(10)
plot(x,y)
plot(x,y2,col = "blue") # overwrites
plot(x,y)
points(x,y2, col = "blue") # add them
plot(x,y,type = "l", ylim = c(-2,5))
lines(x,y2,col = "blue")
# when adding points or lines, make sure that the added graphics
# fall inside the axes of the plot. Plot axes are not resized
# after being originally created with plot function
# add text with the text function
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 16)
text(4,3,"Relationship not so clear")
# can add lots of text with vectors
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 16, xlim = c(0,10), ylim = c(0,5))
text(x - 0.2, y + 0.2, round(y,2)) # converts round(y,2) to string
# can even use text as the plotting symbol (good use of type = "n")
plot(x,y,xlab = "Effort", ylab = "Reward", type = "n")
text(x, y, round(y,2))
# add legend to a plot
plot(x,y, type = "o", ylim = c(-2,5))
lines(x,y2, col = "blue")
points(x,y2, col = "blue")
legend("topright", legend = c("old","new"), col = c("black","blue"), pch = 1, lty = 1)
# can put legend outside
plot(x,y,type = "l")
lines(x,y2,col = "blue")
legend(8,20, legend = c("old","new"), col = c("black","blue"), lty = 1, xpd = T)
# bonus exercise for fun:
## for the figure to reproduce use the inbuilt dataset CO2
## Use Alt+Shift+K to see all possible keyboard shortcuts
## To easily update R go through the 3rd (for Windows)
## and 5th (for MAC) answers in the following post -
## https://stackoverflow.com/questions/13656699/update-r-using-rstudio
## Please come to OH or talk to the TA after lab if there's trouble | /labs/lab2.R | no_license | Xime82/btry6020_2021 | R | false | false | 5,027 | r | # generate some fake data
x <- 1:10
y <- 1 + 0.2*x + rnorm(10)
#print out x and y in a matrix (doesn't alter x or y)
cbind(x,y)
# default plot
plot(x,y)
# plot documentation gives some information about
# what can be customized
# we'll go through some of these but not all.
# it's important to learn to read the documentation
?plot
# plot is a function that behaves differently based
# on the type of arguments passed to it
# for example, if we put a linear model object in...
m1 <- lm( y ~ x )
plot(m1)
# R knows to call a different function plot.lm
?plot.lm
# there are many others
?plot.ts # argument is a time series object
?plot.function # argument is a function (like cos)
?plot.data.frame # data frame
?plot.density # density object
# you get the picture.
# Calling plot( object ) calls plot."cls"(object)
# where "cls" = class(object)
# back to simple things
# for things like plot(x,y), it calls plot.default
?plot.default
# let's customize our plot
# make axis labels with xlab and ylab
plot(x,y, xlab = "Effort", ylab = "Reward")
# title with main and subtitle with sub
plot(x,y, xlab = "Effort", ylab = "Reward", main = "Reward v. Effort")
plot(x,y, xlab = "Effort", ylab = "Reward", main = "Reward v. Effort", sub = "10 datapoints")
# change font family
plot(x,y,xlab = "Effort", ylab = "Reward", family = "sans") #Default
plot(x,y,xlab = "Effort", ylab = "Reward", family = "serif")
plot(x,y,xlab = "Effort", ylab = "Reward", family = "mono")
# change plotting symbol with pch
plot(x,y,xlab = "Effort", ylab = "Reward", pch = "+")
# can also put in a number between 0 and 25, each giving diff symbol
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 12)
plot(0:25,abs(0:25-12.5),pch=0:25)
# exercise, find the filled circle plotting symbol and change
# the plot to use filled circles
# to see all the parameters you can set:
?par
# change size with cex
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 16, cex = 1/2)
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 16, cex = 2)
# change axes with xlim and ylim
plot(x,y, xlab = "Effort", ylab = "Reward", pch = 16, xlim = c(-2,12), ylim = c(-3,6) )
# change color with col, named color, rgb value, or hex
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 16, col = "magenta")
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 16, col = rgb(1,1/2,0))
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 16, col = "#00CCFF")
# Exercise:
# Make a plot with blue points in three different ways
# hint: in rgb, blue is 100% blue
# hint: look up hex code for blue
# change plot type with type
plot(x,y,xlab = "Effort", ylab = "Reward", type = "p") # default
plot(x,y,xlab = "Effort", ylab = "Reward", type = "l") # lines
plot(x,y,xlab = "Effort", ylab = "Reward", type = "b") # both (non-overlap)
plot(x,y,xlab = "Effort", ylab = "Reward", type = "o") # both (overlap)
plot(x,y,xlab = "Effort", ylab = "Reward", type = "n") # nothing
# last one seems silly, but is useful for customized plots
# lwd and lty for line widths and types
plot(x,y,xlab = "Effort", ylab = "Reward", type = "l", lwd = 0.4)
plot(x,y,xlab = "Effort", ylab = "Reward", type = "l", lwd = 3.8)
plot(x,y,xlab = "Effort", ylab = "Reward", type = "l", lty = 2)
plot(x,y,xlab = "Effort", ylab = "Reward", type = "l", lty = 3)
# Exercise: make a plot with dot-dash line type
# use points and lines to add points or lines to a plot
# calling plot a second time overwrites the plots
y2 <- 3 - 0.2*x + rnorm(10)
plot(x,y)
plot(x,y2,col = "blue") # overwrites
plot(x,y)
points(x,y2, col = "blue") # add them
plot(x,y,type = "l", ylim = c(-2,5))
lines(x,y2,col = "blue")
# when adding points or lines, make sure that the added graphics
# fall inside the axes of the plot. Plot axes are not resized
# after being originally created with plot function
# add text with the text function
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 16)
text(4,3,"Relationship not so clear")
# can add lots of text with vectors
plot(x,y,xlab = "Effort", ylab = "Reward", pch = 16, xlim = c(0,10), ylim = c(0,5))
text(x - 0.2, y + 0.2, round(y,2)) # converts round(y,2) to string
# can even use text as the plotting symbol (good use of type = "n")
plot(x,y,xlab = "Effort", ylab = "Reward", type = "n")
text(x, y, round(y,2))
# add legend to a plot
plot(x,y, type = "o", ylim = c(-2,5))
lines(x,y2, col = "blue")
points(x,y2, col = "blue")
legend("topright", legend = c("old","new"), col = c("black","blue"), pch = 1, lty = 1)
# can put legend outside
plot(x,y,type = "l")
lines(x,y2,col = "blue")
legend(8,20, legend = c("old","new"), col = c("black","blue"), lty = 1, xpd = T)
# bonus exercise for fun:
## for the figure to reproduce use the inbuilt dataset CO2
## Use Alt+Shift+K to see all possible keyboard shortcuts
## To easily update R go through the 3rd (for Windows)
## and 5th (for MAC) answers in the following post -
## https://stackoverflow.com/questions/13656699/update-r-using-rstudio
## Please come to OH or talk to the TA after lab if there's trouble |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkInputs.R
\name{checkBatchConsistency}
\alias{checkBatchConsistency}
\alias{checkIfSCE}
\alias{checkRestrictions}
\title{Check batch inputs}
\usage{
checkBatchConsistency(batches, cells.in.columns = TRUE)
checkIfSCE(batches)
checkRestrictions(batches, restrictions, cells.in.columns = TRUE)
}
\arguments{
\item{batches}{A list of batches, usually containing gene expression matrices or \linkS4class{SingleCellExperiment} objects.}
\item{cells.in.columns}{A logical scalar specifying whether batches contain cells in the columns.}
\item{restrictions}{A list of length equal to \code{batches}, specifying the cells in each batch that should be used for correction.}
}
\value{
\code{checkBatchConsistency} returns an invisible \code{NULL} if there are no errors.
\code{checkIfSCE} will return a logical vector specifying whether each element of \code{batches} is a SingleCellExperiment object.
\code{checkRestrictions} will return \code{NULL} if \code{restrictions=NULL}.
Otherwise, it will return a list by taking \code{restrictions} and converting each non-\code{NULL} element into an integer subsetting vector.
}
\description{
Utilities to check inputs into batch correction functions.
}
\details{
These functions are intended for internal use and other package developers.
\code{checkBatchConsistency} will check whether the input \code{batches} are consistent with respect to the size of the dimension containing features (i.e., not cells).
It will also verify that the dimension names are consistent, to avoid problems from variable ordering of rows/columns in the inputs.
\code{checkRestrictions} will check whether \code{restrictions} are consistent with the supplied \code{batches},
in terms of the length and names of the two lists.
It will also check that each batch contains at least one usable cell after restriction.
}
\examples{
checkBatchConsistency(list(cbind(1:5), cbind(1:5, 2:6)))
try( # fails
checkBatchConsistency(list(cbind(1:5), cbind(1:4, 2:5)))
)
}
\seealso{
\code{\link{divideIntoBatches}}
}
\author{
Aaron Lun
}
| /man/checkInputs.Rd | no_license | LTLA/batchelor | R | false | true | 2,132 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkInputs.R
\name{checkBatchConsistency}
\alias{checkBatchConsistency}
\alias{checkIfSCE}
\alias{checkRestrictions}
\title{Check batch inputs}
\usage{
checkBatchConsistency(batches, cells.in.columns = TRUE)
checkIfSCE(batches)
checkRestrictions(batches, restrictions, cells.in.columns = TRUE)
}
\arguments{
\item{batches}{A list of batches, usually containing gene expression matrices or \linkS4class{SingleCellExperiment} objects.}
\item{cells.in.columns}{A logical scalar specifying whether batches contain cells in the columns.}
\item{restrictions}{A list of length equal to \code{batches}, specifying the cells in each batch that should be used for correction.}
}
\value{
\code{checkBatchConsistency} returns an invisible \code{NULL} if there are no errors.
\code{checkIfSCE} will return a logical vector specifying whether each element of \code{batches} is a SingleCellExperiment object.
\code{checkRestrictions} will return \code{NULL} if \code{restrictions=NULL}.
Otherwise, it will return a list by taking \code{restrictions} and converting each non-\code{NULL} element into an integer subsetting vector.
}
\description{
Utilities to check inputs into batch correction functions.
}
\details{
These functions are intended for internal use and other package developers.
\code{checkBatchConsistency} will check whether the input \code{batches} are consistent with respect to the size of the dimension containing features (i.e., not cells).
It will also verify that the dimension names are consistent, to avoid problems from variable ordering of rows/columns in the inputs.
\code{checkRestrictions} will check whether \code{restrictions} are consistent with the supplied \code{batches},
in terms of the length and names of the two lists.
It will also check that each batch contains at least one usable cell after restriction.
}
\examples{
checkBatchConsistency(list(cbind(1:5), cbind(1:5, 2:6)))
try( # fails
checkBatchConsistency(list(cbind(1:5), cbind(1:4, 2:5)))
)
}
\seealso{
\code{\link{divideIntoBatches}}
}
\author{
Aaron Lun
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/servicecatalog_operations.R
\name{servicecatalog_describe_portfolio}
\alias{servicecatalog_describe_portfolio}
\title{Gets information about the specified portfolio}
\usage{
servicecatalog_describe_portfolio(AcceptLanguage, Id)
}
\arguments{
\item{AcceptLanguage}{The language code.
\itemize{
\item \code{en} - English (default)
\item \code{jp} - Japanese
\item \code{zh} - Chinese
}}
\item{Id}{[required] The portfolio identifier.}
}
\description{
Gets information about the specified portfolio.
A delegated admin is authorized to invoke this command.
}
\section{Request syntax}{
\preformatted{svc$describe_portfolio(
AcceptLanguage = "string",
Id = "string"
)
}
}
\keyword{internal}
| /cran/paws.management/man/servicecatalog_describe_portfolio.Rd | permissive | sanchezvivi/paws | R | false | true | 770 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/servicecatalog_operations.R
\name{servicecatalog_describe_portfolio}
\alias{servicecatalog_describe_portfolio}
\title{Gets information about the specified portfolio}
\usage{
servicecatalog_describe_portfolio(AcceptLanguage, Id)
}
\arguments{
\item{AcceptLanguage}{The language code.
\itemize{
\item \code{en} - English (default)
\item \code{jp} - Japanese
\item \code{zh} - Chinese
}}
\item{Id}{[required] The portfolio identifier.}
}
\description{
Gets information about the specified portfolio.
A delegated admin is authorized to invoke this command.
}
\section{Request syntax}{
\preformatted{svc$describe_portfolio(
AcceptLanguage = "string",
Id = "string"
)
}
}
\keyword{internal}
|
##' @importFrom ape as.phylo
##' @export
ape::as.phylo
##' @method as.phylo tbl_df
##' @importFrom tibble as_tibble
##' @importFrom dplyr mutate_if
##' @export
as.phylo.tbl_df <- function(x, branch.length, label, ...) {
    # Convert a two-column (parent, child) edge-list table into an ape "phylo"
    # object.  Optional extra columns may supply branch lengths and labels.
    #
    # x:             data frame/tibble whose first two columns form the edge list
    # branch.length: unquoted column holding edge lengths; optional
    # label:         unquoted column holding labels; defaults to the child column
    # Returns a "phylo" object; the name of the branch-length column is kept in
    # the "length_var" attribute.
    x <- as_tibble(x) %>% mutate_if(is.factor, as.character)
    # Capture the column selectors lazily so callers can pass bare column names.
    branch.length <- rlang::enquo(branch.length)
    label <- rlang::enquo(label)
    length_var <- root.edge <- edge.length <- NULL
    tip.label <- node.label <- labels <- NULL
    # Column-order heuristic: the parent column should have fewer unique values
    # than the child column (each child appears once); swap columns if reversed.
    if (nrow(unique(x[, 1])) > nrow(unique(x[,2]))){
        x %<>% dplyr::select(rev(seq_len(2)), seq_len(ncol(x)))
    }
    if (!rlang::quo_is_missing(branch.length)){
        edge.length <- as.numeric(x %>% dplyr::pull(!!branch.length))
        length_var <- rlang::as_name(branch.length)
    }
    if (!rlang::quo_is_missing(label)){
        labels <- x %>% dplyr::pull(!!label) %>% as.character()
    }else{
        # No label column supplied: fall back to the child column itself.
        labels <- x %>% dplyr::pull(2) %>% as.character()
    }
    # check_edgelist() (defined elsewhere in this package) validates the table
    # and returns the character edge matrix.  Its "indx" attribute, when
    # present, appears to mark which label/length entries belong to real edges
    # rather than a root entry -- NOTE(review): confirm against check_edgelist.
    edge <- check_edgelist(x)
    indx <- attr(edge, "indx")
    if (is.null(indx)){
        # No root entry flagged: synthesise one.  The root label is the parent
        # that never occurs as a child; it gets a zero-length root edge.
        indx <- c(FALSE, rep(TRUE, nrow(edge)))
        labels <- c(unique(edge[,1][!edge[,1] %in% edge[,2]]), labels)
        if (!is.null(edge.length)){
            edge.length <- c(0, edge.length)
        }
    }
    # Tips are children that never act as parents.  ape numbering convention:
    # tips take 1..Ntip, the root is Ntip+1, other internal nodes Ntip+2 on.
    isTip <- !edge[,2] %in% edge[,1]
    index <- rep(NA, length(isTip))
    index[isTip] <- seq_len(sum(isTip))
    index[!isTip] <- seq(sum(isTip)+2, length(isTip)+1)
    mapping <- data.frame(node=index, labelnames=edge[,2], isTip)
    # Rewrite the character edge list as numeric node ids via the mapping; the
    # root has no child entry, so it comes out NA and is patched just below.
    parent <- mapping[match(edge[,1], mapping$labelnames), "node"]
    child <- mapping[match(edge[,2], mapping$labelnames), 'node']
    edge <- as.matrix(cbind(parent, child))
    colnames(edge) <- NULL
    edge[is.na(edge)] <- sum(isTip) + 1  # root node id = Ntip + 1
    if (!is.null(edge.length)){
        # Split off the root-edge length; drop it entirely when absent or NA.
        root.edge <- edge.length[!indx]
        if (length(root.edge)==0 || is.na(root.edge)){
            root.edge <- NULL
        }
        edge.length <- edge.length[indx]
    }
    if (!is.null(labels)){
        # Separate tip labels from internal-node labels (root label first).
        root.label <- labels[!indx]
        labels <- labels[indx]
        tip.label <- labels[isTip]
        node.label <- c(root.label, labels[!isTip])
        if (all(is.na(node.label))){
            node.label <- NULL
        }
    }
    # Internal node count = distinct node ids minus the tips.
    Nnode <- length(unique(as.vector(edge))) - sum(isTip)
    phylo <- list(
        edge = edge,
        Nnode = Nnode,
        tip.label = tip.label,
        edge.length = edge.length
    )
    class(phylo) <- 'phylo'
    phylo$root.edge <- root.edge
    phylo$node.label <- node.label
    # Remember which input column supplied the branch lengths.
    attr(phylo, "length_var") <- length_var
    return(phylo)
}
## Plain data frames and two-column matrices are edge lists too; they reuse
## the tbl_df conversion defined above.
##' @method as.phylo data.frame
##' @export
as.phylo.data.frame <- as.phylo.tbl_df
##' @method as.phylo matrix
##' @export
as.phylo.matrix <- as.phylo.tbl_df
##' @method as.phylo phylo4
##' @export
as.phylo.phylo4 <- function(x, ...) {
    # Convert a phylo4 (S4) tree to an ape "phylo" list.
    # Drop edge rows whose ancestor id is 0 -- NOTE(review): phylo4 appears to
    # use a 0 ancestor for the root's pseudo-edge; confirm against phylobase.
    edge <- x@edge
    edge.filter <- edge[,1] != 0
    edge <- edge[edge.filter, ]
    edge.length <- x@edge.length
    edge.length <- edge.length[edge.filter]
    # Tips are ids that occur as descendants but never as ancestors.
    tip.id <- sort(setdiff(edge[,2], edge[,1]))
    tip.label <- x@label[tip.id]
    phylo <- list(edge = edge,
                  edge.length = edge.length,
                  tip.label = tip.label)
    # Internal nodes: every distinct (non-zero) ancestor id.
    node.id <- sort(unique(edge[,1]))
    node.id <- node.id[node.id != 0]
    node.label <- x@label[node.id]
    # Attach node labels only when at least one is non-missing.
    if (!all(is.na(node.label))) {
        phylo$node.label <- node.label
    }
    phylo$Nnode <- length(node.id)
    class(phylo) <- "phylo"
    return(phylo)
}
##' @method as.phylo pvclust
##' @export
as.phylo.pvclust <- function(x, ...) {
    # A pvclust result stores the clustering it was computed from in $hclust;
    # delegate to the hclust-with-node-ids converter (defined elsewhere in
    # this package).
    as.phylo.hclust_node(x$hclust, ...)
}
##' @method as.phylo ggtree
##' @export
as.phylo.ggtree <- function(x, ...) {
    # Recover the plot's underlying tree data, then force the tbl_tree class
    # so as.phylo() dispatches on the tbl_tree method (not defined in this
    # file -- NOTE(review): presumably provided by tidytree; confirm).
    d <- as_tibble(x$data)
    class(d) <- c("tbl_tree", "tbl_df", "tbl", "data.frame")
    as.phylo(d)
}
##' @method as.phylo igraph
##' @export
as.phylo.igraph <- function(x, ...) {
    # Flatten the graph to its two-column (from, to) edge matrix; the matrix
    # method above then performs the actual edge-list -> phylo conversion.
    edge <- igraph::get.edgelist(x)
    as.phylo(edge)
}
##' @method as.phylo list
##' @export
as.phylo.list <- function(x, ...){
    # Convert a (possibly deeply) nested list into a "phylo" tree by writing
    # it out as a Newick string and parsing that with ape::read.tree().
    max.depth <- purrr::vec_depth(x)
    # Collapse the list bottom-up: each pass pastes the deepest remaining
    # level into a parenthesised, comma-separated clade string.
    while(max.depth > 1){
        # The try() result is discarded (`trash`) and errors are silently
        # ignored; levels shallower than the current depth simply skip a pass.
        trash = try(silent = TRUE,
                    expr = {
                        x <- purrr::map_depth(x, max.depth - 1, paste_nested_list)
                    }
        )
        max.depth <- max.depth - 1
    }
    # Final assembly: normalise each top-level clade, append names, join with
    # commas, and terminate the Newick string with ";" before parsing.
    x <- lapply(x, .paste0_)
    x <- .paste1_(x)
    x <- paste0(x, collapse=',')
    x <- paste0('(', x, ');')
    x <- ape::read.tree(text = x)
    return(x)
}
# Collapse one level of a nested list into a Newick-style clade fragment.
#
# For input of length > 1: unnamed elements are comma-joined and wrapped in
# parentheses; named elements are rendered as "<value><name>" and comma-joined
# without extra wrapping.  For a single element: it is wrapped in parentheses
# unless it already starts with "(" and ends with ")", and its name (if any)
# is appended.
paste_nested_list <- function(x) {
    if (length(x) > 1) {
        if (length(names(x)) == 0) {
            out <- paste0("(", paste0(x, collapse = ","), ")")
        } else {
            out <- paste0(paste0(x, names(x)), collapse = ",")
        }
    } else {
        already_wrapped <- grepl("^\\(", x) && grepl("\\)$", x)
        out <- if (already_wrapped) {
            paste0(x, names(x))
        } else {
            paste0("(", x, ")", names(x))
        }
    }
    out
}
# Normalise a clade string: strip one redundant pair of doubled parentheses
# ("((...))" becomes "(...)"); otherwise wrap the string in a new pair of
# parentheses and append its name (if any).
.paste0_ <- function(x) {
    doubly_wrapped <- grepl("^\\(\\(", x) && grepl("\\)\\)$", x)
    if (doubly_wrapped) {
        x <- gsub("^\\(\\(", "(", x)
        return(gsub("\\)\\)$", ")", x))
    }
    paste0("(", x, ")", names(x), collapse = ",")
}
# Append element names to clade strings; any element that already contains a
# closed sub-clade followed by a comma ("),") is additionally parenthesised so
# the final Newick string stays balanced.  Elements are processed in place,
# which preserves the names attribute of the input in the wrapping branch.
.paste1_ <- function(x) {
    wrap <- grepl("\\),", x)
    if (!any(wrap)) {
        return(paste0(x, names(x)))
    }
    x[wrap] <- paste0("(", x[wrap], ")", names(x[wrap]))
    x[!wrap] <- paste0(x[!wrap], names(x[!wrap]))
    x
}
##' access phylo slot
##'
##'
##' @title get.tree
##' @param x tree object
##' @param ... additional parameters
##' @return phylo object
##' @export
##' @author Guangchuang Yu
get.tree <- function(x, ...) {
as.phylo(x, ...)
}
| /R/method-as-phylo.R | no_license | xiangpin/treeio | R | false | false | 5,643 | r | ##' @importFrom ape as.phylo
##' @export
ape::as.phylo
##' @method as.phylo tbl_df
##' @importFrom tibble as_tibble
##' @importFrom dplyr mutate_if
##' @export
as.phylo.tbl_df <- function(x, branch.length, label, ...) {
x <- as_tibble(x) %>% mutate_if(is.factor, as.character)
branch.length <- rlang::enquo(branch.length)
label <- rlang::enquo(label)
length_var <- root.edge <- edge.length <- NULL
tip.label <- node.label <- labels <- NULL
if (nrow(unique(x[, 1])) > nrow(unique(x[,2]))){
x %<>% dplyr::select(rev(seq_len(2)), seq_len(ncol(x)))
}
if (!rlang::quo_is_missing(branch.length)){
edge.length <- as.numeric(x %>% dplyr::pull(!!branch.length))
length_var <- rlang::as_name(branch.length)
}
if (!rlang::quo_is_missing(label)){
labels <- x %>% dplyr::pull(!!label) %>% as.character()
}else{
labels <- x %>% dplyr::pull(2) %>% as.character()
}
edge <- check_edgelist(x)
indx <- attr(edge, "indx")
if (is.null(indx)){
indx <- c(FALSE, rep(TRUE, nrow(edge)))
labels <- c(unique(edge[,1][!edge[,1] %in% edge[,2]]), labels)
if (!is.null(edge.length)){
edge.length <- c(0, edge.length)
}
}
isTip <- !edge[,2] %in% edge[,1]
index <- rep(NA, length(isTip))
index[isTip] <- seq_len(sum(isTip))
index[!isTip] <- seq(sum(isTip)+2, length(isTip)+1)
mapping <- data.frame(node=index, labelnames=edge[,2], isTip)
parent <- mapping[match(edge[,1], mapping$labelnames), "node"]
child <- mapping[match(edge[,2], mapping$labelnames), 'node']
edge <- as.matrix(cbind(parent, child))
colnames(edge) <- NULL
edge[is.na(edge)] <- sum(isTip) + 1
if (!is.null(edge.length)){
root.edge <- edge.length[!indx]
if (length(root.edge)==0 || is.na(root.edge)){
root.edge <- NULL
}
edge.length <- edge.length[indx]
}
if (!is.null(labels)){
root.label <- labels[!indx]
labels <- labels[indx]
tip.label <- labels[isTip]
node.label <- c(root.label, labels[!isTip])
if (all(is.na(node.label))){
node.label <- NULL
}
}
Nnode <- length(unique(as.vector(edge))) - sum(isTip)
phylo <- list(
edge = edge,
Nnode = Nnode,
tip.label = tip.label,
edge.length = edge.length
)
class(phylo) <- 'phylo'
phylo$root.edge <- root.edge
phylo$node.label <- node.label
attr(phylo, "length_var") <- length_var
return(phylo)
}
##' @method as.phylo data.frame
##' @export
as.phylo.data.frame <- as.phylo.tbl_df
##' @method as.phylo matrix
##' @export
as.phylo.matrix <- as.phylo.tbl_df
##' @method as.phylo phylo4
##' @export
as.phylo.phylo4 <- function(x, ...) {
edge <- x@edge
edge.filter <- edge[,1] != 0
edge <- edge[edge.filter, ]
edge.length <- x@edge.length
edge.length <- edge.length[edge.filter]
tip.id <- sort(setdiff(edge[,2], edge[,1]))
tip.label <- x@label[tip.id]
phylo <- list(edge = edge,
edge.length = edge.length,
tip.label = tip.label)
node.id <- sort(unique(edge[,1]))
node.id <- node.id[node.id != 0]
node.label <- x@label[node.id]
if (!all(is.na(node.label))) {
phylo$node.label <- node.label
}
phylo$Nnode <- length(node.id)
class(phylo) <- "phylo"
return(phylo)
}
##' @method as.phylo pvclust
##' @export
as.phylo.pvclust <- function(x, ...) {
as.phylo.hclust_node(x$hclust, ...)
}
##' @method as.phylo ggtree
##' @export
as.phylo.ggtree <- function(x, ...) {
d <- as_tibble(x$data)
class(d) <- c("tbl_tree", "tbl_df", "tbl", "data.frame")
as.phylo(d)
}
##' @method as.phylo igraph
##' @export
as.phylo.igraph <- function(x, ...) {
edge <- igraph::get.edgelist(x)
as.phylo(edge)
}
##' @method as.phylo list
##' @export
as.phylo.list <- function(x, ...){
max.depth <- purrr::vec_depth(x)
while(max.depth > 1){
trash = try(silent = TRUE,
expr = {
x <- purrr::map_depth(x, max.depth - 1, paste_nested_list)
}
)
max.depth <- max.depth - 1
}
x <- lapply(x, .paste0_)
x <- .paste1_(x)
x <- paste0(x, collapse=',')
x <- paste0('(', x, ');')
x <- ape::read.tree(text = x)
return(x)
}
paste_nested_list <- function(x){
if (length(x)>1){
if (length(names(x))!=0){
x <- paste0(paste0(x, names(x)), collapse=',')
}else{
x <- paste0("(", paste0(x, collapse=','), ")")
}
}else{
if (!(grepl("^\\(", x) && grepl("\\)$", x))){
x <- paste0('(', x, ')', names(x))
}else{
x <- paste0(x, names(x))
}
}
return(x)
}
.paste0_ <- function(x){
flag <- grepl("^\\(\\(", x) && grepl("\\)\\)$", x)
if (flag){
x <- gsub("^\\(\\(", "\\(", x)
x <- gsub('\\)\\)$', "\\)", x)
}else{
x <- paste0('(', x, ')', names(x), collapse=',')
}
return(x)
}
.paste1_ <- function(x){
index <- grepl('\\),', x)
if (any(index)){
x[index] <- paste0("(", x[index],")", names(x[index]))
x[!index] <- paste0(x[!index], names(x[!index]))
}else{
x <- paste0(x, names(x))
}
return(x)
}
##' access phylo slot
##'
##'
##' @title get.tree
##' @param x tree object
##' @param ... additional parameters
##' @return phylo object
##' @export
##' @author Guangchuang Yu
get.tree <- function(x, ...) {
as.phylo(x, ...)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calendar.R
\name{calendar-holidays-weekdays}
\alias{calendar-holidays-weekdays}
\alias{holidays}
\alias{holidays.default}
\alias{holidays.Calendar}
\alias{holidays.character}
\alias{weekdays.default}
\alias{weekdays.Calendar}
\alias{weekdays.character}
\title{Calendar's holidays and weekdays}
\usage{
holidays(cal)
\method{holidays}{default}(cal)
\method{holidays}{Calendar}(cal)
\method{holidays}{character}(cal)
\method{weekdays}{default}(x, ...)
\method{weekdays}{Calendar}(x, ...)
\method{weekdays}{character}(x, ...)
}
\arguments{
\item{cal}{character with calendar name or the calendar object}
\item{x}{character with calendar name or the calendar object}
\item{...}{unused argument (this exists to keep compliance with
\code{weekdays} generic)}
}
\description{
Returns calendar's list of holidays and weekdays
}
\examples{
holidays("actual")
weekdays("actual")
# empty calls return the default calendar attributes
holidays()
weekdays()
}
| /man/calendar-holidays-weekdays.Rd | no_license | cran/bizdays | R | false | true | 1,077 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calendar.R
\name{calendar-holidays-weekdays}
\alias{calendar-holidays-weekdays}
\alias{holidays}
\alias{holidays.default}
\alias{holidays.Calendar}
\alias{holidays.character}
\alias{weekdays.default}
\alias{weekdays.Calendar}
\alias{weekdays.character}
\title{Calendar's holidays and weekdays}
\usage{
holidays(cal)
\method{holidays}{default}(cal)
\method{holidays}{Calendar}(cal)
\method{holidays}{character}(cal)
\method{weekdays}{default}(x, ...)
\method{weekdays}{Calendar}(x, ...)
\method{weekdays}{character}(x, ...)
}
\arguments{
\item{cal}{character with calendar name or the calendar object}
\item{x}{character with calendar name or the calendar object}
\item{...}{unused argument (this exists to keep compliance with
\code{weekdays} generic)}
}
\description{
Returns calendar's list of holidays and weekdays
}
\examples{
holidays("actual")
weekdays("actual")
# empty calls return the default calendar attributes
holidays()
weekdays()
}
|
# some random examples
p2 <- ggplot()
p2 + geom_point(data = df,aes(x=cu,y=zn)) +
geom_line(data = df,aes(x=cu,y=zn))
p3 <- ggplot(data = df,aes(x=cu,y=zn))
p3 + geom_point() +
geom_line()
ggplot() + geom_point(data = df,aes(x=cu,y=zn)) +
facet_grid(vars(landuse), vars(texture),scales = "free")
ggplot(df)+
geom_point(aes(cu,zn,color=landuse))+
facet_wrap(~landuse)+#,scales = "free_y"
ggtitle("This is a great title!")+
labs(x = "Cu [mg/kg]",y = "Zn [mg/kg]")+
theme_classic(base_size = 20)+
theme(legend.position = "bottom")
ggplot(df,aes(y = cu,x = landuse,fill = landuse))+
geom_boxplot()+
scale_fill_brewer(palette = "BrBG")
ggplot(df,aes(y = cu,x = landuse,fill = landuse))+
geom_boxplot()+
scale_fill_manual(values = c("yellow","blue"))
ggplot(df,aes(y = cu,x = landuse,color = landuse))+
geom_boxplot()+
scale_color_manual(values = c("yellow","blue"))
| /script/more_examples_plotting.R | no_license | GiulioGenova/RCourse | R | false | false | 904 | r | # some random examples
p2 <- ggplot()
p2 + geom_point(data = df,aes(x=cu,y=zn)) +
geom_line(data = df,aes(x=cu,y=zn))
p3 <- ggplot(data = df,aes(x=cu,y=zn))
p3 + geom_point() +
geom_line()
ggplot() + geom_point(data = df,aes(x=cu,y=zn)) +
facet_grid(vars(landuse), vars(texture),scales = "free")
ggplot(df)+
geom_point(aes(cu,zn,color=landuse))+
facet_wrap(~landuse)+#,scales = "free_y"
ggtitle("This is a great title!")+
labs(x = "Cu [mg/kg]",y = "Zn [mg/kg]")+
theme_classic(base_size = 20)+
theme(legend.position = "bottom")
ggplot(df,aes(y = cu,x = landuse,fill = landuse))+
geom_boxplot()+
scale_fill_brewer(palette = "BrBG")
ggplot(df,aes(y = cu,x = landuse,fill = landuse))+
geom_boxplot()+
scale_fill_manual(values = c("yellow","blue"))
ggplot(df,aes(y = cu,x = landuse,color = landuse))+
geom_boxplot()+
scale_color_manual(values = c("yellow","blue"))
|
#' ABAP to occuR
#'
#' @description This function transforms a raw ABAP data frame (returned by \code{\link{getAbapData}})
#' into an list which can be used to fit single-species occupancy models using
#' the package \code{\href{https://github.com/r-glennie/occuR}{occuR}}. This package
#' can fit non-linear effects, including spatial, temporal and spatio-temporal effects
#' using splines.
#' @param abap_data ABAP data downloaded using \code{\link{getAbapData}}.
#' @param occasion A character string indicating the variable in `abap_data`
#' that informs about the season (or occasion, as referred to in occuR). This must always
#' be supplied, although it is really only important in the case of multi-season data.
#' It is typically "year" but could be something else.
#' @param pentads Optional, An `sf` object returned by \code{\link{getRegionPentads}}. Defaults to `NULL`.
#' @param proj_coords logical value indicating whether pentad coordinates are
#' projected (`TRUE`) or kept in decimal degree format (`FALSE`). Defaults to `TRUE`.
#' See details below for coordinate reference system used during transformation.
#'
#' @return A list containing data necessary for model fitting in `occuR`.
#' List elements are: visit_data, a data frame containing information about individual
#' visits; site_data, a data frame containing information about sites and seasons.
#' If `pentads` are given then the coordinates of the centroid of the pentads will be
#' included in site_data.
#'
#' @details The \code{\href{https://github.com/r-glennie/occuR}{occuR}} package can
#' fit spatial effects, for which we need the spatial location of our sites. Within the context
#' of ABAP, these locations are the centroid of each sampling pentad. In order to
#' provide these spatial data to this function, simply use \code{\link{getRegionPentads}}
#' and provide the same inputs for `.region_type` and `.region` that are specified
#' in corresponding \code{\link{getAbapData}} call. If proj_coords is set to `TRUE`
#' then the coordinates will be transformed using the African Albers Equal Area
#' coordinate system (see \href{https://spatialreference.org/ref/esri/africa-albers-equal-area-conic/}{here}
#' for details). This projection is best suited for land masses extending in an
#' east-to-west orientation at mid-latitudes making it suitable for projecting
#' pentads in Southern Africa. \cr
#'
#' In addition to reformatting the detection/non-detection ABAP data for use in
#' `occuR` occupancy models, this function also extracts two survey-level
#' covariates and adds them to the output list: ` hours` and `jday`. The `hours`
#' variable is the total number of hours spent atlassing which is recorded on the
#' pentad card and `jday` is the Julian day corresponding to the first day of
#' atlassing for that card.
#'
#' @export
#'
#' @examples
#' \dontrun{
#' library(ABAP)
#' library(rgee)
#' library(ABDtools)
#' library(dplyr)
#'
#' ## Download single-season ABAP data
#' abap_data <- getAbapData(.spp_code = 212,
#' .region_type = "province",
#' .region = "Eastern Cape",
#' .years = 2012)
#'
#' # Download ABAP site spatial data
#' abap_pentads <- getRegionPentads(.region_type = "province",
#' .region = "Eastern Cape")
#'
#' # We will use years as occupancy reference seasons
#' abap_data$year <- format(abap_data$StartDate, "%Y")
#'
#' ## Return list for spatial occupancy model
#' occur_data <- abapToOccuR(abap_data, occasion = "year", abap_pentads)
#' str(occur_data)
#'
#' ## Return list for spatial occupancy model (without coordinate projection)
#' occur_data <- abapToOccuR(abap_data, "year", abap_pentads, proj_coords = FALSE)
#' str(occur_data)
#'
#' ## List for non-spatial occupancy model
#' occur_data <- abapToOccuR(abap_data, "year")
#' str(occur_data)
#'
#' ## Transforming multi-season ABAP data into occuR format is just as easy
#' abap_data <- getAbapData(.spp_code = 212,
#' .region_type = "province",
#' .region = "Eastern Cape",
#' .years = 2012:2015)
#'
#' # We will use years as occupancy reference seasons
#' abap_data$year <- format(abap_data$StartDate, "%Y")
#'
#' ## Return list for spatial occupancy model
#' occur_data <- abapToOccuR(abap_data, occasion = "year", abap_pentads)
#' str(occur_data)
#'
#' ### ADD SITE-YEAR VARIABLES ###
#'
#' ## Start up GEE
#' ee_check()
#' ee_Initialize(drive = TRUE)
#'
#' ## Create assetId for pentads of interest
#' assetId <- file.path(ee_get_assethome(), 'EC_pentads')
#'
#' ## Upload pentads to GEE (only run this once per asset)
#' uploadFeaturesToEE(pentads = abap_pentads,
#' asset_id = assetId,
#' load = FALSE)
#'
#' ## Load the remote asset into R session
#' pentads <- ee$FeatureCollection(assetId)
#'
#' ## Create a multi-band image with mean NDVI for each year
#' ndvi_multiband <- EEcollectionToMultiband(collection = "MODIS/006/MOD13A2",
#' dates = c("2012-01-01", "2015-12-31"),
#' band = "NDVI",
#' group_type = "year",
#' groups = 2012:2015,
#' reducer = "mean",
#' unmask = FALSE)
#'
#' ## Find mean and standard deviation of NDVI value for each pentad and year from the multi-band image
#' ndvi_mean <- addVarEEimage(pentads, ndvi_multiband, "mean")
#' ndvi_sd <- addVarEEimage(pentads, ndvi_multiband, "stdDev")
#'
#' ## Format the data to include the pentad column and GEE values for each year
#' ndvi_mean <- ndvi_mean %>%
#' select(pentad, paste0("NDVI_", as.character(2012:2015)))
#'
#' ndvi_sd <- ndvi_sd %>%
#' select(pentad, paste0("NDVI_", as.character(2012:2015)))
#'
#' ## occuR data have the structure of regular data frames (they are data tables, but
#' ## behave very similarly), so there are many ways we can transfer covariate values from
#' ## other data frame-like objects. Here, we will transfer the value of these variables
#' ## with dplyr::left_join(), but first we need to give covariates a long format with tidyr
#' ndvi_mean_long <- ndvi_mean %>%
#' sf::st_drop_geometry() %>%
#' tidyr::pivot_longer(-pentad, names_to = "year", values_to = "NDVI_mean") %>%
#' mutate(year = gsub("NDVI_", "", year))
#'
#' ndvi_sd_long <- ndvi_sd %>%
#' sf::st_drop_geometry() %>%
#' tidyr::pivot_longer(-pentad, names_to = "year", values_to = "NDVI_sd") %>%
#' mutate(year = gsub("NDVI_", "", year))
#'
#' # Transfer variables via join
#' occur_data_ee <- occur_data$site %>%
#' dplyr::left_join(ndvi_mean_long, by = c("pentad", "year")) %>%
#' dplyr::left_join(ndvi_sd_long, by = c("pentad", "year"))
#'
#' summary(occur_data_ee)
#'
#' }
abapToOccuR <- function(abap_data, occasion, pentads = NULL, proj_coords = TRUE){

  # occuR is only needed for the downstream model fitting, so a missing
  # installation is reported as a warning rather than an error.
  if (!requireNamespace("occuR", quietly = TRUE)) {
    warning("Package occuR doesn't seem to be installed. We recommend installing it if you are using this function.")
  }

  # Create visit data: one row per ABAP card, with integer `site` ids
  # (one per pentad, in pentad/date order)
  visit_data <- abap_data %>%
    dplyr::arrange(Pentad, StartDate) %>%
    dplyr::group_by(Pentad) %>%
    dplyr::mutate(site = dplyr::cur_group_id()) %>%
    dplyr::ungroup()

  # Integer `occasion` index derived from the user-supplied season column
  visit_data <- visit_data %>%
    dplyr::group_by(!!rlang::sym(occasion)) %>%
    dplyr::group_by(occasion = dplyr::cur_group_id()) %>%
    dplyr::ungroup()

  # Sequential visit number within each site
  visit_data <- visit_data %>%
    dplyr::group_by(site) %>%
    dplyr::mutate(visit = dplyr::row_number()) %>%
    dplyr::ungroup()

  # Detection/non-detection: ABAP codes non-detection as "-"
  visit_data <- visit_data %>%
    dplyr::mutate(obs = ifelse(Spp == "-", 0L, 1L))

  # Create additional covariates (total observation hours and day of year)
  visit_data <- visit_data %>%
    dplyr::rename(hours = TotalHours) %>%
    dplyr::mutate(jday = lubridate::yday(StartDate))

  # Select columns
  visit_data <- visit_data %>%
    dplyr::select(pentad = Pentad, !!rlang::sym(occasion), site, occasion, visit, obs, hours, jday)

  # Create site data. BUG FIX: the season column was previously hard-coded as
  # `year`, which failed whenever `occasion` named a different column; inject
  # the user-supplied occasion column instead (identical behavior when
  # occasion == "year").
  site_data <- visit_data %>%
    dplyr::distinct(pentad, !!rlang::sym(occasion), site, occasion)

  # Extract spatial data (pentad centroids) when pentads are supplied
  if (!is.null(pentads)) {
    pentad_id <- unique(abap_data$Pentad)
    sf::st_agr(pentads) <- "constant"

    # African Albers Equal Area, suitable for Southern Africa (see roxygen)
    aeaproj <- "+proj=aea +lat_1=20 +lat_2=-23 +lat_0=0 +lon_0=25 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs"

    if (isTRUE(proj_coords)) {
      pentads <- sf::st_transform(pentads, aeaproj)
    }

    # Pentad codes ordered to match site_data
    ids <- pentads %>%
      dplyr::filter(pentad %in% pentad_id) %>%
      dplyr::arrange(match(pentad, site_data$pentad)) %>%
      dplyr::pull(pentad)

    # Centroid coordinates (X, Y) for each pentad
    pentad_xy <- pentads %>%
      dplyr::filter(pentad %in% pentad_id) %>%
      dplyr::arrange(match(pentad, site_data$pentad)) %>%
      sf::st_centroid() %>%
      sf::st_coordinates() %>%
      as.data.frame() %>%
      dplyr::mutate(pentad = ids)

    site_data <- site_data %>%
      dplyr::left_join(pentad_xy, by = "pentad")
  }

  return(list(site = data.table::as.data.table(site_data),
              visit = data.table::as.data.table(visit_data)))
}
| /R/abapToOccuR.R | permissive | AfricaBirdData/ABAP | R | false | false | 9,394 | r | #' ABAP to occuR
#'
#' @description This function transforms a raw ABAP data frame (returned by \code{\link{getAbapData}})
#' into an list which can be used to fit single-species occupancy models using
#' the package \code{\href{https://github.com/r-glennie/occuR}{occuR}}. This package
#' can fit non-linear effects, including spatial, temporal and spatio-temporal effects
#' using splines.
#' @param abap_data ABAP data downloaded using \code{\link{getAbapData}}.
#' @param occasion A character string indicating the variable in `abap_data`
#' that informs about the season (or occasion, as referred to in occuR). This must always
#' be supplied, although it is really only important in the case of multi-season data.
#' It is typically "year" but could be something else.
#' @param pentads Optional, An `sf` object returned by \code{\link{getRegionPentads}}. Defaults to `NULL`.
#' @param proj_coords logical value indicating whether pentad coordinates are
#' projected (`TRUE`) or kept in decimal degree format (`FALSE`). Defaults to `TRUE`.
#' See details below for coordinate reference system used during transformation.
#'
#' @return A list containing data necessary for model fitting in `occuR`.
#' List elements are: visit_data, a data frame containing information about individual
#' visits; site_data, a data frame containing information about sites and seasons.
#' If `pentads` are given then the coordinates of the centroid of the pentads will be
#' included in site_data.
#'
#' @details The \code{\href{https://github.com/r-glennie/occuR}{occuR}} package can
#' fit spatial effects, for which we need the spatial location of our sites. Within the context
#' of ABAP, these locations are the centroid of each sampling pentad. In order to
#' provide these spatial data to this function, simply use \code{\link{getRegionPentads}}
#' and provide the same inputs for `.region_type` and `.region` that are specified
#' in corresponding \code{\link{getAbapData}} call. If proj_coords is set to `TRUE`
#' then the coordinates will be transformed using the African Albers Equal Area
#' coordinate system (see \href{https://spatialreference.org/ref/esri/africa-albers-equal-area-conic/}{here}
#' for details). This projection is best suited for land masses extending in an
#' east-to-west orientation at mid-latitudes making it suitable for projecting
#' pentads in Southern Africa. \cr
#'
#' In addition to reformatting the detection/non-detection ABAP data for use in
#' `occuR` occupancy models, this function also extracts two survey-level
#' covariates and adds them to the output list: ` hours` and `jday`. The `hours`
#' variable is the total number of hours spent atlassing which is recorded on the
#' pentad card and `jday` is the Julian day corresponding to the first day of
#' atlassing for that card.
#'
#' @export
#'
#' @examples
#' \dontrun{
#' library(ABAP)
#' library(rgee)
#' library(ABDtools)
#' library(dplyr)
#'
#' ## Download single-season ABAP data
#' abap_data <- getAbapData(.spp_code = 212,
#' .region_type = "province",
#' .region = "Eastern Cape",
#' .years = 2012)
#'
#' # Download ABAP site spatial data
#' abap_pentads <- getRegionPentads(.region_type = "province",
#' .region = "Eastern Cape")
#'
#' # We will use years as occupancy reference seasons
#' abap_data$year <- format(abap_data$StartDate, "%Y")
#'
#' ## Return list for spatial occupancy model
#' occur_data <- abapToOccuR(abap_data, occasion = "year", abap_pentads)
#' str(occur_data)
#'
#' ## Return list for spatial occupancy model (without coordinate projection)
#' occur_data <- abapToOccuR(abap_data, "year", abap_pentads, proj_coords = FALSE)
#' str(occur_data)
#'
#' ## List for non-spatial occupancy model
#' occur_data <- abapToOccuR(abap_data, "year")
#' str(occur_data)
#'
#' ## Transforming multi-season ABAP data into occuR format is just as easy
#' abap_data <- getAbapData(.spp_code = 212,
#' .region_type = "province",
#' .region = "Eastern Cape",
#' .years = 2012:2015)
#'
#' # We will use years as occupancy reference seasons
#' abap_data$year <- format(abap_data$StartDate, "%Y")
#'
#' ## Return list for spatial occupancy model
#' occur_data <- abapToOccuR(abap_data, occasion = "year", abap_pentads)
#' str(occur_data)
#'
#' ### ADD SITE-YEAR VARIABLES ###
#'
#' ## Start up GEE
#' ee_check()
#' ee_Initialize(drive = TRUE)
#'
#' ## Create assetId for pentads of interest
#' assetId <- file.path(ee_get_assethome(), 'EC_pentads')
#'
#' ## Upload pentads to GEE (only run this once per asset)
#' uploadFeaturesToEE(pentads = abap_pentads,
#' asset_id = assetId,
#' load = FALSE)
#'
#' ## Load the remote asset into R session
#' pentads <- ee$FeatureCollection(assetId)
#'
#' ## Create a multi-band image with mean NDVI for each year
#' ndvi_multiband <- EEcollectionToMultiband(collection = "MODIS/006/MOD13A2",
#' dates = c("2012-01-01", "2015-12-31"),
#' band = "NDVI",
#' group_type = "year",
#' groups = 2012:2015,
#' reducer = "mean",
#' unmask = FALSE)
#'
#' ## Find mean and standard deviation of NDVI value for each pentad and year from the multi-band image
#' ndvi_mean <- addVarEEimage(pentads, ndvi_multiband, "mean")
#' ndvi_sd <- addVarEEimage(pentads, ndvi_multiband, "stdDev")
#'
#' ## Format the data to include the pentad column and GEE values for each year
#' ndvi_mean <- ndvi_mean %>%
#' select(pentad, paste0("NDVI_", as.character(2012:2015)))
#'
#' ndvi_sd <- ndvi_sd %>%
#' select(pentad, paste0("NDVI_", as.character(2012:2015)))
#'
#' ## occuR data have the structure of regular data frames (they are data tables, but
#' ## behave very similarly), so there are many ways we can transfer covariate values from
#' ## other data frame-like objects. Here, we will transfer the value of these variables
#' ## with dplyr::left_join(), but first we need to give covariates a long format with tidyr
#' ndvi_mean_long <- ndvi_mean %>%
#' sf::st_drop_geometry() %>%
#' tidyr::pivot_longer(-pentad, names_to = "year", values_to = "NDVI_mean") %>%
#' mutate(year = gsub("NDVI_", "", year))
#'
#' ndvi_sd_long <- ndvi_sd %>%
#' sf::st_drop_geometry() %>%
#' tidyr::pivot_longer(-pentad, names_to = "year", values_to = "NDVI_sd") %>%
#' mutate(year = gsub("NDVI_", "", year))
#'
#' # Transfer variables via join
#' occur_data_ee <- occur_data$site %>%
#' dplyr::left_join(ndvi_mean_long, by = c("pentad", "year")) %>%
#' dplyr::left_join(ndvi_sd_long, by = c("pentad", "year"))
#'
#' summary(occur_data_ee)
#'
#' }
abapToOccuR <- function(abap_data, occasion, pentads = NULL, proj_coords = TRUE){

  # occuR is only needed for the downstream model fitting, so a missing
  # installation is reported as a warning rather than an error.
  if (!requireNamespace("occuR", quietly = TRUE)) {
    warning("Package occuR doesn't seem to be installed. We recommend installing it if you are using this function.")
  }

  # Create visit data: one row per ABAP card, with integer `site` ids
  # (one per pentad, in pentad/date order)
  visit_data <- abap_data %>%
    dplyr::arrange(Pentad, StartDate) %>%
    dplyr::group_by(Pentad) %>%
    dplyr::mutate(site = dplyr::cur_group_id()) %>%
    dplyr::ungroup()

  # Integer `occasion` index derived from the user-supplied season column
  visit_data <- visit_data %>%
    dplyr::group_by(!!rlang::sym(occasion)) %>%
    dplyr::group_by(occasion = dplyr::cur_group_id()) %>%
    dplyr::ungroup()

  # Sequential visit number within each site
  visit_data <- visit_data %>%
    dplyr::group_by(site) %>%
    dplyr::mutate(visit = dplyr::row_number()) %>%
    dplyr::ungroup()

  # Detection/non-detection: ABAP codes non-detection as "-"
  visit_data <- visit_data %>%
    dplyr::mutate(obs = ifelse(Spp == "-", 0L, 1L))

  # Create additional covariates (total observation hours and day of year)
  visit_data <- visit_data %>%
    dplyr::rename(hours = TotalHours) %>%
    dplyr::mutate(jday = lubridate::yday(StartDate))

  # Select columns
  visit_data <- visit_data %>%
    dplyr::select(pentad = Pentad, !!rlang::sym(occasion), site, occasion, visit, obs, hours, jday)

  # Create site data. BUG FIX: the season column was previously hard-coded as
  # `year`, which failed whenever `occasion` named a different column; inject
  # the user-supplied occasion column instead (identical behavior when
  # occasion == "year").
  site_data <- visit_data %>%
    dplyr::distinct(pentad, !!rlang::sym(occasion), site, occasion)

  # Extract spatial data (pentad centroids) when pentads are supplied
  if (!is.null(pentads)) {
    pentad_id <- unique(abap_data$Pentad)
    sf::st_agr(pentads) <- "constant"

    # African Albers Equal Area, suitable for Southern Africa (see roxygen)
    aeaproj <- "+proj=aea +lat_1=20 +lat_2=-23 +lat_0=0 +lon_0=25 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs"

    if (isTRUE(proj_coords)) {
      pentads <- sf::st_transform(pentads, aeaproj)
    }

    # Pentad codes ordered to match site_data
    ids <- pentads %>%
      dplyr::filter(pentad %in% pentad_id) %>%
      dplyr::arrange(match(pentad, site_data$pentad)) %>%
      dplyr::pull(pentad)

    # Centroid coordinates (X, Y) for each pentad
    pentad_xy <- pentads %>%
      dplyr::filter(pentad %in% pentad_id) %>%
      dplyr::arrange(match(pentad, site_data$pentad)) %>%
      sf::st_centroid() %>%
      sf::st_coordinates() %>%
      as.data.frame() %>%
      dplyr::mutate(pentad = ids)

    site_data <- site_data %>%
      dplyr::left_join(pentad_xy, by = "pentad")
  }

  return(list(site = data.table::as.data.table(site_data),
              visit = data.table::as.data.table(visit_data)))
}
|
library(monocle)
library(xacHelper)
library(grid)
library(mcclust)
library(plyr)
#
# # load('./RData/na_sim_data_robustness_dpt_slicer_wishbone.RData')
#
##########################################################################################################################################################################
# perform the accuracy analysis with the true pseudotime and branch
##########################################################################################################################################################################
# ICA_cds_downsampled_cells_ordered_0.8
# cds_downsampled_cells_ordered_0.8
# Lineage membership in the reference data set: by construction the first
# 400 columns of absolute_cds are the neuron lineage and the next 400 the
# astrocyte lineage (presumably the simulated data loaded above -- confirm).
neuron_cells <- colnames(absolute_cds)[1:400]
astrocyte_cells <- colnames(absolute_cds)[401:800]
# Cells assigned to State 1 (the root/pre-branch state) in the reference tree
root_state <- row.names(subset(pData(absolute_cds), State == 1))
#' Re-root a downsampled CellDataSet so its pseudotime starts from the same
#' root state as the reference trajectory.
#'
#' @param cds A monocle CellDataSet with a `State` column in pData.
#' @param root_state Character vector of cell names belonging to the
#'   reference root state.
#' @return The re-ordered (and, if needed, trimmed) CellDataSet.
reorder_cell_cds <- function (cds, root_state) {
  # Map the original root state onto the states of `cds` by counting how many
  # reference root cells fall into each of the (up to three) states.
  overlap_vec <- vapply(1:3, function(state) {
    length(intersect(row.names(pData(cds)[pData(cds)$State == state, ]), root_state))
  }, integer(1))

  # BUG FIX: which(overlap_vec == max(overlap_vec)) returns *all* tied states,
  # so a vector could be passed to orderCells(root_state = ...); which.max()
  # always yields a single state index.
  max_ind <- which.max(overlap_vec)

  # If the chosen state already contains a cell at pseudotime 0, the tree is
  # already rooted there -- skip the expensive re-ordering.
  if (0 %in% pData(cds)[pData(cds)$State == max_ind, 'Pseudotime'])
    return(cds)

  cds <- orderCells(cds, root_state = max_ind)

  # Collapse spurious extra branches so downstream comparisons see <= 3 states
  if (length(unique(pData(cds)$State)) > 3)
    cds <- trimTree(cds)

  print('pass') # progress marker (kept from original)
  return(cds)
}
# Re-root every downsampled trajectory (80% downsampling and progressive
# downsampling) so pseudotime is measured from the same root state as in the
# full reference data set.
cds_downsampled_cells_ordered_0.8_update <- lapply(cds_downsampled_cells_ordered_0.8, reorder_cell_cds, root_state)
cds_downsampled_cells_ordered_update <- lapply(cds_downsampled_cells_ordered, reorder_cell_cds, root_state)
#' Compare two monocle trajectories over their shared cells.
#'
#' Returns per-lineage pseudotime correlations (Pearson and Kendall, averaged
#' as absolute values) and branch-assignment agreement metrics. Relies on the
#' globals `neuron_cells`, `astrocyte_cells` and `calClusteringMetrics`.
#'
#' @param cds_1,cds_2 CellDataSets to compare.
#' @return List with `cor`, `kendall_tau`, `cluster` (metrics data frame),
#'   `raw_cor` and `raw_kendall_tau`.
pairwise_cal_benchmark_res <- function(cds_1, cds_2) {
  # Collapse spurious extra branches first so the cell overlap is computed on
  # the final trees. (The original computed the overlap before trimming as
  # well and discarded it -- dead code removed.)
  if (length(unique(pData(cds_1)$State)) > 3) {
    cds_1 <- trimTree(cds_1, num_paths = 2, min_branch_thrsld = 0.1)
  }
  if (length(unique(pData(cds_2)$State)) > 3) {
    cds_2 <- trimTree(cds_2, num_paths = 2, min_branch_thrsld = 0.1)
  }
  overlap_cells <- intersect(colnames(cds_1), colnames(cds_2))

  # Pseudotime of the shared cells, split by lineage
  neuron_t_1 <- pData(cds_1[, intersect(overlap_cells, neuron_cells)])$Pseudotime
  neuron_t_2 <- pData(cds_2[, intersect(overlap_cells, neuron_cells)])$Pseudotime
  astrocyte_t_1 <- pData(cds_1[, intersect(overlap_cells, astrocyte_cells)])$Pseudotime
  astrocyte_t_2 <- pData(cds_2[, intersect(overlap_cells, astrocyte_cells)])$Pseudotime

  # Per-lineage Pearson and Kendall correlations of pseudotime
  cor_res <- c(cor(neuron_t_1, neuron_t_2), cor(astrocyte_t_1, astrocyte_t_2))
  kendall_cor_res <- c(cor(neuron_t_1, neuron_t_2, method = 'kendall', use = 'pairwise.complete.obs'),
                       cor(astrocyte_t_1, astrocyte_t_2, method = 'kendall', use = 'pairwise.complete.obs'))

  # Branch-assignment agreement (rand index, variation of information,
  # adjusted rand index)
  clusters_1 <- as.character(pData(cds_1[, overlap_cells])$State)
  clusters_2 <- as.character(pData(cds_2[, overlap_cells])$State)
  ClusteringMetrics_res <- calClusteringMetrics(as.character(clusters_1), as.character(clusters_2))

  return(list(cor = mean(abs(cor_res)), kendall_tau = mean(abs(kendall_cor_res)),
              cluster = ClusteringMetrics_res, raw_cor = cor_res, raw_kendall_tau = kendall_cor_res))
}
# absolute_cds <- cds_downsampled_cells_ordered_update[[36]]
# Benchmark every re-rooted downsampled monocle2 run against the full
# reference trajectory (absolute_cds)
monocle2_benchmark_res_list <- lapply(cds_downsampled_cells_ordered_0.8_update, function(x) pairwise_cal_benchmark_res(x, absolute_cds)) #
progressive_monocle2_benchmark_res_list <- lapply(cds_downsampled_cells_ordered_update, function(x) pairwise_cal_benchmark_res(x, absolute_cds)) #
# Collapse a list of per-run benchmark results (as returned by
# pairwise_cal_benchmark_res) into one data frame with a row per run:
# pseudotime correlations plus branch-assignment metrics.
collapse_benchmark_list <- function(res_list) {
  data.frame(
    kendall.tau = unlist(lapply(res_list, function(x) x$kendall_tau)),
    pearson_rho = unlist(lapply(res_list, function(x) x$cor)),
    rand_ind = unlist(lapply(res_list, function(x) x$cluster[1, 1])),
    var_inf = unlist(lapply(res_list, function(x) x$cluster[2, 1])),
    adj_rand = unlist(lapply(res_list, function(x) x$cluster[3, 1]))
  )
}

# Same summaries as before, without the copy-pasted data.frame construction
monocle_sampling_res_df <- collapse_benchmark_list(monocle2_benchmark_res_list)
progressive_monocle_sampling_res_df <- collapse_benchmark_list(progressive_monocle2_benchmark_res_list)
# DPT benchmark: compare each 80%-downsampled DPT run against the reference
# monocle trajectory (absolute_cds), per lineage.
dpt_benchmark_res_list <- lapply(seq_along(dpt_cds_downsampled_cells_ordered_0.8), function(x) {
  dpt_res <- dpt_cds_downsampled_cells_ordered_0.8[[x]]
  overlap_cells <- intersect(names(dpt_res$pt), colnames(absolute_cds))

  if (length(overlap_cells) > 0) {
    # BUG FIX: the body previously indexed with the undefined name
    # `overlpa_cells` (typo for the `overlap_cells` defined above).
    neuron_t_1 <- dpt_res$pt[intersect(overlap_cells, neuron_cells)]
    neuron_t_2 <- pData(absolute_cds[, intersect(overlap_cells, neuron_cells)])$Pseudotime
    astrocyte_t_1 <- dpt_res$pt[intersect(overlap_cells, astrocyte_cells)]
    astrocyte_t_2 <- pData(absolute_cds[, intersect(overlap_cells, astrocyte_cells)])$Pseudotime

    # Per-lineage correlations; use = "na.or.complete" for consistency with
    # the progressive variant below (guards against NA pseudotimes)
    cor_res <- c(cor(neuron_t_1, neuron_t_2, use = "na.or.complete"),
                 cor(astrocyte_t_1, astrocyte_t_2, use = "na.or.complete"))
    kendall_cor_res <- c(cor(neuron_t_1, neuron_t_2, method = 'kendall', use = 'pairwise.complete.obs'),
                         cor(astrocyte_t_1, astrocyte_t_2, method = 'kendall', use = 'pairwise.complete.obs'))

    # Branch-assignment agreement: DPT branch labels vs monocle State
    clusters_1 <- as.character(dpt_res$branch[overlap_cells, 1])
    clusters_2 <- as.character(pData(absolute_cds[, overlap_cells])$State)
    ClusteringMetrics_res <- calClusteringMetrics(clusters_1, clusters_2)

    return(list(cor = mean(abs(cor_res)), kendall_tau = mean(abs(kendall_cor_res)),
                cluster = ClusteringMetrics_res, raw_cor = cor_res, raw_kendall_tau = kendall_cor_res))
  } else {
    # No shared cells: return an all-NA placeholder with the same shape
    return(list(cor = NA, kendall_tau = NA,
                cluster = data.frame(randIndex = c(NA, NA, NA),
                                     Type = c("rand index", "variation of information", "adjusted rand index")),
                raw_cor = NA, raw_kendall_tau = NA))
  }
})
# DPT benchmark for the progressive (variable-fraction) downsampling runs
progressive_dpt_benchmark_res_list <- lapply(seq_along(dpt_cds_downsampled_cells_ordered), function(x) {
  dpt_res <- dpt_cds_downsampled_cells_ordered[[x]]
  overlap_cells <- intersect(names(dpt_res$pt), colnames(absolute_cds))

  if (length(overlap_cells) > 0) {
    # BUG FIX: the body previously indexed with the undefined name
    # `overlpa_cells` (typo for the `overlap_cells` defined above).
    neuron_t_1 <- dpt_res$pt[intersect(overlap_cells, neuron_cells)]
    neuron_t_2 <- pData(absolute_cds[, intersect(overlap_cells, neuron_cells)])$Pseudotime
    astrocyte_t_1 <- dpt_res$pt[intersect(overlap_cells, astrocyte_cells)]
    astrocyte_t_2 <- pData(absolute_cds[, intersect(overlap_cells, astrocyte_cells)])$Pseudotime

    # Per-lineage correlations (NA-safe Pearson, pairwise-complete Kendall)
    cor_res <- c(cor(neuron_t_1, neuron_t_2, use = "na.or.complete"),
                 cor(astrocyte_t_1, astrocyte_t_2, use = "na.or.complete"))
    kendall_cor_res <- c(cor(neuron_t_1, neuron_t_2, method = 'kendall', use = 'pairwise.complete.obs'),
                         cor(astrocyte_t_1, astrocyte_t_2, method = 'kendall', use = 'pairwise.complete.obs'))

    # Branch-assignment agreement: DPT branch labels vs monocle State
    clusters_1 <- as.character(dpt_res$branch[overlap_cells, 1])
    clusters_2 <- as.character(pData(absolute_cds[, overlap_cells])$State)
    ClusteringMetrics_res <- calClusteringMetrics(clusters_1, clusters_2)

    return(list(cor = mean(abs(cor_res)), kendall_tau = mean(abs(kendall_cor_res)),
                cluster = ClusteringMetrics_res, raw_cor = cor_res, raw_kendall_tau = kendall_cor_res))
  } else {
    # No shared cells: return an all-NA placeholder with the same shape
    return(list(cor = NA, kendall_tau = NA,
                cluster = data.frame(randIndex = c(NA, NA, NA),
                                     Type = c("rand index", "variation of information", "adjusted rand index")),
                raw_cor = NA, raw_kendall_tau = NA))
  }
})
# One row per DPT run: pseudotime correlations and branch-assignment metrics
# (cluster rows are: 1 = rand index, 2 = variation of information,
# 3 = adjusted rand index, per the placeholder built above)
dpt_sampling_res_df <- data.frame(kendall.tau = unlist(lapply(dpt_benchmark_res_list, function(x) x$kendall_tau)),
                                  pearson_rho = unlist(lapply(dpt_benchmark_res_list, function(x) x$cor)),
                                  rand_ind = unlist(lapply(dpt_benchmark_res_list, function(x) x$cluster[1, 1])),
                                  var_inf = unlist(lapply(dpt_benchmark_res_list, function(x) x$cluster[2, 1])),
                                  adj_rand = unlist(lapply(dpt_benchmark_res_list, function(x) x$cluster[3, 1]))
)
# Same summary for the progressive-downsampling DPT runs
progressive_dpt_sampling_res_df <- data.frame(kendall.tau = unlist(lapply(progressive_dpt_benchmark_res_list, function(x) x$kendall_tau)),
                                              pearson_rho = unlist(lapply(progressive_dpt_benchmark_res_list, function(x) x$cor)),
                                              rand_ind = unlist(lapply(progressive_dpt_benchmark_res_list, function(x) x$cluster[1, 1])),
                                              var_inf = unlist(lapply(progressive_dpt_benchmark_res_list, function(x) x$cluster[2, 1])),
                                              adj_rand = unlist(lapply(progressive_dpt_benchmark_res_list, function(x) x$cluster[3, 1]))
)
# wishbone_res
# fraction_wishbone_res
# Wishbone benchmark: compare each wishbone run (80% downsampling) against
# the reference trajectory, per lineage.
wishbone_benchmark_res_list <- lapply(unique(wishbone_res$run), function(ind) {
  message(ind)

  # Wishbone results for this run, indexed by cell name (column 1)
  subset_wishbone_res <- subset(wishbone_res, run == ind)
  row.names(subset_wishbone_res) <- subset_wishbone_res[, 1]
  overlap_cells <- intersect(colnames(absolute_cds), row.names(subset_wishbone_res))

  neuron_t_1 <- pData(absolute_cds)[intersect(overlap_cells, neuron_cells), 'Pseudotime']
  neuron_t_2 <- subset_wishbone_res[intersect(overlap_cells, neuron_cells), 'trajectory']
  astrocyte_t_1 <- pData(absolute_cds)[intersect(overlap_cells, astrocyte_cells), 'Pseudotime']
  astrocyte_t_2 <- subset_wishbone_res[intersect(overlap_cells, astrocyte_cells), 'trajectory']

  cor_res <- c(cor(neuron_t_1, neuron_t_2), cor(astrocyte_t_1, astrocyte_t_2))
  kendall_cor_res <- c(cor(neuron_t_1, neuron_t_2, method = 'kendall', use = 'pairwise.complete.obs'),
                       cor(astrocyte_t_1, astrocyte_t_2, method = 'kendall', use = 'pairwise.complete.obs'))

  # If the trajectory runs in the opposite direction (negative correlation),
  # re-anchor wishbone pseudotime at the reference start cell and recompute.
  # BUG FIX: `cor_res` has length 2, so the scalar `if (cor_res < 0 & ...)`
  # was invalid (an error in R >= 4.2); use any() as the progressive variant
  # below does, with na.rm to tolerate NA correlations.
  if (any(cor_res < 0, na.rm = TRUE) && any(is.finite(cor_res))) {
    start_cell_id <- which(neuron_t_1 == min(neuron_t_1, na.rm = T)) # cell with smallest reference pseudotime
    neuron_t_2_update <- abs(neuron_t_2 - neuron_t_2[start_cell_id])
    start_cell_id <- which(astrocyte_t_1 == min(astrocyte_t_1, na.rm = T))
    astrocyte_t_2_update <- abs(astrocyte_t_2 - astrocyte_t_2[start_cell_id])
    cor_res <- c(cor(neuron_t_1, neuron_t_2_update), cor(astrocyte_t_1, astrocyte_t_2_update))
    kendall_cor_res <- c(cor(neuron_t_1, neuron_t_2_update, method = 'kendall', use = 'pairwise.complete.obs'),
                         cor(astrocyte_t_1, astrocyte_t_2_update, method = 'kendall', use = 'pairwise.complete.obs'))
  }

  # Branch-assignment agreement: monocle State vs wishbone branch label
  clusters_1 <- as.character(pData(absolute_cds)[overlap_cells, 'State'])
  clusters_2 <- as.character(subset_wishbone_res[overlap_cells, 'branch'])
  ClusteringMetrics_res <- calClusteringMetrics(clusters_1, clusters_2)

  return(list(cor = mean(abs(cor_res)), kendall_tau = mean(abs(kendall_cor_res)),
              cluster = ClusteringMetrics_res, raw_cor = cor_res, raw_kendall_tau = kendall_cor_res))
})
# Wishbone benchmark for the progressive (variable-fraction) downsampling runs
progressive_wishbone_benchmark_res_list <- lapply(unique(fraction_wishbone_res$run), function(ind) {
  message(ind)

  # Wishbone results for this run, indexed by cell name (column 1)
  subset_wishbone_res <- subset(fraction_wishbone_res, run == ind)
  row.names(subset_wishbone_res) <- subset_wishbone_res[, 1]
  overlap_cells <- intersect(colnames(absolute_cds), row.names(subset_wishbone_res))

  neuron_t_1 <- pData(absolute_cds)[intersect(overlap_cells, neuron_cells), 'Pseudotime']
  neuron_t_2 <- subset_wishbone_res[intersect(overlap_cells, neuron_cells), 'trajectory']
  astrocyte_t_1 <- pData(absolute_cds)[intersect(overlap_cells, astrocyte_cells), 'Pseudotime']
  astrocyte_t_2 <- subset_wishbone_res[intersect(overlap_cells, astrocyte_cells), 'trajectory']

  cor_res <- c(cor(neuron_t_1, neuron_t_2), cor(astrocyte_t_1, astrocyte_t_2))
  kendall_cor_res <- c(cor(neuron_t_1, neuron_t_2, method = 'kendall', use = 'pairwise.complete.obs'),
                       cor(astrocyte_t_1, astrocyte_t_2, method = 'kendall', use = 'pairwise.complete.obs'))

  # If the trajectory runs in the opposite direction (negative correlation),
  # re-anchor wishbone pseudotime at the reference start cell and recompute.
  # BUG FIX: any(cor_res < 0) returns NA (and `if` then errors) when cor_res
  # contains NA; add na.rm = TRUE and short-circuit with &&.
  if (any(cor_res < 0, na.rm = TRUE) && any(is.finite(cor_res))) {
    start_cell_id <- which(neuron_t_1 == min(neuron_t_1, na.rm = T)) # cell with smallest reference pseudotime
    neuron_t_2_update <- abs(neuron_t_2 - neuron_t_2[start_cell_id])
    start_cell_id <- which(astrocyte_t_1 == min(astrocyte_t_1, na.rm = T))
    astrocyte_t_2_update <- abs(astrocyte_t_2 - astrocyte_t_2[start_cell_id])
    cor_res <- c(cor(neuron_t_1, neuron_t_2_update), cor(astrocyte_t_1, astrocyte_t_2_update))
    kendall_cor_res <- c(cor(neuron_t_1, neuron_t_2_update, method = 'kendall', use = 'pairwise.complete.obs'),
                         cor(astrocyte_t_1, astrocyte_t_2_update, method = 'kendall', use = 'pairwise.complete.obs'))
  }

  # Branch-assignment agreement: monocle State vs wishbone branch label
  clusters_1 <- as.character(pData(absolute_cds)[overlap_cells, 'State'])
  clusters_2 <- as.character(subset_wishbone_res[overlap_cells, 'branch'])
  ClusteringMetrics_res <- calClusteringMetrics(clusters_1, clusters_2)

  return(list(cor = mean(abs(cor_res)), kendall_tau = mean(abs(kendall_cor_res)),
              cluster = ClusteringMetrics_res, raw_cor = cor_res, raw_kendall_tau = kendall_cor_res))
})
wishbone_sampling_res_df <- data.frame(kendall.tau = unlist(lapply(wishbone_benchmark_res_list, function(x) x$kendall_tau)),
pearson_rho = unlist(lapply(wishbone_benchmark_res_list, function(x) x$cor)),
rand_ind = unlist(lapply(wishbone_benchmark_res_list, function(x) x$cluster[1, 1])),
var_inf = unlist(lapply(wishbone_benchmark_res_list, function(x) x$cluster[2, 1])),
adj_rand = unlist(lapply(wishbone_benchmark_res_list, function(x) x$cluster[3, 1]))
)
progressive_wishbone_sampling_res_df <- data.frame(kendall.tau = unlist(lapply(progressive_wishbone_benchmark_res_list, function(x) x$kendall_tau)),
pearson_rho = unlist(lapply(progressive_wishbone_benchmark_res_list, function(x) x$cor)),
rand_ind = unlist(lapply(progressive_wishbone_benchmark_res_list, function(x) x$cluster[1, 1])),
var_inf = unlist(lapply(progressive_wishbone_benchmark_res_list, function(x) x$cluster[2, 1])),
adj_rand = unlist(lapply(progressive_wishbone_benchmark_res_list, function(x) x$cluster[3, 1]))
)
downsamling_marker_all_sampling_res_df <- Reduce(rbind , list(dpt_sampling_res_df, monocle_sampling_res_df, wishbone_sampling_res_df, dpt_sampling_res_df)) # ICA_sampling_res_df,
downsamling_marker_all_sampling_res_df$Type <- c(rep('dpt', nrow(dpt_sampling_res_df)), rep('monocle2', nrow(monocle_sampling_res_df)), rep('wishbone', nrow(wishbone_sampling_res_df)), rep('monocle1', nrow(dpt_sampling_res_df)))#, rep('Monocle1', 10000)
downsamling_marker_all_sampling_res_df$Type <- factor(downsamling_marker_all_sampling_res_df$Type, levels = c('monocle2', 'monocle1', "dpt", "wishbone")) #dpt (non-uniform branch)
pdf(paste(SI_fig_dir, benchmark_type, 'real_simulation_pearson_rho_comparison_robustness.pdf', sep = ''), width = 1, height = 1.5)
qplot(Type, pearson_rho, data = downsamling_marker_all_sampling_res_df, color = Type, geom = 'boxplot') + ylab("Pearson's Rho") + monocle_theme_opts() + xlab('') +
nm_theme() + theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1) + scale_color_manual(values = software_custom_color_scale)
dev.off()
pdf(paste(SI_fig_dir, benchmark_type, 'real_simulation_all_kendall_tau_comparison_robustness.pdf', sep = ''), width = 1, height = 1.5)
qplot(Type, kendall.tau, data = downsamling_marker_all_sampling_res_df, color = Type, geom = 'boxplot') + ylab("Kendall's tau") + monocle_theme_opts() + xlab('') +
nm_theme() + theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1) + scale_color_manual(values = software_custom_color_scale)
dev.off()
pdf(paste(SI_fig_dir, benchmark_type, 'real_simulation_ArI_comparison_robustness.pdf', sep = ''), width = 1, height = 1.5)
qplot(Type, adj_rand, data = downsamling_marker_all_sampling_res_df, color = Type, geom = 'boxplot') + ylab("Adjusted Rand index") + monocle_theme_opts() + xlab('') +
nm_theme() + theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1) + scale_color_manual(values = software_custom_color_scale)
dev.off()
progressive_all_valid_cell_sampling_res_df <- Reduce(rbind , list(progressive_dpt_sampling_res_df, progressive_monocle_sampling_res_df, progressive_wishbone_sampling_res_df, progressive_dpt_sampling_res_df)) # ICA_sampling_res_df,
progressive_all_valid_cell_sampling_res_df$proportion <- c(rep(downsampled_proportions, 2), names(cds_downsampled_cells_ordered)[unique(fraction_wishbone_res$run)], downsampled_proportions)
progressive_all_valid_cell_sampling_res_df$Type <- c(rep('dpt', 36), rep('monocle2', 36), rep('wishbone', length(unique(fraction_wishbone_res$run))), rep('monocle1', 36))
progressive_all_valid_cell_sampling_res_df$Type <- factor(progressive_all_valid_cell_sampling_res_df$Type, levels = c('monocle2', 'monocle1', "dpt", "wishbone"))
progressive_all_valid_cell_sampling_res_df$se <- 0.1
pdf(paste(SI_fig_dir, benchmark_type, 'pearson_real_simulation_rho_comparison_cell_downsampling.pdf', sep = ''), width = 2.5, height = 2)
qplot(proportion, abs(progressive_all_valid_cell_sampling_res_df$pearson_rho), data = progressive_all_valid_cell_sampling_res_df, color = Type, size = 1, geom = 'boxplot') +
xlab('Proportion of original cells') + ylab("Pearson's Rho") + nm_theme() + scale_size(range = c(0.1, 1)) + monocle_theme_opts() + ylim(0, 1) + theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1)
dev.off()
pdf(paste(SI_fig_dir, benchmark_type, 'rand_index_comparison_real_simulation_downsampling.pdf', sep = ''), width = 2.5, height = 2)
qplot(proportion, abs(progressive_all_valid_cell_sampling_res_df$adj_rand), data = progressive_all_valid_cell_sampling_res_df, color = Type, size = 1, geom = 'boxplot') +
xlab('Proportion of original cells') + ylab("Adjusted rand index") + scale_size(range = c(0.1, 1)) + nm_theme() + monocle_theme_opts() + ylim(0, 1) + theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1)
dev.off()
progressive_process_cell_sampling_res_df <- ddply(progressive_all_valid_cell_sampling_res_df, .(Type, proportion), summarize,
mean_kendall.tau = mean(abs(kendall.tau), na.rm = T),
sd_kendall.tau = sd(abs(kendall.tau), na.rm = T),
mean_pearson_rho = mean(abs(pearson_rho), na.rm = T),
sd_pearson_rho = sd(abs(pearson_rho), na.rm = T),
mean_adj_rand = mean(abs(adj_rand), na.rm = T),
sd_adj_rand = sd(abs(adj_rand), na.rm = T),
se = mean(se))
limits <- aes(ymax = mean_adj_rand + sd_adj_rand, ymin=mean_adj_rand - sd_adj_rand)
pdf(paste(SI_fig_dir, benchmark_type, 'rand_index_real_simulation_comparison_cell_downsampling2.pdf', sep = ''), width = 2.5, height = 2)
ggplot(aes(proportion, mean_adj_rand), data = progressive_process_cell_sampling_res_df) +
geom_point(aes(color = Type), position=position_dodge(width=0.1), size = 0.5) + facet_wrap(~Type) +
geom_errorbar(aes(color = Type, ymax = mean_adj_rand + sd_adj_rand, ymin=mean_adj_rand - sd_adj_rand), position=position_dodge(width=0.9), size = 0.5) +
xlab('Proportion of original cells') + ylab("Adjusted Rand index") + scale_size(range = c(0.1, 1)) + nm_theme() + monocle_theme_opts() + ylim(0, 1) +
theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1) + scale_color_manual(values = software_custom_color_scale)
dev.off()
pdf(paste(SI_fig_dir, benchmark_type, 'pearson_rho_real_simulation_comparison_cell_downsampling2.pdf', sep = ''), width = 2.5, height = 2)
ggplot(aes(proportion, mean_pearson_rho), data = progressive_process_cell_sampling_res_df) +
geom_point(aes(color = Type), position=position_dodge(width=0.9), size = 0.5) + facet_wrap(~Type) +
geom_errorbar(aes(color = Type, ymax = mean_pearson_rho + sd_pearson_rho, ymin=mean_pearson_rho - sd_pearson_rho), position=position_dodge(width=0.1), size = 0.5) +
xlab('Proportion of original cells') + ylab("Pearson's Rho") + scale_size(range = c(0.1, 1)) + nm_theme() + monocle_theme_opts() + ylim(0, 1) +
theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1) + scale_color_manual(values = software_custom_color_scale)
dev.off()
pdf(paste(SI_fig_dir, benchmark_type, 'kendall_tau_real_simulation_comparison_cell_downsampling2.pdf', sep = ''), width = 2.5, height = 2)
ggplot(aes(proportion, mean_kendall.tau), data = progressive_process_cell_sampling_res_df) +
geom_point(aes(color = Type), position=position_dodge(width=0.9), size = 0.5) + facet_wrap(~Type) +
geom_errorbar(aes(color = Type, ymax = mean_kendall.tau + sd_kendall.tau, ymin=mean_kendall.tau - sd_kendall.tau), position=position_dodge(width=0.1), size = 0.5) +
xlab('Proportion of original cells') + ylab("Kendall's Tau") + scale_size(range = c(0.1, 1)) + nm_theme() + monocle_theme_opts() + ylim(0, 1) +
theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1) + scale_color_manual(values = software_custom_color_scale)
dev.off()
pdf(paste(SI_fig_dir, benchmark_type, 'kendall_tau_real_simulation_comparison_cell_downsampling2_cole.pdf', sep = ''), width = 3, height = 1)
ggplot(aes(proportion, mean_kendall.tau), data = progressive_process_cell_sampling_res_df) +
geom_point(aes(color = Type), position=position_dodge(width=0.9), size = 0.5) + facet_wrap(~Type) +
geom_errorbar(aes(color = Type, ymax = mean_kendall.tau + sd_kendall.tau, ymin=mean_kendall.tau - sd_kendall.tau), position=position_dodge(width=0.1), size = 0.5) +
xlab('Proportion of original cells') + ylab("Kendall's Tau") + scale_size(range = c(0.1, 1)) + nm_theme() + monocle_theme_opts() + ylim(0, 1) +
theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1) + scale_color_manual(values = software_custom_color_scale)
dev.off()
##########################################################################################################################################################################
# save the dataset
##########################################################################################################################################################################
save.image(paste('./RData/', benchmark_type, '_real_simulation_na.RData', sep = ''))
##########################################################################################################################################################################
# use diffusion distance to perform the trajectory reconstruction
##########################################################################################################################################################################
library(monocle)
library(destiny)
library(mcclust)
library(plyr)
extract_ddrtree_ordering_xj <- function(dp_mst, dp = dp, root_cell, verbose=T) # dp,
{
nei <- NULL
type <- NULL
pseudo_time <- NULL
curr_state <- 1
res <- list(subtree = dp_mst, root = root_cell)
states = rep(1, ncol(dp))
names(states) <- V(dp_mst)$name
pseudotimes = rep(0, ncol(dp))
names(pseudotimes) <- V(dp_mst)$name
parents = rep(NA, ncol(dp))
names(parents) <- V(dp_mst)$name
mst_traversal <- graph.dfs(dp_mst,
root=root_cell,
neimode = "all",
unreachable=FALSE,
father=TRUE)
mst_traversal$father <- as.numeric(mst_traversal$father)
curr_state <- 1
state_stat <- table(degree(dp_mst)[degree(dp_mst) > 2])
state_total_num <- sum(state_stat * 2:(length(state_stat) + 1))
node_num <- state_total_num + 2
state_mst <- make_empty_graph(n = node_num) #number of states
state_mst <- add_edges(state_mst, c(node_num, 1))
for (i in 1:length(mst_traversal$order)){
curr_node = mst_traversal$order[i]
curr_node_name = V(dp_mst)[curr_node]$name
if (is.na(mst_traversal$father[curr_node]) == FALSE){
parent_node = mst_traversal$father[curr_node]
parent_node_name = V(dp_mst)[parent_node]$name
parent_node_pseudotime = pseudotimes[parent_node_name]
parent_node_state = states[parent_node_name]
curr_node_pseudotime = parent_node_pseudotime + dp[curr_node_name, parent_node_name]
if (degree(dp_mst, v=parent_node_name) > 2){
curr_state <- curr_state + 1
# if(curr_state >= 1405){
# # browser()
# }
message('current state is ', curr_state, 'parent state is ', parent_node_state)
state_mst <- add_edges(state_mst, c(parent_node_state, curr_state))
}
}else{
parent_node = NA
parent_node_name = NA
curr_node_pseudotime = 0
}
curr_node_state = curr_state
pseudotimes[curr_node_name] <- curr_node_pseudotime
states[curr_node_name] <- curr_node_state
parents[curr_node_name] <- parent_node_name
}
ordering_df <- data.frame(sample_name = names(states),
cell_state = factor(states),
pseudo_time = as.vector(pseudotimes),
parent = parents)
row.names(ordering_df) <- ordering_df$sample_name
# ordering_df <- plyr::arrange(ordering_df, pseudo_time)
E(state_mst)$weight <- c(0.1, table(ordering_df$cell_state))
V(state_mst)$name <- as.character(1:node_num)
state_mst <- as.undirected(state_mst)
return(list(ordering_df = ordering_df, state_mst = state_mst))
state_mst
}
##################################################################################################################################################################
# na simulation dataset (not working below, we really need to have a better method for assigning branches based on some graph operations )
##################################################################################################################################################################
# root_cell <- paste('Y_', test@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex[row.names(subset(pData(valid_subset_GSE72857_cds2), Pseudotime == 0)), 1], sep = '')
colnames(test)
test_res <- extract_ddrtree_ordering_xj(test@minSpanningTree, cellPairwiseDistances(test),
root_cell = 1, verbose=T) # 'Cell_2'
next_node <<- 0
dp_mst <- test_res$state_mst
dp_mst <- as.undirected(dp_mst)
dp = distances(dp_mst, v = V(dp_mst), to = V(dp_mst), weights = NULL)
# res <- monocle:::pq_helper(dp_mst, use_weights=T, root_node=2)
res <- pq_helper(dp_mst, use_weights=T, root_node=which(degree(dp_mst) == 1)[1])
if(is.null(branch_num))
branch_num <- sum(degree(dp_mst) > 2) + 1
branch_num <- 2 #6 cell types in the end
order_list <- monocle:::extract_good_branched_ordering(res$subtree, res$root, dp, branch_num, FALSE)
cc_ordering <- order_list$ordering_df
row.names(cc_ordering) <- cc_ordering$sample_name
data_ori <- as.matrix(t(exprs(test)))
data_uniq <- data_ori[!duplicated(data_ori), ]
dm <- DiffusionMap(as.matrix(data_ori))
DPT_res <- DPT(dm)
cell_num <- length(DPT_res$DPT1)
#
# dp <- DPT_res[1:cell_num, 1:cell_num]
# dimnames(dp) <- list(colnames(test)[!duplicated(data_ori)], colnames(test)[!duplicated(data_ori)])
# gp <- graph.adjacency(dp, mode = "undirected", weighted = TRUE)
# dp_mst <- minimum.spanning.tree(gp)
#
# root_cell <- row.names(subset(pData(test), Pseudotime == 0))
# test_res <- extract_ddrtree_ordering_xj(dp_mst, dp,
# root_cell = root_cell, verbose=T)
plot(test_res$state_mst, layout = layout_as_tree(test_res$state_mst))
qplot(DPT_res@dm$DC1, DPT_res@dm$DC2, color = pData(test)$State)
qplot(DPT_res@dm$DC1, DPT_res@dm$DC2, color = cc_ordering[as.character(test_res$ordering_df[colnames(test), 'cell_state']), 'cell_state'])
#
# calClusteringMetrics(cc_ordering[as.character(test_res$ordering_df[colnames(valid_subset_GSE72857_cds2), 'cell_state']), 'cell_state'], pData(valid_subset_GSE72857_cds2)$cell_type2)
# calClusteringMetrics(pData(valid_subset_GSE72857_cds2)$State, pData(valid_subset_GSE72857_cds2)$cell_type2)
#
# #load the data from the Fabian group's processed results:
# load('./script_for_reproduce/MARSseq_analysis_tutorial.RData')
# calClusteringMetrics(pData(valid_subset_GSE72857_cds2)$cluster, cluster.id[cluster.id[, 1] != 19, 1]) #confirm that index matches up
# calClusteringMetrics(branching[cluster.id[, 1] != 19], pData(valid_subset_GSE72857_cds2)$cell_type2) #check the result
#
# ARI_branches <- data.frame(ARI = c(0.5923145, 0.6566774, 0.7225239), Type = c('DPT (original)', 'DPT + Monocle 2', 'Monocle 2'))
# qplot(ARI, geom = 'bar', data = ARI_branches, stat = 'identity')
#
# pdf(paste(SI_fig_dir, 'MARS_seq_ARI_branch_cluster.pdf', sep = ''), width = 2, height = 1)
# ggplot(data = ARI_branches, aes(Type, ARI)) + geom_bar(stat = 'identity', aes(fill = Type)) + nm_theme() + theme(axis.text.x = element_text(angle = 30, hjust = 1)) + xlab('')
# dev.off()
#
# pdf(paste(SI_fig_dir, 'MARS_seq_pseudotime_correspondence.pdf', sep = ''), width = 1, height = 1)
# qplot(DPT_res$DPT24, pData(valid_subset_GSE72857_cds2)$Pseudotime, color = pData(valid_subset_GSE72857_cds2)$State, size = I(0.25)) + xlab('DPT diffusion \n pseudotime') + ylab('Monocle 2 \n pseudotime') + nm_theme()
# dev.off()
#
# pdf(paste(SI_fig_dir, 'MARS_seq_pseudotime_correspondence_helper.pdf', sep = ''))
# qplot(DPT_res$DPT24, pData(valid_subset_GSE72857_cds2)$Pseudotime, color = pData(valid_subset_GSE72857_cds2)$State, size = I(0.25)) + xlab('DPT diffusion pseudotime') + ylab('Monocle 2 pseudotime')
# dev.off()
diameter_path_ordering <- function(cds, num_paths, root_state) {
root_cell <- monocle::select_root_cell(cds, root_state = root_state)
dp_mst <- cds@minSpanningTree
dp <- cellPairwiseDistances(cds)
diamter_path_old <- get_diameter(dp_mst)
branchpoint <- c()
dp_mst_tmp <- delete.vertices(dp_mst, diamter_path_old$name)
for(i in 1:(num_paths - 1)) {
diamter_path_current <- get_diameter(dp_mst_tmp)
start_cell_distance <- distances(dp_mst, v = diamter_path_current$name[1], to = diamter_path_old$name)
branchpoint <- c(branchpoint, colnames(start_cell_distance)[which.min(start_cell_distance)]) #find the ce
diamter_path_old <- diamter_path_current
dp_mst_tmp <- delete.vertices(dp_mst_tmp, diamter_path_old$name)
}
nei <- NULL
type <- NULL
pseudo_time <- NULL
curr_state <- 1
res <- list(subtree = dp_mst, root = root_cell)
states = rep(1, ncol(dp))
names(states) <- V(dp_mst)$name
pseudotimes = rep(0, ncol(dp))
names(pseudotimes) <- V(dp_mst)$name
parents = rep(NA, ncol(dp))
names(parents) <- V(dp_mst)$name
mst_traversal <- graph.dfs(dp_mst,
root=root_cell,
neimode = "all",
unreachable=FALSE,
father=TRUE)
mst_traversal$father <- as.numeric(mst_traversal$father)
curr_state <- 1
state_stat <- table(degree(dp_mst)[degree(dp_mst) > 2])
state_total_num <- sum(state_stat * 2:(length(state_stat) + 1))
node_num <- state_total_num + 2
state_mst <- make_empty_graph(n = node_num) #number of states
state_mst <- add_edges(state_mst, c(node_num, 1))
for (i in 1:length(mst_traversal$order)){
curr_node = mst_traversal$order[i]
curr_node_name = V(dp_mst)[curr_node]$name
if (is.na(mst_traversal$father[curr_node]) == FALSE){
parent_node = mst_traversal$father[curr_node]
parent_node_name = V(dp_mst)[parent_node]$name
parent_node_pseudotime = pseudotimes[parent_node_name]
parent_node_state = states[parent_node_name]
curr_node_pseudotime = parent_node_pseudotime + dp[curr_node_name, parent_node_name]
if (degree(dp_mst, v=parent_node_name) > 2 & parent_node_name %in% branchpoint){
curr_state <- curr_state + 1
# if(curr_state >= 1405){
# # browser()
# }
message('current state is ', curr_state, 'parent state is ', parent_node_state)
state_mst <- add_edges(state_mst, c(parent_node_state, curr_state))
}
}else{
parent_node = NA
parent_node_name = NA
curr_node_pseudotime = 0
}
curr_node_state = curr_state
pseudotimes[curr_node_name] <- curr_node_pseudotime
states[curr_node_name] <- curr_node_state
parents[curr_node_name] <- parent_node_name
}
ordering_df <- data.frame(sample_name = names(states),
cell_state = factor(states),
pseudo_time = as.vector(pseudotimes),
parent = parents)
row.names(ordering_df) <- ordering_df$sample_name
# ordering_df <- plyr::arrange(ordering_df, pseudo_time)
E(state_mst)$weight <- c(0.1, table(ordering_df$cell_state))
V(state_mst)$name <- as.character(1:node_num)
state_mst <- as.undirected(state_mst)
# return(list(ordering_df = ordering_df, state_mst = state_mst))
ordering_df
pData(cds)$State <- ordering_df$cell_state
pData(cds)$Pseudotime <- ordering_df$pseudo_time
return(cds)
}
i <- 20
test <- cds_downsampled_cells_ordered_update_dpt[[i]]
test <- reduceDimension(test, max_components = 2, norm_method = 'none', scaling = F, reduction_method = 'DPT')
test <- diameter_path_ordering(test, num_paths = 2, root_state = Root_state(test)) # 'Cell_2'
plot_cell_trajectory(test)
cds_downsampled_cells_ordered_update_diffusion_pseudotime <- lapply(1:length(cds_downsampled_cells_ordered_update_dpt), function(i) {
message('current id is ', i)
test <- cds_downsampled_cells_ordered_update[[i]]
test <- reduceDimension(test, max_components = 2, norm_method = 'none', scaling = F, reduction_method = 'DPT')
tryCatch({
test <- diameter_path_ordering(test, num_paths = 2, root_state = Root_state(test)) # 'Cell_2'
return(test)
}, error = function(e) {
warning('Your initial method throws numerical errors!')
test <- orderCells(test)
return(test)
})
})
cds_downsampled_cells_ordered_0.8_diffusion_pseudotime <- lapply(1:length(cds_downsampled_cells_ordered_0.8_update), function(i) {
message('current id is ', i)
test <- cds_downsampled_cells_ordered_update[[i]]
test <- reduceDimension(test, max_components = 2, norm_method = 'none', scaling = F, reduction_method = 'DPT')
tryCatch({
test <- diameter_path_ordering(test, num_paths = 2, root_state = Root_state(test)) # 'Cell_2'
return(test)
}, error = function(e) {
warning('Your initial method throws numerical errors!')
test <- orderCells(test)
return(test)
})
})
# show the results for enhanced Monocle 2 + DPT
| /Supplementary_scripts/revision_1/revision_1_accuracy_na_simulation_res.R | no_license | lxparadox/monocle2-rge-paper | R | false | false | 37,753 | r | library(monocle)
library(xacHelper)
library(grid)
library(mcclust)
library(plyr)
#
# # load('./RData/na_sim_data_robustness_dpt_slicer_wishbone.RData')
#
##########################################################################################################################################################################
# perform the accuracy analysis with the true pseudotime and branch
##########################################################################################################################################################################
# ICA_cds_downsampled_cells_ordered_0.8
# cds_downsampled_cells_ordered_0.8
neuron_cells <- colnames(absolute_cds)[1:400]
astrocyte_cells <- colnames(absolute_cds)[401:800]
root_state <- row.names(subset(pData(absolute_cds), State == 1))
reorder_cell_cds <- function (cds, root_state) {
#determine the mapping between original state 1/2/3 and new state 1/2/3:
overlap_state_1 <- length(intersect(row.names(pData(cds)[pData(cds)$State == 1, ]), root_state))
overlap_state_2 <- length(intersect(row.names(pData(cds)[pData(cds)$State == 2, ]), root_state))
overlap_state_3 <- length(intersect(row.names(pData(cds)[pData(cds)$State == 3, ]), root_state))
#find the state corresponding to the original root state
overlap_vec <- c(overlap_state_1, overlap_state_2, overlap_state_3)
max_ind <- which(overlap_vec == max(overlap_vec))
if(0 %in% pData(cds)[pData(cds)$State == max_ind, 'Pseudotime']) #avoid recalculation of the Pseudotime
return(cds)
cds = orderCells(cds, root_state = max_ind)
if(length(unique(pData(cds)$State)) > 3)
cds <- trimTree(cds)
print('pass') #
return(cds)
}
cds_downsampled_cells_ordered_0.8_update <- lapply(cds_downsampled_cells_ordered_0.8, reorder_cell_cds, root_state)
cds_downsampled_cells_ordered_update <- lapply(cds_downsampled_cells_ordered, reorder_cell_cds, root_state)
pairwise_cal_benchmark_res <- function(cds_1, cds_2) {
overlpa_cells <- intersect(colnames(cds_1), colnames(cds_2))
if(length(unique(pData(cds_1)$State)) > 3){
cds_1 <- trimTree(cds_1, num_paths = 2, min_branch_thrsld = 0.1)
}
if(length(unique(pData(cds_2)$State)) > 3){
cds_2 <- trimTree(cds_2, num_paths = 2, min_branch_thrsld = 0.1)
}
overlpa_cells <- intersect(colnames(cds_1), colnames(cds_2))
neuron_t_1 <- pData(cds_1[, intersect(overlpa_cells, neuron_cells)])$Pseudotime
neuron_t_2 <- pData(cds_2[, intersect(overlpa_cells, neuron_cells)])$Pseudotime
astrocyte_t_1 <- pData(cds_1[, intersect(overlpa_cells, astrocyte_cells)])$Pseudotime
astrocyte_t_2 <- pData(cds_2[, intersect(overlpa_cells, astrocyte_cells)])$Pseudotime
cor_res <- c(cor(neuron_t_1, neuron_t_2), cor(astrocyte_t_1, astrocyte_t_2))
kendall_cor_res <- c(cor(neuron_t_1, neuron_t_2, method = 'kendall', use = 'pairwise.complete.obs'), cor(astrocyte_t_1, astrocyte_t_2, method = 'kendall', use = 'pairwise.complete.obs'))
#branch assignment:
clusters_1 <- as.character(pData(cds_1[, overlpa_cells])$State)
clusters_2 <- as.character(pData(cds_2[, overlpa_cells])$State)
ClusteringMetrics_res <- calClusteringMetrics(as.character(clusters_1), as.character(clusters_2))
return(list(cor = mean(abs(cor_res)), kendall_tau = mean(abs(kendall_cor_res)), cluster = ClusteringMetrics_res, raw_cor = cor_res, raw_kendall_tau = kendall_cor_res))
}
# absolute_cds <- cds_downsampled_cells_ordered_update[[36]]
monocle2_benchmark_res_list <- lapply(cds_downsampled_cells_ordered_0.8_update, function(x) pairwise_cal_benchmark_res(x, absolute_cds)) #
progressive_monocle2_benchmark_res_list <- lapply(cds_downsampled_cells_ordered_update, function(x) pairwise_cal_benchmark_res(x, absolute_cds)) #
monocle_sampling_res_df <- data.frame(kendall.tau = unlist(lapply(monocle2_benchmark_res_list, function(x) x$kendall_tau)),
pearson_rho = unlist(lapply(monocle2_benchmark_res_list, function(x) x$cor)),
rand_ind = unlist(lapply(monocle2_benchmark_res_list, function(x) x$cluster[1, 1])),
var_inf = unlist(lapply(monocle2_benchmark_res_list, function(x) x$cluster[2, 1])),
adj_rand = unlist(lapply(monocle2_benchmark_res_list, function(x) x$cluster[3, 1]))
)
progressive_monocle_sampling_res_df <- data.frame(kendall.tau = unlist(lapply(progressive_monocle2_benchmark_res_list, function(x) x$kendall_tau)),
pearson_rho = unlist(lapply(progressive_monocle2_benchmark_res_list, function(x) x$cor)),
rand_ind = unlist(lapply(progressive_monocle2_benchmark_res_list, function(x) x$cluster[1, 1])),
var_inf = unlist(lapply(progressive_monocle2_benchmark_res_list, function(x) x$cluster[2, 1])),
adj_rand = unlist(lapply(progressive_monocle2_benchmark_res_list, function(x) x$cluster[3, 1]))
)
#dpt_cds_downsampled_cells_ordered_0.8
dpt_benchmark_res_list <- lapply(1:length(dpt_cds_downsampled_cells_ordered_0.8), function(x) {
dpt_res <- dpt_cds_downsampled_cells_ordered_0.8[[x]]
overlap_cells <- intersect(names(dpt_res$pt), colnames(absolute_cds))
if(length(overlap_cells)) {
neuron_t_1 <- dpt_res$pt[intersect(overlpa_cells, neuron_cells)]#[overlpa_cells, 'DPT']
neuron_t_2 <- pData(absolute_cds[, intersect(overlpa_cells, neuron_cells)])$Pseudotime#[overlpa_cells, 'DPT']
astrocyte_t_1 <- dpt_res$pt[intersect(overlpa_cells, astrocyte_cells)]#[overlpa_cells, 'DPT']
astrocyte_t_2 <- pData(absolute_cds[, intersect(overlpa_cells, astrocyte_cells)])$Pseudotime#[overlpa_cells, 'DPT']
# cor_res.0 <- cor(t_1, t_2, method = 'kendall', use = 'pairwise.complete.obs')
# cor_res.1 <- cor(t_1.1, t_2, method = 'kendall', use = 'pairwise.complete.obs')
# cor_res.2 <- cor(t_1.2, t_2, method = 'kendall', use = 'pairwise.complete.obs')
#
# cor_res <- max(c(cor_res.0, cor_res.1, cor_res.2))
cor_res <- c(cor(neuron_t_1, neuron_t_2), cor(astrocyte_t_1, astrocyte_t_2))
kendall_cor_res <- c(cor(neuron_t_1, neuron_t_2, method = 'kendall', use = 'pairwise.complete.obs'), cor(astrocyte_t_1, astrocyte_t_2, method = 'kendall', use = 'pairwise.complete.obs'))
#branch assignment:
# overlpa_cells_update <- overlpa_cells[intersect(which(as.character(dpt_cds_downsampled_cells_ordered[[x[[1]]]]$pt[overlpa_cells, 'Branch']) %in% c('branch 1', 'branch 2', 'branch 3')),
# which(as.character(dpt_cds_downsampled_cells_ordered[[x[[2]]]]$pt[overlpa_cells, 'Branch']) %in% c('branch 1', 'branch 2', 'branch 3')))] #remove the unassigned 1,2,3, uncertain 1,2,3, cells
clusters_1 <- as.character(dpt_res$branch[overlap_cells, 1])
clusters_2 <- as.character(pData(absolute_cds[, overlap_cells])$State)
ClusteringMetrics_res <- calClusteringMetrics(clusters_1, clusters_2)
return(list(cor = mean(abs(cor_res)), kendall_tau = mean(abs(kendall_cor_res)), cluster = ClusteringMetrics_res, raw_cor = cor_res, raw_kendall_tau = kendall_cor_res))
}
else
return(list(cor = NA, kendall_tau = NA, cluster = data.frame(randIndex = c(NA, NA, NA), Type = c("rand index", "variation of information", "adjusted rand index")), raw_cor = NA, raw_kendall_tau = NA))
})
progressive_dpt_benchmark_res_list <- lapply(1:length(dpt_cds_downsampled_cells_ordered), function(x) {
dpt_res <- dpt_cds_downsampled_cells_ordered[[x]]
overlap_cells <- intersect(names(dpt_res$pt), colnames(absolute_cds))
if(length(overlap_cells)) {
neuron_t_1 <- dpt_res$pt[intersect(overlpa_cells, neuron_cells)]#[overlpa_cells, 'DPT']
neuron_t_2 <- pData(absolute_cds[, intersect(overlpa_cells, neuron_cells)])$Pseudotime#[overlpa_cells, 'DPT']
astrocyte_t_1 <- dpt_res$pt[intersect(overlpa_cells, astrocyte_cells)]#[overlpa_cells, 'DPT']
astrocyte_t_2 <- pData(absolute_cds[, intersect(overlpa_cells, astrocyte_cells)])$Pseudotime#[overlpa_cells, 'DPT']
# cor_res.0 <- cor(t_1, t_2, method = 'kendall', use = 'pairwise.complete.obs')
# cor_res.1 <- cor(t_1.1, t_2, method = 'kendall', use = 'pairwise.complete.obs')
# cor_res.2 <- cor(t_1.2, t_2, method = 'kendall', use = 'pairwise.complete.obs')
#
# cor_res <- max(c(cor_res.0, cor_res.1, cor_res.2))
cor_res <- c(cor(neuron_t_1, neuron_t_2, use = "na.or.complete"), cor(astrocyte_t_1, astrocyte_t_2, use = "na.or.complete"))
kendall_cor_res <- c(cor(neuron_t_1, neuron_t_2, method = 'kendall', use = 'pairwise.complete.obs'), cor(astrocyte_t_1, astrocyte_t_2, method = 'kendall', use = 'pairwise.complete.obs'))
#branch assignment:
# overlpa_cells_update <- overlpa_cells[intersect(which(as.character(dpt_cds_downsampled_cells_ordered[[x[[1]]]]$pt[overlpa_cells, 'Branch']) %in% c('branch 1', 'branch 2', 'branch 3')),
# which(as.character(dpt_cds_downsampled_cells_ordered[[x[[2]]]]$pt[overlpa_cells, 'Branch']) %in% c('branch 1', 'branch 2', 'branch 3')))] #remove the unassigned 1,2,3, uncertain 1,2,3, cells
clusters_1 <- as.character(dpt_res$branch[overlap_cells, 1])
clusters_2 <- as.character(pData(absolute_cds[, overlap_cells])$State)
ClusteringMetrics_res <- calClusteringMetrics(clusters_1, clusters_2)
return(list(cor = mean(abs(cor_res)), kendall_tau = mean(abs(kendall_cor_res)), cluster = ClusteringMetrics_res, raw_cor = cor_res, raw_kendall_tau = kendall_cor_res))
}
else
return(list(cor = NA, kendall_tau = NA, cluster = data.frame(randIndex = c(NA, NA, NA), Type = c("rand index", "variation of information", "adjusted rand index")), raw_cor = NA, raw_kendall_tau = NA))
})
dpt_sampling_res_df <- data.frame(kendall.tau = unlist(lapply(dpt_benchmark_res_list, function(x) x$kendall_tau)),
pearson_rho = unlist(lapply(dpt_benchmark_res_list, function(x) x$cor)),
rand_ind = unlist(lapply(dpt_benchmark_res_list, function(x) x$cluster[1, 1])),
var_inf = unlist(lapply(dpt_benchmark_res_list, function(x) x$cluster[2, 1])),
adj_rand = unlist(lapply(dpt_benchmark_res_list, function(x) x$cluster[3, 1]))
)
progressive_dpt_sampling_res_df <- data.frame(kendall.tau = unlist(lapply(progressive_dpt_benchmark_res_list, function(x) x$kendall_tau)),
pearson_rho = unlist(lapply(progressive_dpt_benchmark_res_list, function(x) x$cor)),
rand_ind = unlist(lapply(progressive_dpt_benchmark_res_list, function(x) x$cluster[1, 1])),
var_inf = unlist(lapply(progressive_dpt_benchmark_res_list, function(x) x$cluster[2, 1])),
adj_rand = unlist(lapply(progressive_dpt_benchmark_res_list, function(x) x$cluster[3, 1]))
)
# wishbone_res
# fraction_wishbone_res
# Benchmark each full-data Wishbone run against the reference Monocle 2
# ordering in `absolute_cds`. For the cells shared by both results we compare
# pseudotime on the neuron and astrocyte lineages (Pearson + Kendall) and
# score the branch assignment with calClusteringMetrics().
wishbone_benchmark_res_list <- lapply(unique(wishbone_res$run), function(ind) {
  message(ind)
  subset_wishbone_res <- subset(wishbone_res, run == ind)
  row.names(subset_wishbone_res) <- subset_wishbone_res[, 1]
  overlpa_cells <- intersect(colnames(absolute_cds), row.names(subset_wishbone_res))

  # Reference (Monocle 2) vs. Wishbone pseudotime, per lineage.
  neuron_t_1 <- pData(absolute_cds)[intersect(overlpa_cells, neuron_cells), 'Pseudotime']
  neuron_t_2 <- subset_wishbone_res[intersect(overlpa_cells, neuron_cells), 'trajectory']
  astrocyte_t_1 <- pData(absolute_cds)[intersect(overlpa_cells, astrocyte_cells), 'Pseudotime']
  astrocyte_t_2 <- subset_wishbone_res[intersect(overlpa_cells, astrocyte_cells), 'trajectory']

  cor_res <- c(cor(neuron_t_1, neuron_t_2), cor(astrocyte_t_1, astrocyte_t_2))
  kendall_cor_res <- c(cor(neuron_t_1, neuron_t_2, method = 'kendall', use = 'pairwise.complete.obs'),
                       cor(astrocyte_t_1, astrocyte_t_2, method = 'kendall', use = 'pairwise.complete.obs'))

  # If the two orderings run in opposite directions, re-anchor Wishbone's
  # pseudotime at the reference start cell and recompute the correlations.
  # BUG FIX: `cor_res` has length 2, so the original condition
  # `if (cor_res < 0 & is.finite(cor_res))` was a vector condition (an error
  # in R >= 4.2; previously it silently used only the first element). Use
  # any()/&& as the progressive_wishbone_benchmark_res_list block below does;
  # na.rm = TRUE guards against an NA condition when a correlation is NA.
  if (any(cor_res < 0, na.rm = TRUE) && any(is.finite(cor_res))) {
    # Starting cell = overlapping cell with the smallest reference pseudotime.
    start_cell_id <- which(neuron_t_1 == min(neuron_t_1, na.rm = T))
    neuron_t_2_update <- abs(neuron_t_2 - neuron_t_2[start_cell_id])
    start_cell_id <- which(astrocyte_t_1 == min(astrocyte_t_1, na.rm = T))
    astrocyte_t_2_update <- abs(astrocyte_t_2 - astrocyte_t_2[start_cell_id])
    cor_res <- c(cor(neuron_t_1, neuron_t_2_update), cor(astrocyte_t_1, astrocyte_t_2_update))
    kendall_cor_res <- c(cor(neuron_t_1, neuron_t_2_update, method = 'kendall', use = 'pairwise.complete.obs'),
                         cor(astrocyte_t_1, astrocyte_t_2_update, method = 'kendall', use = 'pairwise.complete.obs'))
  }

  # Branch assignment: Monocle 2 State vs. Wishbone branch labels.
  clusters_1 <- as.character(pData(absolute_cds)[overlpa_cells, 'State'])
  clusters_2 <- as.character(subset_wishbone_res[overlpa_cells, 'branch'])
  ClusteringMetrics_res <- calClusteringMetrics(clusters_1, clusters_2)

  return(list(cor = mean(abs(cor_res)),
              kendall_tau = mean(abs(kendall_cor_res)),
              cluster = ClusteringMetrics_res,
              raw_cor = cor_res,
              raw_kendall_tau = kendall_cor_res))
})
# Same benchmark as wishbone_benchmark_res_list, but over the progressively
# downsampled runs in `fraction_wishbone_res`: per run, pseudotime
# correlations (Pearson + Kendall) on the neuron/astrocyte lineages against
# the Monocle 2 reference, plus clustering metrics on the branch assignment.
progressive_wishbone_benchmark_res_list <- lapply(unique(fraction_wishbone_res$run), function(ind) {
message(ind)
subset_wishbone_res <- subset(fraction_wishbone_res, run == ind)
# First column holds the cell names; use them as row names for indexing.
row.names(subset_wishbone_res) <- subset_wishbone_res[, 1]
overlpa_cells <- intersect(colnames(absolute_cds), row.names(subset_wishbone_res) )
# Reference (Monocle 2) vs. Wishbone pseudotime, restricted to shared cells.
neuron_t_1 <- pData(absolute_cds)[intersect(overlpa_cells, neuron_cells), 'Pseudotime']
neuron_t_2 <- subset_wishbone_res[intersect(overlpa_cells, neuron_cells), 'trajectory']
astrocyte_t_1 <- pData(absolute_cds)[intersect(overlpa_cells, astrocyte_cells), 'Pseudotime']
astrocyte_t_2 <- subset_wishbone_res[intersect(overlpa_cells, astrocyte_cells), 'trajectory']
cor_res <- c(cor(neuron_t_1, neuron_t_2), cor(astrocyte_t_1, astrocyte_t_2))
kendall_cor_res <- c(cor(neuron_t_1, neuron_t_2, method = 'kendall', use = 'pairwise.complete.obs'), cor(astrocyte_t_1, astrocyte_t_2, method = 'kendall', use = 'pairwise.complete.obs'))
# print(t_1)
# print(t_2)
# If the orderings disagree in direction, re-anchor Wishbone's pseudotime at
# the reference start cell and recompute.
# NOTE(review): any(cor_res < 0) yields NA (and an error in `if`) when both
# correlations are NA -- confirm whether na.rm = TRUE is wanted here.
if(any(cor_res < 0) & any(is.finite(cor_res))){
start_cell_id <- which(neuron_t_1 == min(neuron_t_1, na.rm = T)) #finding the starting cell (cell with smallest pseudotime) in the overlapping set
neuron_t_2_update <- abs(neuron_t_2 - neuron_t_2[start_cell_id])
start_cell_id <- which(astrocyte_t_1 == min(astrocyte_t_1, na.rm = T)) #finding the starting cell (cell with smallest pseudotime) in the overlapping set
astrocyte_t_2_update <- abs(astrocyte_t_2 - astrocyte_t_2[start_cell_id])
cor_res <- c(cor(neuron_t_1, neuron_t_2_update), cor(astrocyte_t_1, astrocyte_t_2_update))
kendall_cor_res <- c(cor(neuron_t_1, neuron_t_2_update, method = 'kendall', use = 'pairwise.complete.obs'), cor(astrocyte_t_1, astrocyte_t_2_update, method = 'kendall', use = 'pairwise.complete.obs'))
}
#branch assignment:
clusters_1 <- as.character(pData(absolute_cds)[overlpa_cells, 'State'])
clusters_2 <- as.character(subset_wishbone_res[overlpa_cells, 'branch'])
ClusteringMetrics_res <- calClusteringMetrics(clusters_1, clusters_2)
# Report mean absolute correlations plus the raw per-lineage values.
return(list(cor = mean(abs(cor_res)), kendall_tau = mean(abs(kendall_cor_res)), cluster = ClusteringMetrics_res, raw_cor = cor_res, raw_kendall_tau = kendall_cor_res))
})
wishbone_sampling_res_df <- data.frame(kendall.tau = unlist(lapply(wishbone_benchmark_res_list, function(x) x$kendall_tau)),
pearson_rho = unlist(lapply(wishbone_benchmark_res_list, function(x) x$cor)),
rand_ind = unlist(lapply(wishbone_benchmark_res_list, function(x) x$cluster[1, 1])),
var_inf = unlist(lapply(wishbone_benchmark_res_list, function(x) x$cluster[2, 1])),
adj_rand = unlist(lapply(wishbone_benchmark_res_list, function(x) x$cluster[3, 1]))
)
progressive_wishbone_sampling_res_df <- data.frame(kendall.tau = unlist(lapply(progressive_wishbone_benchmark_res_list, function(x) x$kendall_tau)),
pearson_rho = unlist(lapply(progressive_wishbone_benchmark_res_list, function(x) x$cor)),
rand_ind = unlist(lapply(progressive_wishbone_benchmark_res_list, function(x) x$cluster[1, 1])),
var_inf = unlist(lapply(progressive_wishbone_benchmark_res_list, function(x) x$cluster[2, 1])),
adj_rand = unlist(lapply(progressive_wishbone_benchmark_res_list, function(x) x$cluster[3, 1]))
)
downsamling_marker_all_sampling_res_df <- Reduce(rbind , list(dpt_sampling_res_df, monocle_sampling_res_df, wishbone_sampling_res_df, dpt_sampling_res_df)) # ICA_sampling_res_df,
downsamling_marker_all_sampling_res_df$Type <- c(rep('dpt', nrow(dpt_sampling_res_df)), rep('monocle2', nrow(monocle_sampling_res_df)), rep('wishbone', nrow(wishbone_sampling_res_df)), rep('monocle1', nrow(dpt_sampling_res_df)))#, rep('Monocle1', 10000)
downsamling_marker_all_sampling_res_df$Type <- factor(downsamling_marker_all_sampling_res_df$Type, levels = c('monocle2', 'monocle1', "dpt", "wishbone")) #dpt (non-uniform branch)
pdf(paste(SI_fig_dir, benchmark_type, 'real_simulation_pearson_rho_comparison_robustness.pdf', sep = ''), width = 1, height = 1.5)
qplot(Type, pearson_rho, data = downsamling_marker_all_sampling_res_df, color = Type, geom = 'boxplot') + ylab("Pearson's Rho") + monocle_theme_opts() + xlab('') +
nm_theme() + theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1) + scale_color_manual(values = software_custom_color_scale)
dev.off()
pdf(paste(SI_fig_dir, benchmark_type, 'real_simulation_all_kendall_tau_comparison_robustness.pdf', sep = ''), width = 1, height = 1.5)
qplot(Type, kendall.tau, data = downsamling_marker_all_sampling_res_df, color = Type, geom = 'boxplot') + ylab("Kendall's tau") + monocle_theme_opts() + xlab('') +
nm_theme() + theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1) + scale_color_manual(values = software_custom_color_scale)
dev.off()
pdf(paste(SI_fig_dir, benchmark_type, 'real_simulation_ArI_comparison_robustness.pdf', sep = ''), width = 1, height = 1.5)
qplot(Type, adj_rand, data = downsamling_marker_all_sampling_res_df, color = Type, geom = 'boxplot') + ylab("Adjusted Rand index") + monocle_theme_opts() + xlab('') +
nm_theme() + theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1) + scale_color_manual(values = software_custom_color_scale)
dev.off()
progressive_all_valid_cell_sampling_res_df <- Reduce(rbind , list(progressive_dpt_sampling_res_df, progressive_monocle_sampling_res_df, progressive_wishbone_sampling_res_df, progressive_dpt_sampling_res_df)) # ICA_sampling_res_df,
progressive_all_valid_cell_sampling_res_df$proportion <- c(rep(downsampled_proportions, 2), names(cds_downsampled_cells_ordered)[unique(fraction_wishbone_res$run)], downsampled_proportions)
progressive_all_valid_cell_sampling_res_df$Type <- c(rep('dpt', 36), rep('monocle2', 36), rep('wishbone', length(unique(fraction_wishbone_res$run))), rep('monocle1', 36))
progressive_all_valid_cell_sampling_res_df$Type <- factor(progressive_all_valid_cell_sampling_res_df$Type, levels = c('monocle2', 'monocle1', "dpt", "wishbone"))
progressive_all_valid_cell_sampling_res_df$se <- 0.1
pdf(paste(SI_fig_dir, benchmark_type, 'pearson_real_simulation_rho_comparison_cell_downsampling.pdf', sep = ''), width = 2.5, height = 2)
qplot(proportion, abs(progressive_all_valid_cell_sampling_res_df$pearson_rho), data = progressive_all_valid_cell_sampling_res_df, color = Type, size = 1, geom = 'boxplot') +
xlab('Proportion of original cells') + ylab("Pearson's Rho") + nm_theme() + scale_size(range = c(0.1, 1)) + monocle_theme_opts() + ylim(0, 1) + theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1)
dev.off()
pdf(paste(SI_fig_dir, benchmark_type, 'rand_index_comparison_real_simulation_downsampling.pdf', sep = ''), width = 2.5, height = 2)
qplot(proportion, abs(progressive_all_valid_cell_sampling_res_df$adj_rand), data = progressive_all_valid_cell_sampling_res_df, color = Type, size = 1, geom = 'boxplot') +
xlab('Proportion of original cells') + ylab("Adjusted rand index") + scale_size(range = c(0.1, 1)) + nm_theme() + monocle_theme_opts() + ylim(0, 1) + theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1)
dev.off()
progressive_process_cell_sampling_res_df <- ddply(progressive_all_valid_cell_sampling_res_df, .(Type, proportion), summarize,
mean_kendall.tau = mean(abs(kendall.tau), na.rm = T),
sd_kendall.tau = sd(abs(kendall.tau), na.rm = T),
mean_pearson_rho = mean(abs(pearson_rho), na.rm = T),
sd_pearson_rho = sd(abs(pearson_rho), na.rm = T),
mean_adj_rand = mean(abs(adj_rand), na.rm = T),
sd_adj_rand = sd(abs(adj_rand), na.rm = T),
se = mean(se))
limits <- aes(ymax = mean_adj_rand + sd_adj_rand, ymin=mean_adj_rand - sd_adj_rand)
pdf(paste(SI_fig_dir, benchmark_type, 'rand_index_real_simulation_comparison_cell_downsampling2.pdf', sep = ''), width = 2.5, height = 2)
ggplot(aes(proportion, mean_adj_rand), data = progressive_process_cell_sampling_res_df) +
geom_point(aes(color = Type), position=position_dodge(width=0.1), size = 0.5) + facet_wrap(~Type) +
geom_errorbar(aes(color = Type, ymax = mean_adj_rand + sd_adj_rand, ymin=mean_adj_rand - sd_adj_rand), position=position_dodge(width=0.9), size = 0.5) +
xlab('Proportion of original cells') + ylab("Adjusted Rand index") + scale_size(range = c(0.1, 1)) + nm_theme() + monocle_theme_opts() + ylim(0, 1) +
theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1) + scale_color_manual(values = software_custom_color_scale)
dev.off()
pdf(paste(SI_fig_dir, benchmark_type, 'pearson_rho_real_simulation_comparison_cell_downsampling2.pdf', sep = ''), width = 2.5, height = 2)
ggplot(aes(proportion, mean_pearson_rho), data = progressive_process_cell_sampling_res_df) +
geom_point(aes(color = Type), position=position_dodge(width=0.9), size = 0.5) + facet_wrap(~Type) +
geom_errorbar(aes(color = Type, ymax = mean_pearson_rho + sd_pearson_rho, ymin=mean_pearson_rho - sd_pearson_rho), position=position_dodge(width=0.1), size = 0.5) +
xlab('Proportion of original cells') + ylab("Pearson's Rho") + scale_size(range = c(0.1, 1)) + nm_theme() + monocle_theme_opts() + ylim(0, 1) +
theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1) + scale_color_manual(values = software_custom_color_scale)
dev.off()
pdf(paste(SI_fig_dir, benchmark_type, 'kendall_tau_real_simulation_comparison_cell_downsampling2.pdf', sep = ''), width = 2.5, height = 2)
ggplot(aes(proportion, mean_kendall.tau), data = progressive_process_cell_sampling_res_df) +
geom_point(aes(color = Type), position=position_dodge(width=0.9), size = 0.5) + facet_wrap(~Type) +
geom_errorbar(aes(color = Type, ymax = mean_kendall.tau + sd_kendall.tau, ymin=mean_kendall.tau - sd_kendall.tau), position=position_dodge(width=0.1), size = 0.5) +
xlab('Proportion of original cells') + ylab("Kendall's Tau") + scale_size(range = c(0.1, 1)) + nm_theme() + monocle_theme_opts() + ylim(0, 1) +
theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1) + scale_color_manual(values = software_custom_color_scale)
dev.off()
pdf(paste(SI_fig_dir, benchmark_type, 'kendall_tau_real_simulation_comparison_cell_downsampling2_cole.pdf', sep = ''), width = 3, height = 1)
ggplot(aes(proportion, mean_kendall.tau), data = progressive_process_cell_sampling_res_df) +
geom_point(aes(color = Type), position=position_dodge(width=0.9), size = 0.5) + facet_wrap(~Type) +
geom_errorbar(aes(color = Type, ymax = mean_kendall.tau + sd_kendall.tau, ymin=mean_kendall.tau - sd_kendall.tau), position=position_dodge(width=0.1), size = 0.5) +
xlab('Proportion of original cells') + ylab("Kendall's Tau") + scale_size(range = c(0.1, 1)) + nm_theme() + monocle_theme_opts() + ylim(0, 1) +
theme(axis.text.x = element_text(angle = 30, hjust = 1)) + ylim(0, 1) + scale_color_manual(values = software_custom_color_scale)
dev.off()
##########################################################################################################################################################################
# save the dataset
##########################################################################################################################################################################
save.image(paste('./RData/', benchmark_type, '_real_simulation_na.RData', sep = ''))
##########################################################################################################################################################################
# use diffusion distance to perform the trajectory reconstruction
##########################################################################################################################################################################
library(monocle)
library(destiny)
library(mcclust)
library(plyr)
# Traverse a cell-wise minimum spanning tree from `root_cell` by DFS, giving
# each cell a pseudotime (cumulative MST edge length from the root, taken from
# `dp`) and a "state" label that increments every time the traversal descends
# from a branch point (vertex of degree > 2). A small weighted graph over the
# states themselves is built alongside.
#
# Args:
#   dp_mst:    igraph MST over cells (vertex names = cell names).
#   dp:        cell-by-cell distance matrix; dp[a, b] is the edge length used
#              for pseudotime. (The self-referential default `dp = dp` is kept
#              for interface compatibility; callers always pass it.)
#   root_cell: vertex (name or index) to start the DFS from.
#   verbose:   kept for interface compatibility; not consulted in the body.
#
# Returns: list(ordering_df, state_mst) where ordering_df has one row per cell
# (sample_name, cell_state, pseudo_time, parent) and state_mst is an
# undirected graph of states weighted by state sizes.
extract_ddrtree_ordering_xj <- function(dp_mst, dp = dp, root_cell, verbose = TRUE) # dp,
{
  nei <- NULL
  type <- NULL
  pseudo_time <- NULL
  curr_state <- 1

  res <- list(subtree = dp_mst, root = root_cell)

  # Per-cell accumulators, keyed by MST vertex name.
  states = rep(1, ncol(dp))
  names(states) <- V(dp_mst)$name
  pseudotimes = rep(0, ncol(dp))
  names(pseudotimes) <- V(dp_mst)$name
  parents = rep(NA, ncol(dp))
  names(parents) <- V(dp_mst)$name

  mst_traversal <- graph.dfs(dp_mst,
                             root = root_cell,
                             neimode = "all",
                             unreachable = FALSE,
                             father = TRUE)
  mst_traversal$father <- as.numeric(mst_traversal$father)
  curr_state <- 1

  # Upper bound on the number of states: state_stat counts branch points by
  # degree; each contributes (degree - 1) child states, plus root/terminal.
  state_stat <- table(degree(dp_mst)[degree(dp_mst) > 2])
  state_total_num <- sum(state_stat * 2:(length(state_stat) + 1))
  node_num <- state_total_num + 2
  state_mst <- make_empty_graph(n = node_num) # graph over states
  state_mst <- add_edges(state_mst, c(node_num, 1))

  for (i in 1:length(mst_traversal$order)) {
    curr_node = mst_traversal$order[i]
    curr_node_name = V(dp_mst)[curr_node]$name

    if (is.na(mst_traversal$father[curr_node]) == FALSE) {
      parent_node = mst_traversal$father[curr_node]
      parent_node_name = V(dp_mst)[parent_node]$name
      parent_node_pseudotime = pseudotimes[parent_node_name]
      parent_node_state = states[parent_node_name]
      # Pseudotime accumulates along the DFS path via the distance matrix.
      curr_node_pseudotime = parent_node_pseudotime + dp[curr_node_name, parent_node_name]

      # Descending from a branch point starts a new state.
      if (degree(dp_mst, v = parent_node_name) > 2) {
        curr_state <- curr_state + 1
        # MESSAGE FIX: a separating space was missing, producing e.g.
        # "current state is 5parent state is 3".
        message('current state is ', curr_state, ' parent state is ', parent_node_state)
        state_mst <- add_edges(state_mst, c(parent_node_state, curr_state))
      }
    } else {
      # DFS root: no parent, pseudotime 0.
      parent_node = NA
      parent_node_name = NA
      curr_node_pseudotime = 0
    }

    curr_node_state = curr_state
    pseudotimes[curr_node_name] <- curr_node_pseudotime
    states[curr_node_name] <- curr_node_state
    parents[curr_node_name] <- parent_node_name
  }

  ordering_df <- data.frame(sample_name = names(states),
                            cell_state = factor(states),
                            pseudo_time = as.vector(pseudotimes),
                            parent = parents)
  row.names(ordering_df) <- ordering_df$sample_name

  # Weight state edges by state sizes (0.1 for the artificial root edge).
  E(state_mst)$weight <- c(0.1, table(ordering_df$cell_state))
  V(state_mst)$name <- as.character(1:node_num)
  state_mst <- as.undirected(state_mst)

  # BUG FIX: the original had an unreachable bare `state_mst` expression after
  # this return statement; removed as dead code.
  return(list(ordering_df = ordering_df, state_mst = state_mst))
}
##################################################################################################################################################################
# na simulation dataset (not working below, we really need to have a better method for assigning branches based on some graph operations )
##################################################################################################################################################################
# root_cell <- paste('Y_', test@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex[row.names(subset(pData(valid_subset_GSE72857_cds2), Pseudotime == 0)), 1], sep = '')
colnames(test)
test_res <- extract_ddrtree_ordering_xj(test@minSpanningTree, cellPairwiseDistances(test),
root_cell = 1, verbose=T) # 'Cell_2'
next_node <<- 0
dp_mst <- test_res$state_mst
dp_mst <- as.undirected(dp_mst)
dp = distances(dp_mst, v = V(dp_mst), to = V(dp_mst), weights = NULL)
# res <- monocle:::pq_helper(dp_mst, use_weights=T, root_node=2)
res <- pq_helper(dp_mst, use_weights=T, root_node=which(degree(dp_mst) == 1)[1])
if(is.null(branch_num))
branch_num <- sum(degree(dp_mst) > 2) + 1
branch_num <- 2 #6 cell types in the end
order_list <- monocle:::extract_good_branched_ordering(res$subtree, res$root, dp, branch_num, FALSE)
cc_ordering <- order_list$ordering_df
row.names(cc_ordering) <- cc_ordering$sample_name
data_ori <- as.matrix(t(exprs(test)))
data_uniq <- data_ori[!duplicated(data_ori), ]
dm <- DiffusionMap(as.matrix(data_ori))
DPT_res <- DPT(dm)
cell_num <- length(DPT_res$DPT1)
#
# dp <- DPT_res[1:cell_num, 1:cell_num]
# dimnames(dp) <- list(colnames(test)[!duplicated(data_ori)], colnames(test)[!duplicated(data_ori)])
# gp <- graph.adjacency(dp, mode = "undirected", weighted = TRUE)
# dp_mst <- minimum.spanning.tree(gp)
#
# root_cell <- row.names(subset(pData(test), Pseudotime == 0))
# test_res <- extract_ddrtree_ordering_xj(dp_mst, dp,
# root_cell = root_cell, verbose=T)
plot(test_res$state_mst, layout = layout_as_tree(test_res$state_mst))
qplot(DPT_res@dm$DC1, DPT_res@dm$DC2, color = pData(test)$State)
qplot(DPT_res@dm$DC1, DPT_res@dm$DC2, color = cc_ordering[as.character(test_res$ordering_df[colnames(test), 'cell_state']), 'cell_state'])
#
# calClusteringMetrics(cc_ordering[as.character(test_res$ordering_df[colnames(valid_subset_GSE72857_cds2), 'cell_state']), 'cell_state'], pData(valid_subset_GSE72857_cds2)$cell_type2)
# calClusteringMetrics(pData(valid_subset_GSE72857_cds2)$State, pData(valid_subset_GSE72857_cds2)$cell_type2)
#
# #load the data from the Fabian group's processed results:
# load('./script_for_reproduce/MARSseq_analysis_tutorial.RData')
# calClusteringMetrics(pData(valid_subset_GSE72857_cds2)$cluster, cluster.id[cluster.id[, 1] != 19, 1]) #confirm that index matches up
# calClusteringMetrics(branching[cluster.id[, 1] != 19], pData(valid_subset_GSE72857_cds2)$cell_type2) #check the result
#
# ARI_branches <- data.frame(ARI = c(0.5923145, 0.6566774, 0.7225239), Type = c('DPT (original)', 'DPT + Monocle 2', 'Monocle 2'))
# qplot(ARI, geom = 'bar', data = ARI_branches, stat = 'identity')
#
# pdf(paste(SI_fig_dir, 'MARS_seq_ARI_branch_cluster.pdf', sep = ''), width = 2, height = 1)
# ggplot(data = ARI_branches, aes(Type, ARI)) + geom_bar(stat = 'identity', aes(fill = Type)) + nm_theme() + theme(axis.text.x = element_text(angle = 30, hjust = 1)) + xlab('')
# dev.off()
#
# pdf(paste(SI_fig_dir, 'MARS_seq_pseudotime_correspondence.pdf', sep = ''), width = 1, height = 1)
# qplot(DPT_res$DPT24, pData(valid_subset_GSE72857_cds2)$Pseudotime, color = pData(valid_subset_GSE72857_cds2)$State, size = I(0.25)) + xlab('DPT diffusion \n pseudotime') + ylab('Monocle 2 \n pseudotime') + nm_theme()
# dev.off()
#
# pdf(paste(SI_fig_dir, 'MARS_seq_pseudotime_correspondence_helper.pdf', sep = ''))
# qplot(DPT_res$DPT24, pData(valid_subset_GSE72857_cds2)$Pseudotime, color = pData(valid_subset_GSE72857_cds2)$State, size = I(0.25)) + xlab('DPT diffusion pseudotime') + ylab('Monocle 2 pseudotime')
# dev.off()
# Order cells along a trajectory by repeatedly extracting diameter paths from
# the cell-wise MST. The first diameter path is the backbone; each subsequent
# path (num_paths - 1 of them) attaches to the backbone at its closest vertex,
# which is recorded as a branch point. A DFS from the root then assigns each
# cell a pseudotime (cumulative MST edge length) and a state that increments
# only when passing one of those recorded branch points. The cds is returned
# with pData State/Pseudotime overwritten.
diameter_path_ordering <- function(cds, num_paths, root_state) {
root_cell <- monocle::select_root_cell(cds, root_state = root_state)
dp_mst <- cds@minSpanningTree
dp <- cellPairwiseDistances(cds)
# Backbone = longest shortest path (diameter) of the MST.
diamter_path_old <- get_diameter(dp_mst)
branchpoint <- c()
dp_mst_tmp <- delete.vertices(dp_mst, diamter_path_old$name)
# Peel off one diameter path per additional branch; the closest previous-path
# vertex to each new path's start becomes a branch point.
for(i in 1:(num_paths - 1)) {
diamter_path_current <- get_diameter(dp_mst_tmp)
start_cell_distance <- distances(dp_mst, v = diamter_path_current$name[1], to = diamter_path_old$name)
branchpoint <- c(branchpoint, colnames(start_cell_distance)[which.min(start_cell_distance)]) #find the ce
diamter_path_old <- diamter_path_current
dp_mst_tmp <- delete.vertices(dp_mst_tmp, diamter_path_old$name)
}
nei <- NULL
type <- NULL
pseudo_time <- NULL
curr_state <- 1
res <- list(subtree = dp_mst, root = root_cell)
# Per-cell accumulators, keyed by MST vertex name.
states = rep(1, ncol(dp))
names(states) <- V(dp_mst)$name
pseudotimes = rep(0, ncol(dp))
names(pseudotimes) <- V(dp_mst)$name
parents = rep(NA, ncol(dp))
names(parents) <- V(dp_mst)$name
mst_traversal <- graph.dfs(dp_mst,
root=root_cell,
neimode = "all",
unreachable=FALSE,
father=TRUE)
mst_traversal$father <- as.numeric(mst_traversal$father)
curr_state <- 1
# Upper bound on state count, derived from branch-point degrees.
state_stat <- table(degree(dp_mst)[degree(dp_mst) > 2])
state_total_num <- sum(state_stat * 2:(length(state_stat) + 1))
node_num <- state_total_num + 2
state_mst <- make_empty_graph(n = node_num) #number of states
state_mst <- add_edges(state_mst, c(node_num, 1))
for (i in 1:length(mst_traversal$order)){
curr_node = mst_traversal$order[i]
curr_node_name = V(dp_mst)[curr_node]$name
if (is.na(mst_traversal$father[curr_node]) == FALSE){
parent_node = mst_traversal$father[curr_node]
parent_node_name = V(dp_mst)[parent_node]$name
parent_node_pseudotime = pseudotimes[parent_node_name]
parent_node_state = states[parent_node_name]
# Pseudotime accumulates along the DFS path via the distance matrix.
curr_node_pseudotime = parent_node_pseudotime + dp[curr_node_name, parent_node_name]
# Unlike extract_ddrtree_ordering_xj, a new state starts only at branch
# points that were recorded during the diameter-path extraction above.
if (degree(dp_mst, v=parent_node_name) > 2 & parent_node_name %in% branchpoint){
curr_state <- curr_state + 1
# if(curr_state >= 1405){
# # browser()
# }
message('current state is ', curr_state, 'parent state is ', parent_node_state)
state_mst <- add_edges(state_mst, c(parent_node_state, curr_state))
}
}else{
# DFS root: no parent, pseudotime 0.
parent_node = NA
parent_node_name = NA
curr_node_pseudotime = 0
}
curr_node_state = curr_state
pseudotimes[curr_node_name] <- curr_node_pseudotime
states[curr_node_name] <- curr_node_state
parents[curr_node_name] <- parent_node_name
}
ordering_df <- data.frame(sample_name = names(states),
cell_state = factor(states),
pseudo_time = as.vector(pseudotimes),
parent = parents)
row.names(ordering_df) <- ordering_df$sample_name
# ordering_df <- plyr::arrange(ordering_df, pseudo_time)
# Weight state edges by state sizes (0.1 for the artificial root edge).
E(state_mst)$weight <- c(0.1, table(ordering_df$cell_state))
V(state_mst)$name <- as.character(1:node_num)
state_mst <- as.undirected(state_mst)
# return(list(ordering_df = ordering_df, state_mst = state_mst))
# NOTE(review): the bare `ordering_df` below is a no-op leftover from the
# commented-out return above; the function instead writes into the cds.
ordering_df
pData(cds)$State <- ordering_df$cell_state
pData(cds)$Pseudotime <- ordering_df$pseudo_time
return(cds)
}
i <- 20
test <- cds_downsampled_cells_ordered_update_dpt[[i]]
test <- reduceDimension(test, max_components = 2, norm_method = 'none', scaling = F, reduction_method = 'DPT')
test <- diameter_path_ordering(test, num_paths = 2, root_state = Root_state(test)) # 'Cell_2'
plot_cell_trajectory(test)
cds_downsampled_cells_ordered_update_diffusion_pseudotime <- lapply(1:length(cds_downsampled_cells_ordered_update_dpt), function(i) {
message('current id is ', i)
test <- cds_downsampled_cells_ordered_update[[i]]
test <- reduceDimension(test, max_components = 2, norm_method = 'none', scaling = F, reduction_method = 'DPT')
tryCatch({
test <- diameter_path_ordering(test, num_paths = 2, root_state = Root_state(test)) # 'Cell_2'
return(test)
}, error = function(e) {
warning('Your initial method throws numerical errors!')
test <- orderCells(test)
return(test)
})
})
cds_downsampled_cells_ordered_0.8_diffusion_pseudotime <- lapply(1:length(cds_downsampled_cells_ordered_0.8_update), function(i) {
message('current id is ', i)
test <- cds_downsampled_cells_ordered_update[[i]]
test <- reduceDimension(test, max_components = 2, norm_method = 'none', scaling = F, reduction_method = 'DPT')
tryCatch({
test <- diameter_path_ordering(test, num_paths = 2, root_state = Root_state(test)) # 'Cell_2'
return(test)
}, error = function(e) {
warning('Your initial method throws numerical errors!')
test <- orderCells(test)
return(test)
})
})
# show the results for enhanced Monocle 2 + DPT
|
library(rcorpora)
### Name: corpora
### Title: Load a data set from the corpora package
### Aliases: corpora
### ** Examples
corpora()
corpora(category = "animals")
corpora("foods/pizzaToppings")
| /data/genthat_extracted_code/rcorpora/examples/corpora.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 203 | r | library(rcorpora)
### Name: corpora
### Title: Load a data set from the corpora package
### Aliases: corpora
### ** Examples
corpora()
corpora(category = "animals")
corpora("foods/pizzaToppings")
|
# Plot a 2-D dimensionality reduction (UMAP or PCA) of `cells`, coloured by a
# user-facing grouping choice. FPKM/TPM assays are log2(x + 1)-transformed
# into a "log2counts" assay before plotting.
#
# Args:
#   cells:          object supporting assay()/plotUMAP()/plotPCA()
#                   (presumably a SingleCellExperiment -- TODO confirm).
#   geneOfInterest: gene name used when colouring by expression.
#   type:           "UMAP" (default); any other value falls through to PCA.
#   colourby:       UI label, mapped below to a colData column or gene name.
#   norm_type:      assay name to plot from (default "batch_corrected").
#
# Returns: the ggplot object produced by plotUMAP()/plotPCA().
reduceDimentions <- function(cells, geneOfInterest, type = "UMAP", colourby = "cell_type", norm_type = "batch_corrected") {
  # Translate the UI label into the actual column / gene name.
  if (colourby == "Gene expression") {
    colourby <- geneOfInterest
  } else if (colourby == "Cell type" || colourby == "Cell-type based") {
    colourby <- "cell_type"
  } else if (colourby == "Batch") {
    colourby <- "batch"
  } else if (colourby == "Cluster-based") {
    colourby <- "label"
  }

  if (norm_type == "fpkm" || norm_type == "tpm") {
    # FIX: `counts` was assigned and then never used while the same assay was
    # extracted a second time; reuse the already-extracted matrix instead.
    counts <- assay(cells, norm_type)
    assay(cells, "log2counts") <- log2(counts + 1)
    norm_type <- "log2counts"
  }

  # visualize
  if (type == "UMAP") {
    plotUMAP(cells, colour_by = colourby, by_exprs_values = norm_type, point_alpha = 1, point_size = 3)
  } else {
    plotPCA(cells, colour_by = colourby, by_exprs_values = norm_type, point_alpha = 1, point_size = 3)
  }
}
| /plotDimRed.R | no_license | dalingard/sce | R | false | false | 866 | r | reduceDimentions<-function(cells, geneOfInterest, type="UMAP", colourby="cell_type", norm_type="batch_corrected"){
if (colourby=="Gene expression"){
colourby <- geneOfInterest
} else if (colourby=="Cell type" || colourby=="Cell-type based"){
colourby <- "cell_type"
} else if (colourby=="Batch") {
colourby <- "batch"
} else if (colourby=="Cluster-based"){
colourby <- "label"
}
if (norm_type=="fpkm" || norm_type=="tpm"){
counts <- assay(cells, norm_type)
assay(cells, "log2counts") <- log2(assay(cells, norm_type) + 1)
norm_type <- "log2counts"
}
# visualize
if (type=="UMAP"){
plotUMAP(cells, colour_by=colourby, by_exprs_values=norm_type, point_alpha=1, point_size=3)
} else {
plotPCA(cells, colour_by=colourby, by_exprs_values=norm_type, point_alpha=1, point_size=3)
}
}
|
##Program for 1st assignment - Plot 2
# Reads the household power consumption data, keeps rows for 1/2/2007 and
# 2/2/2007, and plots Global_active_power over time as a line chart, saving a
# copy to Plot2.png.
# NOTE(review): "filepath/" is a placeholder -- set it to the real data
# directory before running.
setwd("filepath/")
powerdata<-read.csv("filepath/household_power_consumption.txt",header = TRUE, sep = ";")
# Keep only the two target dates (dates appear in d/m/Y form).
powerdata1<-powerdata[which(powerdata$Date == "1/2/2007"|powerdata$Date == "2/2/2007"),]
# Combine Date + Time into a POSIXlt datetime for the x axis.
powerdata1$datetime<-strptime(paste(powerdata1$Date, powerdata1$Time),"%d/%m/%Y %H:%M:%S")
# Coerce the power column to numeric (presumably it was read as a factor /
# character -- TODO confirm against the raw file).
powerdata1$Global_active_power<-as.numeric(as.character(powerdata1$Global_active_power))
plot(powerdata1$datetime,powerdata1$Global_active_power,"l",xlab = "",ylab = "Global Active Power (Kilowatts)", main = "")
# Copy the on-screen plot to a PNG device, then close that device.
# NOTE(review): dev.copy() uses the png device's default size, not the
# screen size -- confirm this matches the required output dimensions.
dev.copy(png,file="Plot2.png")
dev.off()
| /Plot2.R | no_license | TXu8/ExData_Plotting1 | R | false | false | 596 | r | ##Program for 1st assignment - Plot 2
setwd("filepath/")
powerdata<-read.csv("filepath/household_power_consumption.txt",header = TRUE, sep = ";")
powerdata1<-powerdata[which(powerdata$Date == "1/2/2007"|powerdata$Date == "2/2/2007"),]
powerdata1$datetime<-strptime(paste(powerdata1$Date, powerdata1$Time),"%d/%m/%Y %H:%M:%S")
powerdata1$Global_active_power<-as.numeric(as.character(powerdata1$Global_active_power))
plot(powerdata1$datetime,powerdata1$Global_active_power,"l",xlab = "",ylab = "Global Active Power (Kilowatts)", main = "")
dev.copy(png,file="Plot2.png")
dev.off()
|
# Pick3: interactive phase-picking helper driven by clicks on the main
# seismic display.
#
# Args:
#   nh: seismic data list with per-trace station (STNS) and component (COMPS)
#       vectors, passed through to RSEIS::swig().
#   g:  display state list: selected traces (sel), click locations
#       (zloc/zenclick), device ids (MAINdev/Pickdev), current window (WIN)
#       and the pick table (WPX).
#
# Behaviour: identifies the station the user clicked, gathers all of its
# components (vertical first), opens/reuses a secondary graphics device, and
# runs RSEIS::swig() there so the analyst can pick phases; any picks made are
# merged back into g$WPX. Returns list(NH = nh, global.vars = g) invisibly
# with g$action = "replot" so the caller redraws the main display.
Pick3<-function(nh, g)
{
kix = RSEIS::legitpix(g$sel, g$zloc, g$zenclick)
ypick = kix$ypick
ppick = kix$ppick
# Only proceed if the click landed on a plottable trace.
if(length(ppick)>0)
{
# Last clicked trace wins.
ipick = g$sel[ypick]
ipick = ipick[length(ipick)]
## cat(paste(sep=" ", ypick, ipick), sep="\n")
## print(ipick)
##
# All traces belonging to the clicked station.
ma = which(!is.na(match( nh$STNS, nh$STNS[ipick])))
########## sort so Vertical is on top and then North and East
acomp = nh$COMPS[ma]
icomp = rep(0, length(acomp))
icomp[acomp=="V"] = 1
icomp[acomp=="N"] = 2
icomp[acomp=="E"] = 3
ma = ma[order(icomp)]
#### print(cbind(nh$STNS[ma], nh$COMPS[ma]))
# Open (or reuse) a second graphics device for the pick window.
if(is.null(g$Pickdev))
{
#### X11(width = 12, height = 7)
RSEIS::screens(2)
devl = dev.list()
iw = which(g$MAINdev!=devl)
g$Pickdev = devl[iw[1]]
dev.set(g$Pickdev)
}
else
{
#### devl = dev.list()
#### jsc = 2-length(devl)
#### if(jsc>0) { X11(width = 12, height = 7); Pickdev = dev.cur() }
dev.set(g$Pickdev)
}
# Pick window: span of the last two clicks, else the current display window.
if(g$zenclick>2)
{
pickwin = range( c(g$zloc$x[(g$zenclick-1)], g$zloc$x[(g$zenclick-2)]))
}
else
{
pickwin = g$WIN
}
# Button labels for the swig() pick session (standard and paddle buttons).
PICKLAB = c("DONE", "Ppic", "Spic", "ZOOM.out","ZOOM.in", "REFRESH", "RESTORE",
"FILT", "UNFILT", "Pinfo", "WINFO", "ROT.RT")
PLAB=c( "Apic", "Pup", "Pdown", "Pnil", "AUTOP",
"NOPIX", "EDIX", "REPIX")
stit = nh$STNS[ma[1]]
## SWP = selAPX(WPX, nh$STNS[ma[1]], icomp=NULL )
## print(data.frame(SWP))
## SWP = rectifyAPX(SWP)
##
## print(SWP)
newpicks = RSEIS::swig(nh, APIX=g$WPX, sel=ma, WIN=pickwin,
STDLAB=PICKLAB ,PADDLAB=PLAB, PHASE=1 ,
SHOWONLY = FALSE, TIT=stit)
# Merge picks returned by the sub-window, if any.
if(length(newpicks$g$WPX)>=1)
{
if(!is.null(newpicks$g$WPX))
{
g$WPX = newpicks$g$WPX
}
}
##
##
#### print(cbind(WPX$name, WPX$comp, WPX$phase, WPX$onoff))
g$NPX = length(g$WPX$name)
#### print(paste(sep=' ', "DONE with PICKWIN", g$NPX))
# Hand focus back to the main display device.
dev.set( g$MAINdev)
}
# Reset click state and ask the caller to redraw.
g$zloc = list(x=NULL, y=NULL)
g$action = "replot"
invisible(list(NH=nh, global.vars=g))
}
| /R/Pick3.R | no_license | cran/Rquake | R | false | false | 3,133 | r | Pick3<-function(nh, g)
{
kix = RSEIS::legitpix(g$sel, g$zloc, g$zenclick)
ypick = kix$ypick
ppick = kix$ppick
if(length(ppick)>0)
{
ipick = g$sel[ypick]
ipick = ipick[length(ipick)]
## cat(paste(sep=" ", ypick, ipick), sep="\n")
## print(ipick)
##
ma = which(!is.na(match( nh$STNS, nh$STNS[ipick])))
########## sort so Vertical is on top and then North and East
acomp = nh$COMPS[ma]
icomp = rep(0, length(acomp))
icomp[acomp=="V"] = 1
icomp[acomp=="N"] = 2
icomp[acomp=="E"] = 3
ma = ma[order(icomp)]
#### print(cbind(nh$STNS[ma], nh$COMPS[ma]))
if(is.null(g$Pickdev))
{
#### X11(width = 12, height = 7)
RSEIS::screens(2)
devl = dev.list()
iw = which(g$MAINdev!=devl)
g$Pickdev = devl[iw[1]]
dev.set(g$Pickdev)
}
else
{
#### devl = dev.list()
#### jsc = 2-length(devl)
#### if(jsc>0) { X11(width = 12, height = 7); Pickdev = dev.cur() }
dev.set(g$Pickdev)
}
if(g$zenclick>2)
{
pickwin = range( c(g$zloc$x[(g$zenclick-1)], g$zloc$x[(g$zenclick-2)]))
}
else
{
pickwin = g$WIN
}
PICKLAB = c("DONE", "Ppic", "Spic", "ZOOM.out","ZOOM.in", "REFRESH", "RESTORE",
"FILT", "UNFILT", "Pinfo", "WINFO", "ROT.RT")
PLAB=c( "Apic", "Pup", "Pdown", "Pnil", "AUTOP",
"NOPIX", "EDIX", "REPIX")
stit = nh$STNS[ma[1]]
## SWP = selAPX(WPX, nh$STNS[ma[1]], icomp=NULL )
## print(data.frame(SWP))
## SWP = rectifyAPX(SWP)
##
## print(SWP)
newpicks = RSEIS::swig(nh, APIX=g$WPX, sel=ma, WIN=pickwin,
STDLAB=PICKLAB ,PADDLAB=PLAB, PHASE=1 ,
SHOWONLY = FALSE, TIT=stit)
if(length(newpicks$g$WPX)>=1)
{
if(!is.null(newpicks$g$WPX))
{
g$WPX = newpicks$g$WPX
}
}
##
##
#### print(cbind(WPX$name, WPX$comp, WPX$phase, WPX$onoff))
g$NPX = length(g$WPX$name)
#### print(paste(sep=' ', "DONE with PICKWIN", g$NPX))
dev.set( g$MAINdev)
}
g$zloc = list(x=NULL, y=NULL)
g$action = "replot"
invisible(list(NH=nh, global.vars=g))
}
|
#####################################################
##
## Tidy Gapminder data
## Workshop Date: 2018-7-11
## Prepared by Jamie Bono (jamie@buffalo.edu)
##
####################################################

# Examine the data structures ====

# Using base functions ----
View(gapminder)
View(gapminder_from_csv)

str(gapminder)
str(gapminder_from_csv)

summary(gapminder)
summary(gapminder_from_csv)

# Using dplyr functions ----
glimpse(gapminder)
glimpse(gapminder_from_csv)

###############################################
# Goal: To get gapminder_from_csv to match structure of gapminder
###############################################

# A single mutate() converts both character columns to factors.
gapminder_clean <- gapminder_from_csv %>%
  mutate(country = as.factor(country),     # Convert character to factor
         continent = as.factor(continent)) # Convert character to factor

summary(gapminder)
summary(gapminder_clean)

glimpse(gapminder)
glimpse(gapminder_clean)

# Clean up workspace ====
# rm(gapminder_clean, gapminder_from_csv)
| /R/03_tidy.R | no_license | UB-BiomedicalInformatics/orientation | R | false | false | 1,019 | r | #####################################################
##
## Tidy Gapminder data
## Workshop Date: 2018-7-11
## Prepared by Jamie Bono (jamie@buffalo.edu)
##
####################################################
# Examine the data structures ====
# Using base functions ----
View(gapminder)
View(gapminder_from_csv)
str(gapminder)
str(gapminder_from_csv)
summary(gapminder)
summary(gapminder_from_csv)
# Using dplyr functions ----
glimpse(gapminder)
glimpse(gapminder_from_csv)
###############################################
# Goal: To get gapminder_from_csv to match structure of gapminder
###############################################
gapminder_clean <- gapminder_from_csv %>%
mutate(country = as.factor(country)) %>% # Convert character to factor
mutate(continent = as.factor(continent)) # Convert character to factor
summary(gapminder)
summary(gapminder_clean)
glimpse(gapminder)
glimpse(gapminder_clean)
# Clean up workspace ====
# rm(gapminder_clean, gapminder_from_csv)
|
## Cache-aware matrix wrapper, used together with cacheSolve() to avoid
## recomputing a matrix inverse.  The returned object is a list of four
## closures sharing one environment that holds the matrix and its cached
## inverse:
##   set(y)              -- store a new matrix and invalidate the cache
##   get()               -- return the stored matrix
##   setinverse(inverse) -- store a computed inverse in the cache
##   getinverse()        -- return the cached inverse, or NULL if unset
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Superassignment writes into the shared enclosing environment,
    # so every closure sees the update.
    x <<- y
    cached_inverse <<- NULL  # any previously cached inverse is now stale
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Return the inverse of the matrix held by a makeCacheMatrix() wrapper.
## A previously cached inverse is returned directly (after printing a
## status message); otherwise the inverse is computed with solve(),
## stored back into the wrapper's cache, and returned.
## This function assumes the wrapped matrix is always invertible.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time.
    cached <- solve(x$get())
    x$setinverse(cached)
  } else {
    message("getting cached data.")
  }
  cached
}
## Sample run:
## > x=rbind(c(1,1/8),c(1/8,1))
## > m=makeCacheMatrix(x)
## > m$get()
## [,1] [,2]
## [1,] 1.000 0.125
## [2,] 0.125 1.000
## > cacheSolve(m)
## [,1] [,2]
## [1,] 1.0158730 -0.1269841
## [2,] -0.1269841 1.0158730
## > cacheSolve(m)
## getting cached data.
## [,1] [,2]
## [1,] 1.0158730 -0.1269841
## [2,] -0.1269841 1.0158730
## > | /cachematrix.R | no_license | a-martinez0681/ProgrammingAssignment2 | R | false | false | 1,562 | r | ## These two functions are used to cache the inverse of a matrix
## rather than repeatedly performing costly computations
## makeCacheMatrix creates a list containing a function to
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of inverse of the matrix
## 4. get the value of inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
# The following function returns the inverse of the matrix. It first checks if
# the inverse has already been computed. If so, it gets the result and skips the
# computation. If not, it computes the inverse, sets the value in the cache via
# setinverse function.
# This function assumes that the matrix is always invertible.
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data.")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setinverse(inv)
inv
}
## Sample run:
## > x=rbind(c(1,1/8),c(1/8,1))
## > m=makeCacheMatrix(x)
## > m$get()
## [,1] [,2]
## [1,] 1.000 0.125
## [2,] 0.125 1.000
## > cacheSolve(m)
## [,1] [,2]
## [1,] 1.0158730 -0.1269841
## [2,] -0.1269841 1.0158730
## > cacheSolve(m)
## getting cached data.
## [,1] [,2]
## [1,] 1.0158730 -0.1269841
## [2,] -0.1269841 1.0158730
## > |
library("matrixStats")
## Pure-R reference implementation used by the subset tests: reduce every
## row of `x` with `FUN` (prod by default), forwarding na.rm.  The `...`
## argument is accepted for signature compatibility but, exactly as in
## the original, is not forwarded to `FUN`.
rowProds_R <- function(x, FUN = prod, na.rm = FALSE, ...) {
  reduce_row <- function(row) FUN(row, na.rm = na.rm)
  apply(x, MARGIN = 1L, FUN = reduce_row)
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Subsetted tests
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# The sourced framework supplies `index_cases` (the index vectors iterated
# below) and validateIndicesTestMatrix() -- presumably covering positive,
# negative, logical, and empty selections; confirm against
# utils/validateIndicesFramework.R.
source("utils/validateIndicesFramework.R")
# 6x6 test matrix with values drawn from [-6, 6], stored as integer.
x <- matrix(runif(6 * 6, min = -6, max = 6), nrow = 6, ncol = 6)
storage.mode(x) <- "integer"
# Cross every row-index case with every column-index case, with and
# without NA removal, checking the subsetted rowProds()/colProds()
# implementations against the pure-R reference rowProds_R() above.
for (rows in index_cases) {
  for (cols in index_cases) {
    for (na.rm in c(TRUE, FALSE)) {
      # Row products of the subsetted matrix must match the reference.
      validateIndicesTestMatrix(x, rows, cols,
                                ftest = rowProds, fsure = rowProds_R,
                                method = "expSumLog",
                                FUN = product, na.rm = na.rm)
      # Column products (via fcoltest) must match the same reference.
      validateIndicesTestMatrix(x, rows, cols,
                                fcoltest = colProds, fsure = rowProds_R,
                                method = "expSumLog",
                                FUN = product, na.rm = na.rm)
    }
  }
}
| /tests/rowProds_subset.R | no_license | const-ae/matrixStats | R | false | false | 1,000 | r | library("matrixStats")
rowProds_R <- function(x, FUN = prod, na.rm = FALSE, ...) {
apply(x, MARGIN = 1L, FUN = FUN, na.rm = na.rm)
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Subsetted tests
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
source("utils/validateIndicesFramework.R")
x <- matrix(runif(6 * 6, min = -6, max = 6), nrow = 6, ncol = 6)
storage.mode(x) <- "integer"
for (rows in index_cases) {
for (cols in index_cases) {
for (na.rm in c(TRUE, FALSE)) {
validateIndicesTestMatrix(x, rows, cols,
ftest = rowProds, fsure = rowProds_R,
method = "expSumLog",
FUN = product, na.rm = na.rm)
validateIndicesTestMatrix(x, rows, cols,
fcoltest = colProds, fsure = rowProds_R,
method = "expSumLog",
FUN = product, na.rm = na.rm)
}
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting_functions.R
\name{plot_tree}
\alias{plot_tree}
\title{AAA}
\usage{
plot_tree(
data,
slingshot_curves,
gene,
rotate90 = F,
assay = "RNA",
edge.weights = F,
pal = c("grey90", "grey70", "blue3", "navy"),
minsize = 0.5,
sizefactor = 2,
...
)
}
\description{
AAA
}
\details{
AAA
}
| /man/plot_tree.Rd | permissive | czarnewski/niceRplots | R | false | true | 383 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting_functions.R
\name{plot_tree}
\alias{plot_tree}
\title{AAA}
\usage{
plot_tree(
data,
slingshot_curves,
gene,
rotate90 = F,
assay = "RNA",
edge.weights = F,
pal = c("grey90", "grey70", "blue3", "navy"),
minsize = 0.5,
sizefactor = 2,
...
)
}
\description{
AAA
}
\details{
AAA
}
|
library(dplyr)
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025
method <- 'fm'
scenario <- 25
param <- 1
anal_type <- "mice"
ss <- ss.bounds%>%
dplyr::filter(method == "fm", scenario.id == scenario)
do_val <- 0.05
x1 <- parallel::mclapply(X = 1:10000,
mc.cores = parallel::detectCores() - 1,
FUN= function(x) {
library(tidyr, warn.conflicts = F, quietly = T)
library(dplyr, warn.conflicts = F, quietly = T)
library(purrr, warn.conflicts = F, quietly = T)
library(reshape2, warn.conflicts = F, quietly = T)
library(MASS, warn.conflicts = F, quietly = T)
library(nibinom)
set.seed(10000*scenario + x)
#generate full data with desired correlation structure
dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C - ss$M2, n_arm = ss$n.arm,
mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
ci.full <- dt0%>%fm_ci(ss$M2,'y', alpha)
#define missingness parameters and do rates
m_param <- mpars(do = do_val, atype = anal_type)
#impose missing values and perform analysis
ci.miss.mnar1 <- m_param%>%
slice(1)%>%
dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
miss_gen_an, dt = dt0, do = do_val,
ci_method = fm_ci,
sing_anal = F,
mice_anal = T,
m2 = ss$M2, seed = 10000*scenario + x,
seed_mice = 10000*scenario + x,
method = method,
alpha = alpha,
n_mi = 2,
m_mi = 100,
mu_T = 0.6, sd_T = 0.05))%>%
dplyr::select(missing, results)
ci.miss.mnar2 <- m_param%>%
slice(2)%>%
dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
miss_gen_an, dt = dt0, do = do_val,
ci_method = fm_ci,
sing_anal = F,
mice_anal = T,
m2 = ss$M2, seed = 10000*scenario + x,
seed_mice = 10000*scenario + x,
method = method,
alpha = alpha,
n_mi = 2,
m_mi = 100,
mu_T = 1.55, sd_T = 0.05))%>%
dplyr::select(missing, results)
ci.miss <- bind_rows(ci.miss.mnar1, ci.miss.mnar2)%>%
dplyr::mutate(scenario.id = ss$scenario.id,
p_C = ss$p_C,
M2 = ss$M2,
type = 't.H0',
do = do_val,
sim.id = x)
ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
return(ci.all)
})
#to summarize type-I error and mean relative bias from the simulated data
source('funs/h0.mice.sum.R')
h0.mice.sum(x1, method = 'fm')
| /sim_pgms/fm/do5/2xcontH0_sc25_do5_mice.R | no_license | yuliasidi/nibinom_apply | R | false | false | 3,318 | r | library(dplyr)
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025
method <- 'fm'
scenario <- 25
param <- 1
anal_type <- "mice"
ss <- ss.bounds%>%
dplyr::filter(method == "fm", scenario.id == scenario)
do_val <- 0.05
x1 <- parallel::mclapply(X = 1:10000,
mc.cores = parallel::detectCores() - 1,
FUN= function(x) {
library(tidyr, warn.conflicts = F, quietly = T)
library(dplyr, warn.conflicts = F, quietly = T)
library(purrr, warn.conflicts = F, quietly = T)
library(reshape2, warn.conflicts = F, quietly = T)
library(MASS, warn.conflicts = F, quietly = T)
library(nibinom)
set.seed(10000*scenario + x)
#generate full data with desired correlation structure
dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C - ss$M2, n_arm = ss$n.arm,
mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
ci.full <- dt0%>%fm_ci(ss$M2,'y', alpha)
#define missingness parameters and do rates
m_param <- mpars(do = do_val, atype = anal_type)
#impose missing values and perform analysis
ci.miss.mnar1 <- m_param%>%
slice(1)%>%
dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
miss_gen_an, dt = dt0, do = do_val,
ci_method = fm_ci,
sing_anal = F,
mice_anal = T,
m2 = ss$M2, seed = 10000*scenario + x,
seed_mice = 10000*scenario + x,
method = method,
alpha = alpha,
n_mi = 2,
m_mi = 100,
mu_T = 0.6, sd_T = 0.05))%>%
dplyr::select(missing, results)
ci.miss.mnar2 <- m_param%>%
slice(2)%>%
dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
miss_gen_an, dt = dt0, do = do_val,
ci_method = fm_ci,
sing_anal = F,
mice_anal = T,
m2 = ss$M2, seed = 10000*scenario + x,
seed_mice = 10000*scenario + x,
method = method,
alpha = alpha,
n_mi = 2,
m_mi = 100,
mu_T = 1.55, sd_T = 0.05))%>%
dplyr::select(missing, results)
ci.miss <- bind_rows(ci.miss.mnar1, ci.miss.mnar2)%>%
dplyr::mutate(scenario.id = ss$scenario.id,
p_C = ss$p_C,
M2 = ss$M2,
type = 't.H0',
do = do_val,
sim.id = x)
ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
return(ci.all)
})
#to summarize type-I error and mean relative bias from the simulated data
source('funs/h0.mice.sum.R')
h0.mice.sum(x1, method = 'fm')
|
library(ipred)
library(RecordLinkage)
library(ff)
# stopifnot(library(SOAR))
ri <- integer(10)
fi <- ff(vmode="integer", length = 10)
fb <- ff(vmode="byte", length = 10)
rb <- byte(length(fb))
fb <- ff(rb)
vmode(ri)
vmode(fi)
vmode(fb)
vmode(rb)
str(chunk(fd))
chunk(fd)
memory.limit()
library(RJDBC)
library(sqlutils)
drive <- JDBC(driverClass = "oracle.jdbc.OracleDriver", classPath = paste(getwd(), "/ojdbc6.jar", sep = ""), identifier.quote = " ")
con <- dbConnect(drv = drive, "jdbc:oracle:thin:@localhost:1521/xe", "hr", "admin")
si3.fonetic <- dbGetQuery(con,"SELECT * FROM SI3_FONETIC")
# http://mon-ip.awardspace.com/bytes_conversor.php
# format(x = object.size(si3.fonetic),quote = F, units = "MB")
# 117304288 / (1024) # KB mil bytes
# 117304288 / (1024 ^ 2) # MB mil kilobytes
# 117304288 / (1024 ^ 3) # GB mil megabytes
# 117304288 / (1024 ^ 4)
object.size(ls())
# registros que nao possue valor no campo 5
si3.fonetic[is.na(si3.fonetic[, 5]), ]
si3.fonetic[!is.na(si3.fonetic[, 5]), ]
nrow(si3.fonetic[is.na(si3.fonetic[, 5]), ])
nrow(si3.fonetic)
# sqlexec(connection = "jdbc:oracle:thin:@localhost:1521/xe", sql = "SELECT * FROM SI3_FONETIC")
############################################################################################
# trabalhando com o banco do sistema SIM fonetizado
sim.fonetic <- dbGetQuery(con, "SELECT * FROM SIM_FONETIC")
object.size(x = sim.fonetic) #1093.76 MB
object.size(ls())
# registros que nao possuem valor no campo 5
# sim.fonetic[is.na(sim.fonetic[, 5]), ]
# transformando os campos SEXO e FO_CD_TODOS_NM em factor
sim.fonetic[, c(1,3)] <- as.factor(sim.fonetic[, c(1,3)])
is.factor(x = sim.fonetic[1:100, 3])
# teste com a biblioteca SOAR
library(SOAR)
source(file = "UtilsRecordLinkage.R")
nrow(sim.fonetic)
sim.fonetic.na <- listNa(X = sim.fonetic, fields = 5)
SOAR::Store(sim.fonetic.na)
nrow(sim.fonetic.na)
# listNa(X = sim.fonetic, fields = c(2, 5))[1:3, ]
# listNotNa(X = sim.fonetic, fields = c(2, 5))[1:3, ]
# nrow(sim.fonetic[sim.fonetic[, 1] == "I", ])
# nrow(sim.fonetic[sim.fonetic[, 1] == "F", ])
# nrow(sim.fonetic[sim.fonetic[, 1] == "M", ])
sim.fonetic.notna <- listNotNa(X = sim.fonetic, fields = 5)
# numero de registros que nao possue nao possuem valor no campo 5
# nrow(sim.fonetic[is.na(sim.fonetic[, 5]), ])
nrow(sim.fonetic.notna) #com o nome da mae: 9696098
SOAR::Store(sim.fonetic.notna)
sim.fonetic.notna <- listNotNa(X = sim.fonetic.notna, fields = 2)
nrow(sim.fonetic.notna) #com data de nascimento: 9667047
sim.fonetic.notna <- listNotNa(X = sim.fonetic.notna, fields = 1)
nrow(sim.fonetic.notna) #com sexo: 9667047
sim.fonetic.notna <- listNotNa(X = sim.fonetic.notna, fields = 3)
nrow(sim.fonetic.notna)# com FO_CD_TODOS_NM #9667047
sim.fonetic.notna <- listNotNa(X = sim.fonetic.notna, fields = 4)
nrow(sim.fonetic.notna)# com FO_CD_PRI_ULT_NM #9643392
SOAR::Store(sim.fonetic.notna)
id.sim.notna <- as.integer(row.names(x = sim.fonetic.notna))
# attributes(RLdata500)
# levels(x = RLdata500[1, 6])
# attributes(sim.fonetic.notna)
# levels(x = sim.fonetic.notna[, 1])
dedup.sim.fonetic <- RecordLinkage::compare.dedup(dataset = sim.fonetic.notna[1:300, ], blockfld = list(1, 2, 3), strcmp = T, identity = id.sim.notna[1:300])
SOAR::Store(dedup.sim.fonetic)
# analise dos pares que a biblioteca comparou para achar os duplicados
pairs.sim.fonetic <- dedup.sim.fonetic$pairs
pairs.sim.fonetic[pairs.sim.fonetic[, 8] == 1, ]
pairs.sim.fonetic[pairs.sim.fonetic[, c(4)] >= 0.8, ]
pairs.sim.fonetic[pairs.sim.fonetic[, c(4)] >= 0.8 & pairs.sim.fonetic[, c(5)] >= 0.7, ]
sim.fonetic.notna[c(86, 143), ]
nrow(pairs.sim.fonetic[pairs.sim.fonetic[, c(4)] >= 0.8, ])
sim.fonetic.notna[c(4, 183), ]
sim.fonetic.notna[!is.na(sim.fonetic.notna[, 2]), ]
system.time(expr = !is.na(sim.fonetic.notna[, 2]))
listNa(X = sim.fonetic.notna, fields = 2)
listNotNa(X = sim.fonetic.notna, fields = 2)
# fim analise dos pares que a biblioteca comparou para achar os duplicados
rm(list = ls())
ls()
lapply(ls(), function(x) object.size(x))
X <- data.frame(rnorm(1000000*50)) # 381 megabytes
vars <- var(X)
cols <- colMeans(X)
SOAR::Store(X, vars)
SOAR::Store(cols)
SOAR::Store(RLdata500, identity.RLdata500)
find(what = "sim.fonetic")
SOAR::StoreData(sim.fonetic)
SOAR::Store(SOAR::LsData())
SOAR::Ls() # return objects stored by Soar::Store
objects()
# teste com RLdata
nrow(RLdata500[is.na(RLdata500[, c(1)]), ]) # 0
nrow(RLdata500[is.na(RLdata500[, c(2)]), ]) # 472
nrow(RLdata500[is.na(RLdata500[, c(3)]), ]) # 0
nrow(RLdata500[is.na(RLdata500[, c(4)]), ]) # 492
nrow(RLdata500[is.na(RLdata500[, c(5)]), ]) # 0
#pessoas que possuem o segundo nome mas nao o utlimo
sn.rldata500 <- RLdata500[!is.na(RLdata500[, c(2)]), ]
sn.rldata500[!is.na(sn.rldata500[, c(4)]), ]
#pessoas que possuem o utlimo nome mas nao o utlimo mas nao o segundo
ln.rldata500 <- RLdata500[!is.na(RLdata500[, c(4)]), ]
ln.rldata500[!is.na(ln.rldata500[, c(2)]), ]
# vect <- c(1:10)
# vect[ifelse(test = vect %% 2 == 0, yes = T, no = F)]
?cacheQuery
# https://stat.ethz.ch/R-manual/R-devel/library/utils/html/object.size.html
object.size(x = sim.fonetic) / (1024 ^ 2)
format(x = object.size(x = sim.fonetic), quote = F, units = "MB")
format(x = object.size(x = sim.fonetic), quote = F, units = "auto")
?memory.size # retorna a atual quantidade de momoria alocada atual ou a maxima
# pela funcao malloc usada no R, dependendo do parametro max que eh booleano
# se max = T retorna a maxima quantidade de memoria obtida pelo SO, do contrario
# retorna a quantidade atualmente usada se NA retorna a quantidade limite
?system.time # retorna o tem que a CPU demorou para processar uma expressao
?unix.time
system.time(x <- runif(1e9) + runif(1E3))
print(x, maxlength=4)
memory.size(max=FALSE)
memory.size(max=TRUE)
system.time(x <- runif(1e7) + runif(1E7))
memory.size(max=FALSE)
memory.size(max=TRUE)
memory.size(max=NA)
| /rscripts/RecordLinkageStudy/CachingV2.R | no_license | yngcan/R-programming | R | false | false | 5,968 | r | library(ipred)
library(RecordLinkage)
library(ff)
# stopifnot(library(SOAR))
ri <- integer(10)
fi <- ff(vmode="integer", length = 10)
fb <- ff(vmode="byte", length = 10)
rb <- byte(length(fb))
fb <- ff(rb)
vmode(ri)
vmode(fi)
vmode(fb)
vmode(rb)
str(chunk(fd))
chunk(fd)
memory.limit()
library(RJDBC)
library(sqlutils)
drive <- JDBC(driverClass = "oracle.jdbc.OracleDriver", classPath = paste(getwd(), "/ojdbc6.jar", sep = ""), identifier.quote = " ")
con <- dbConnect(drv = drive, "jdbc:oracle:thin:@localhost:1521/xe", "hr", "admin")
si3.fonetic <- dbGetQuery(con,"SELECT * FROM SI3_FONETIC")
# http://mon-ip.awardspace.com/bytes_conversor.php
# format(x = object.size(si3.fonetic),quote = F, units = "MB")
# 117304288 / (1024) # KB mil bytes
# 117304288 / (1024 ^ 2) # MB mil kilobytes
# 117304288 / (1024 ^ 3) # GB mil megabytes
# 117304288 / (1024 ^ 4)
object.size(ls())
# registros que nao possue valor no campo 5
si3.fonetic[is.na(si3.fonetic[, 5]), ]
si3.fonetic[!is.na(si3.fonetic[, 5]), ]
nrow(si3.fonetic[is.na(si3.fonetic[, 5]), ])
nrow(si3.fonetic)
# sqlexec(connection = "jdbc:oracle:thin:@localhost:1521/xe", sql = "SELECT * FROM SI3_FONETIC")
############################################################################################
# trabalhando com o banco do sistema SIM fonetizado
sim.fonetic <- dbGetQuery(con, "SELECT * FROM SIM_FONETIC")
object.size(x = sim.fonetic) #1093.76 MB
object.size(ls())
# registros que nao possuem valor no campo 5
# sim.fonetic[is.na(sim.fonetic[, 5]), ]
# transformando os campos SEXO e FO_CD_TODOS_NM em factor
sim.fonetic[, c(1,3)] <- as.factor(sim.fonetic[, c(1,3)])
is.factor(x = sim.fonetic[1:100, 3])
# teste com a biblioteca SOAR
library(SOAR)
source(file = "UtilsRecordLinkage.R")
nrow(sim.fonetic)
sim.fonetic.na <- listNa(X = sim.fonetic, fields = 5)
SOAR::Store(sim.fonetic.na)
nrow(sim.fonetic.na)
# listNa(X = sim.fonetic, fields = c(2, 5))[1:3, ]
# listNotNa(X = sim.fonetic, fields = c(2, 5))[1:3, ]
# nrow(sim.fonetic[sim.fonetic[, 1] == "I", ])
# nrow(sim.fonetic[sim.fonetic[, 1] == "F", ])
# nrow(sim.fonetic[sim.fonetic[, 1] == "M", ])
sim.fonetic.notna <- listNotNa(X = sim.fonetic, fields = 5)
# numero de registros que nao possue nao possuem valor no campo 5
# nrow(sim.fonetic[is.na(sim.fonetic[, 5]), ])
nrow(sim.fonetic.notna) #com o nome da mae: 9696098
SOAR::Store(sim.fonetic.notna)
sim.fonetic.notna <- listNotNa(X = sim.fonetic.notna, fields = 2)
nrow(sim.fonetic.notna) #com data de nascimento: 9667047
sim.fonetic.notna <- listNotNa(X = sim.fonetic.notna, fields = 1)
nrow(sim.fonetic.notna) #com sexo: 9667047
sim.fonetic.notna <- listNotNa(X = sim.fonetic.notna, fields = 3)
nrow(sim.fonetic.notna)# com FO_CD_TODOS_NM #9667047
sim.fonetic.notna <- listNotNa(X = sim.fonetic.notna, fields = 4)
nrow(sim.fonetic.notna)# com FO_CD_PRI_ULT_NM #9643392
SOAR::Store(sim.fonetic.notna)
id.sim.notna <- as.integer(row.names(x = sim.fonetic.notna))
# attributes(RLdata500)
# levels(x = RLdata500[1, 6])
# attributes(sim.fonetic.notna)
# levels(x = sim.fonetic.notna[, 1])
dedup.sim.fonetic <- RecordLinkage::compare.dedup(dataset = sim.fonetic.notna[1:300, ], blockfld = list(1, 2, 3), strcmp = T, identity = id.sim.notna[1:300])
SOAR::Store(dedup.sim.fonetic)
# analise dos pares que a biblioteca comparou para achar os duplicados
pairs.sim.fonetic <- dedup.sim.fonetic$pairs
pairs.sim.fonetic[pairs.sim.fonetic[, 8] == 1, ]
pairs.sim.fonetic[pairs.sim.fonetic[, c(4)] >= 0.8, ]
pairs.sim.fonetic[pairs.sim.fonetic[, c(4)] >= 0.8 & pairs.sim.fonetic[, c(5)] >= 0.7, ]
sim.fonetic.notna[c(86, 143), ]
nrow(pairs.sim.fonetic[pairs.sim.fonetic[, c(4)] >= 0.8, ])
sim.fonetic.notna[c(4, 183), ]
sim.fonetic.notna[!is.na(sim.fonetic.notna[, 2]), ]
system.time(expr = !is.na(sim.fonetic.notna[, 2]))
listNa(X = sim.fonetic.notna, fields = 2)
listNotNa(X = sim.fonetic.notna, fields = 2)
# fim analise dos pares que a biblioteca comparou para achar os duplicados
rm(list = ls())
ls()
lapply(ls(), function(x) object.size(x))
X <- data.frame(rnorm(1000000*50)) # 381 megabytes
vars <- var(X)
cols <- colMeans(X)
SOAR::Store(X, vars)
SOAR::Store(cols)
SOAR::Store(RLdata500, identity.RLdata500)
find(what = "sim.fonetic")
SOAR::StoreData(sim.fonetic)
SOAR::Store(SOAR::LsData())
SOAR::Ls() # return objects stored by Soar::Store
objects()
# teste com RLdata
nrow(RLdata500[is.na(RLdata500[, c(1)]), ]) # 0
nrow(RLdata500[is.na(RLdata500[, c(2)]), ]) # 472
nrow(RLdata500[is.na(RLdata500[, c(3)]), ]) # 0
nrow(RLdata500[is.na(RLdata500[, c(4)]), ]) # 492
nrow(RLdata500[is.na(RLdata500[, c(5)]), ]) # 0
#pessoas que possuem o segundo nome mas nao o utlimo
sn.rldata500 <- RLdata500[!is.na(RLdata500[, c(2)]), ]
sn.rldata500[!is.na(sn.rldata500[, c(4)]), ]
#pessoas que possuem o utlimo nome mas nao o utlimo mas nao o segundo
ln.rldata500 <- RLdata500[!is.na(RLdata500[, c(4)]), ]
ln.rldata500[!is.na(ln.rldata500[, c(2)]), ]
# vect <- c(1:10)
# vect[ifelse(test = vect %% 2 == 0, yes = T, no = F)]
?cacheQuery
# https://stat.ethz.ch/R-manual/R-devel/library/utils/html/object.size.html
object.size(x = sim.fonetic) / (1024 ^ 2)
format(x = object.size(x = sim.fonetic), quote = F, units = "MB")
format(x = object.size(x = sim.fonetic), quote = F, units = "auto")
?memory.size # retorna a atual quantidade de momoria alocada atual ou a maxima
# pela funcao malloc usada no R, dependendo do parametro max que eh booleano
# se max = T retorna a maxima quantidade de memoria obtida pelo SO, do contrario
# retorna a quantidade atualmente usada se NA retorna a quantidade limite
?system.time # retorna o tem que a CPU demorou para processar uma expressao
?unix.time
system.time(x <- runif(1e9) + runif(1E3))
print(x, maxlength=4)
memory.size(max=FALSE)
memory.size(max=TRUE)
system.time(x <- runif(1e7) + runif(1E7))
memory.size(max=FALSE)
memory.size(max=TRUE)
memory.size(max=NA)
|
rankall <- function(outcome, num = 'best') {
  ## For every state, find the hospital at rank `num` when hospitals are
  ## ranked by ascending 30-day mortality rate for the given outcome.
  ##
  ## Args:
  ##   outcome: one of 'heart attack', 'heart failure', 'pneumonia'.
  ##   num:     'best', 'worst', or a positive integer rank.
  ## Returns:
  ##   A data.frame with one row per state and columns `state` and
  ##   `hospital`; `hospital` is NA when the state has no hospital at the
  ##   requested rank (e.g. the rank exceeds the number of hospitals with
  ##   a reported rate).
  ## Stops with 'invalid outcome' for any other outcome string.

  ## Read outcome data (all columns as character; rates coerced per state).
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")

  ## Map the human-readable outcome onto its mortality-rate column.
  if (outcome == 'heart attack') {
    x <- 'Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack'
  } else if (outcome == 'heart failure') {
    x <- 'Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure'
  } else if (outcome == 'pneumonia') {
    x <- 'Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia'
  } else {
    stop('invalid outcome')
  }

  ## Alphabetize hospital names that share the same rate while keeping
  ## the overall rate ordering.  Entries whose rate is NA fall outside
  ## every split() group and come back as NA from unsplit().
  sameraterearrange <- function(namerank, raterank) {
    by_rate <- split(namerank, raterank)  # group names by (non-NA) rate
    unsplit(sapply(by_rate, sort), raterank)
  }

  ## Per-state groupings of rates, hospital names, and the state label.
  Y <- split(data[x], data$State)
  name <- split(data$Hospital.Name, data$State)
  name2 <- split(data$State, data$State)
  numb_state <- length(name)

  hospital <- character(numb_state)
  state_name <- character(numb_state)
  for (state in seq_len(numb_state)) {
    hospital_name <- name[[state]]
    state_name[state] <- name2[[state]][1]

    ## Rates for this state; non-numeric entries become NA (with warning).
    k <- as.numeric(unlist(Y[state]))

    ## Order hospitals by ascending rate, NAs last.  last_rank counts the
    ## hospitals with a usable (non-NA) rate, i.e. the worst valid rank.
    rank <- order(k, na.last = TRUE)
    raterank <- k[rank]
    namerank <- hospital_name[rank]
    last_rank <- sum(!is.na(k))
    if (last_rank > 0) {
      namerank <- sameraterearrange(namerank, raterank)
    }

    ## Resolve 'best'/'worst' to a concrete numeric rank.
    if (num == 'best') {
      rankth <- 1
    } else if (num == 'worst') {
      rankth <- last_rank
    } else {
      rankth <- num
    }

    ## A rank outside 1..last_rank has no hospital.  This also guards the
    ## zero-length case where num == 'worst' but every rate is NA
    ## (rankth == 0), which previously raised a replacement-length error.
    if (is.na(rankth) || rankth < 1 || rankth > last_rank) {
      hospital[state] <- NA_character_
    } else {
      hospital[state] <- namerank[rankth]
    }
  }

  data.frame(state = state_name, hospital = hospital)
}
| /R_programming/assign3-rankall.R | no_license | shepherdmeng/data_science_coursera | R | false | false | 3,145 | r | rankall <- function(outcome,num='best'){
## Read outcome data
data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
if (outcome == 'heart attack'){
x <- 'Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack'
} else if (outcome == 'heart failure') {
x <- 'Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure'
} else if (outcome == 'pneumonia') {
x <- 'Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia'
} else {
stop('invalid outcome')
}
Y <- split(data[x],data$State)
name <- split(data$Hospital.Name,data$State)
name2 <- split(data$State,data$State)
numb_state <- length(as.character(name)) ## this counts how many different states (group)
#print(numb_state)
hospital <- c( )
state_name <- c( )
for (state in 1 : numb_state){
hospital_name <- name[[state]]
name3 <- name2[[state]]
state_name[state] <- name3[1]
#print(name3[1])
sample_numb <- length(hospital_name)
k <- as.numeric(unlist(Y[state])) # to convert the characters from table to numbers
#print(k)
rank <- order(k,na.last =TRUE) # this gives position of k with rate in ascending order.
raterank <- k[rank[1:sample_numb]] # this rearrange a vector of k with rates in ascending order.
b <- is.na(k)
last_rank <- length(b[b==FALSE]) # this gives the number of non-NA elements of rearranged vector.
namerank <- hospital_name[rank[1:sample_numb]] # this gives a vector of hospital names with rate...
#print(data.frame(Hospital.Name=namerank,Rate=raterank,Rank=1:sample_numb))
#print(last_rank)
# the following logical is to determine, best, last, or certain rank
if (num =='best'){
rankth =1
} else if (num == 'worst') {
rankth =last_rank
} else {
rankth = num
}
#print(rankth)
sameraterearrange <- function(namerank,raterank){
## this function is used to rearrange the vector with rate and alphabetically.
name <- split(namerank,raterank)
numb_rate <- length(as.character(name))
#print(numb_rate)
#print(sapply(name,sort))
name2 <- sapply(name,sort)
return(unsplit(name2,raterank))
}
#print(data.frame(Hospital.Name=namerank[1:rankth],Rate=raterank[1:rankth],Rank=1:rankth))
namerank <- sameraterearrange(namerank,raterank)
#print(data.frame(Hospital.Name=namerank[1:rankth],Rate=raterank[1:rankth],Rank=1:rankth))
hospital[state] <- namerank[rankth]
}
result <- data.frame(state=state_name,hospital=hospital)#,check.rows = FALSE)
return(result)
}
|
## Internal worker for h5read(): read one HDF5 dataset, optionally
## restricted to a selection.  Two mutually exclusive selection styles:
##   * index  -- list with one entry per dataset dimension; NULL entries
##               select the whole dimension.
##   * start / stride / count / block -- an HDF5 hyperslab selection,
##     forwarded to H5Sselect_hyperslab().
## compoundAsDataFrame and drop are passed through to H5Dread().
## Returns the data read, or NULL if the read failed.
h5readDataset <- function (h5dataset, index = NULL, start = NULL, stride = NULL,
    block = NULL, count = NULL, compoundAsDataFrame = TRUE, drop = FALSE, ...) {
    ## Open the dataset's file dataspace; on.exit() guarantees it is
    ## closed again however this function returns.
    try({
        h5spaceFile <- H5Dget_space(h5dataset)
        on.exit(H5Sclose(h5spaceFile))
    })
    h5spaceMem = NULL
    if (!is.null(index)) {
        ## Index-list selection: the caller must supply exactly one list
        ## entry per dataset dimension.
        s <- H5Sget_simple_extent_dims(h5spaceFile)$size
        if (length(index) != length(s)) {
            stop("length of index has to be equal to dimensional extension of HDF5 dataset.")
        }
        for (i in seq_len(length(index))) {
            if (is.null(index[[i]])) {
                ## NULL selects everything along this dimension.
                index[[i]] = seq_len(s[i])
                ## if we passed an object to the index, we need to get its values
            } else if ( is.name(index[[i]]) | is.call(index[[i]]) ) {
                index[[i]] <- eval(index[[i]])
            }
        }
        size = 0
        try({
            size = H5Sselect_index(h5spaceFile, index)
        })
        ## Memory dataspace sized to match the file-side selection.
        h5spaceMem = H5Screate_simple(size, native = h5dataset@native)
        on.exit(H5Sclose(h5spaceMem), add = TRUE)
    }
    else {
        ## Hyperslab selection, only when at least one of the four
        ## hyperslab arguments was supplied.
        if (any(c(!is.null(start), !is.null(stride),
            !is.null(count), !is.null(block)))) {
            size = 0
            try({
                size = H5Sselect_hyperslab(h5spaceFile,
                    start = start, stride = stride, count = count,
                    block = block)
            })
            h5spaceMem = H5Screate_simple(size, native = h5dataset@native)
            on.exit(H5Sclose(h5spaceMem), add = TRUE)
        }
    }
    obj <- NULL
    try({
        ## h5spaceMem stays NULL when no selection was requested, in which
        ## case H5Dread() reads the entire dataset.
        obj <- H5Dread(h5dataset = h5dataset, h5spaceFile = h5spaceFile,
            h5spaceMem = h5spaceMem,
            compoundAsDataFrame = compoundAsDataFrame, drop = drop, ...)
    })
    #if (!is.null(h5spaceMem)) {
    #    try({
    #        H5Sclose(h5spaceMem)
    #    })
    #}
    if (!is.null(index)) {
        ## The selection is read back in sorted, de-duplicated order (see
        ## the unique(sort(...))/match() bookkeeping), so map the result
        ## onto the caller's original -- possibly unsorted, possibly
        ## repeated -- index order.
        I = list()
        for (i in seq_len(length(index))) {
            tmp = unique(sort(index[[i]]))
            I[[i]] = match(index[[i]], tmp)
        }
        #####################
        ## This can take a long time for large datasets
        ## can we find rules for skipping it?
        #obj <- do.call("[", c(list(obj), I, drop = FALSE))
        ## Skip the (expensive) reordering when the requested order is
        ## already the natural 1..n order in every dimension.
        obj.dim <- lapply(dim(obj), FUN = seq_len)
        if(!identical(I, obj.dim)) {
            obj <- do.call("[", c(list(obj), I, drop = FALSE))
        }
        ####################
    }
    #try({
    #    H5Sclose(h5spaceFile)
    #})
    obj
}
## Read an object called `name` (a dataset or a group) from an HDF5 file.
##
## Args:
##   file            : file name or an already-open HDF5 location.
##   name            : path of the object inside the file.
##   index, start, stride, block, count : dataset selection; forwarded to
##                     h5readDataset() for datasets, h5dump() for groups.
##   compoundAsDataFrame : return compound datasets as data frames.
##   callGeneric     : if TRUE and the result carries a class attribute,
##                     dispatch to a matching h5read.<class>() if defined.
##   read.attributes : if TRUE, copy the object's HDF5 attributes (except
##                     "dim") onto the returned R object.
##   drop            : forwarded to the dataset reader.
##   native          : forwarded to the location-opening helper.
## Returns the object read, or NULL for unsupported object types.
h5read <- function(file, name, index=NULL, start=NULL, stride=NULL, block=NULL, count=NULL, compoundAsDataFrame = TRUE, callGeneric = TRUE, read.attributes=FALSE, drop = FALSE, ..., native = FALSE) {
  ## Open (or validate) the location read-only; always close it on exit.
  loc = h5checktypeOrOpenLoc(file, readonly=TRUE, native = native)
  on.exit( h5closeitLoc(loc) )
  if (!H5Lexists(loc$H5Identifier, name)) {
    stop("Object '", name, "' does not exist in this HDF5 file.")
  } else {
    ## Inspect the object to learn its type and its attribute count.
    oid = H5Oopen(loc$H5Identifier, name)
    type = H5Iget_type(oid)
    num_attrs = H5Oget_num_attrs(oid)
    if (is.na(num_attrs)) { num_attrs = 0 }
    H5Oclose(oid)
    if (type == "H5I_GROUP") {
      ## Groups are read recursively via h5dump().
      gid <- H5Gopen(loc$H5Identifier, name)
      obj = h5dump(gid, start=start, stride=stride, block=block, count=count, compoundAsDataFrame = compoundAsDataFrame, callGeneric = callGeneric, ...)
      H5Gclose(gid)
    } else {
      if (type == "H5I_DATASET") {
        try( { h5dataset <- H5Dopen(loc$H5Identifier, name) } )
        obj <- h5readDataset(h5dataset, index = index, start = start, stride = stride,
            block = block, count = count, compoundAsDataFrame = compoundAsDataFrame, drop = drop, ...)
        try( { H5Dclose(h5dataset) } )
        ## Optional S3-style dispatch: if the stored object carries a
        ## class and a function h5read.<class>() exists, let it
        ## post-process the raw data.
        cl <- attr(obj,"class")
        if (!is.null(cl) & callGeneric) {
          if (exists(paste("h5read",cl,sep="."),mode="function")) {
            obj <- do.call(paste("h5read",cl,sep="."), args=list(obj = obj))
          }
        }
      } else {
        message("Reading of object type not supported.")
        obj <- NULL
      } ## DATASET
    } ## GROUP
    ## Optionally attach HDF5 attributes to the R object; "dim" is
    ## skipped because it would clash with R's own dim attribute.
    if (read.attributes & (num_attrs > 0) & !is.null(obj)) {
      for (i in seq_len(num_attrs)) {
        A = H5Aopen_by_idx(loc$H5Identifier, n = i-1, objname = name)
        attrname <- H5Aget_name(A)
        if (attrname != "dim") {
          attr(obj, attrname) = H5Aread(A)
        }
        H5Aclose(A)
      }
    }
  } # !H5Lexists
  obj
}
| /R/h5read.R | no_license | hpages/rhdf5 | R | false | false | 4,820 | r | h5readDataset <- function (h5dataset, index = NULL, start = NULL, stride = NULL,
    block = NULL, count = NULL, compoundAsDataFrame = TRUE, drop = FALSE, ...) {
    ## Read (a selection of) an already-open HDF5 dataset into an R object.
    ##
    ## h5dataset: open dataset handle (from H5Dopen).
    ## index: list with one entry per dataset dimension; a NULL entry selects
    ##   the full extent of that dimension.  When 'index' is given it takes
    ##   precedence over the hyperslab arguments start/stride/block/count.
    ## compoundAsDataFrame, drop, ...: forwarded to H5Dread.
    ##
    ## Returns the object read, reordered to match the caller's 'index'.
    ## Failures inside try() are deliberately non-fatal so that the
    ## on.exit() handlers can still close the dataspace handles.
    try({
        h5spaceFile <- H5Dget_space(h5dataset)
        on.exit(H5Sclose(h5spaceFile))
    })
    h5spaceMem = NULL
    if (!is.null(index)) {
        ## index-based selection: validate against the dataset's extents
        s <- H5Sget_simple_extent_dims(h5spaceFile)$size
        if (length(index) != length(s)) {
            stop("length of index has to be equal to dimensional extension of HDF5 dataset.")
        }
        for (i in seq_len(length(index))) {
            if (is.null(index[[i]])) {
                ## NULL entry means "everything along this dimension"
                index[[i]] = seq_len(s[i])
                ## if we passed an object to the index, we need to get its values
            } else if ( is.name(index[[i]]) | is.call(index[[i]]) ) {
                index[[i]] <- eval(index[[i]])
            }
        }
        size = 0
        try({
            ## select the requested elements in the file dataspace;
            ## 'size' receives the dimensions of the selection
            size = H5Sselect_index(h5spaceFile, index)
        })
        h5spaceMem = H5Screate_simple(size, native = h5dataset@native)
        on.exit(H5Sclose(h5spaceMem), add = TRUE)
    }
    else {
        if (any(c(!is.null(start), !is.null(stride),
                  !is.null(count), !is.null(block)))) {
            ## hyperslab selection via start/stride/count/block
            size = 0
            try({
                size = H5Sselect_hyperslab(h5spaceFile,
                    start = start, stride = stride, count = count,
                    block = block)
            })
            h5spaceMem = H5Screate_simple(size, native = h5dataset@native)
            on.exit(H5Sclose(h5spaceMem), add = TRUE)
        }
    }
    obj <- NULL
    try({
        obj <- H5Dread(h5dataset = h5dataset, h5spaceFile = h5spaceFile,
            h5spaceMem = h5spaceMem,
            compoundAsDataFrame = compoundAsDataFrame, drop = drop, ...)
    })
    #if (!is.null(h5spaceMem)) {
    #    try({
    #        H5Sclose(h5spaceMem)
    #    })
    #}
    if (!is.null(index)) {
        ## The selection is read back in sorted order; build per-dimension
        ## mappings I that restore the caller's requested order (including
        ## any duplicated indices).
        I = list()
        for (i in seq_len(length(index))) {
            tmp = unique(sort(index[[i]]))
            I[[i]] = match(index[[i]], tmp)
        }
        #####################
        ## This can take a long time for large datasets
        ## can we find rules for skipping it?
        #obj <- do.call("[", c(list(obj), I, drop = FALSE))
        ## only reindex when the selection is not already in natural order
        obj.dim <- lapply(dim(obj), FUN = seq_len)
        if(!identical(I, obj.dim)) {
            obj <- do.call("[", c(list(obj), I, drop = FALSE))
        }
        ####################
    }
    #try({
    #    H5Sclose(h5spaceFile)
    #})
    obj
}
h5read <- function(file, name, index=NULL, start=NULL, stride=NULL, block=NULL, count=NULL, compoundAsDataFrame = TRUE, callGeneric = TRUE, read.attributes=FALSE, drop = FALSE, ..., native = FALSE) {
    ## Read the object 'name' (group or dataset) from HDF5 'file'.
    ## Groups are read recursively via h5dump(); datasets via h5readDataset().
    ## When read.attributes=TRUE, HDF5 attributes (except "dim") are attached
    ## to the returned object as R attributes.
    loc = h5checktypeOrOpenLoc(file, readonly=TRUE, native = native)
    on.exit( h5closeitLoc(loc) )  # always release the file handle
    if (!H5Lexists(loc$H5Identifier, name)) {
        stop("Object '", name, "' does not exist in this HDF5 file.")
    } else {
        ## inspect the object to decide how to read it
        oid = H5Oopen(loc$H5Identifier, name)
        type = H5Iget_type(oid)
        num_attrs = H5Oget_num_attrs(oid)
        if (is.na(num_attrs)) { num_attrs = 0 }
        H5Oclose(oid)
        if (type == "H5I_GROUP") {
            ## a group: dump its whole contents recursively
            gid <- H5Gopen(loc$H5Identifier, name)
            obj = h5dump(gid, start=start, stride=stride, block=block, count=count, compoundAsDataFrame = compoundAsDataFrame, callGeneric = callGeneric, ...)
            H5Gclose(gid)
        } else {
            if (type == "H5I_DATASET") {
                try( { h5dataset <- H5Dopen(loc$H5Identifier, name) } )
                obj <- h5readDataset(h5dataset, index = index, start = start, stride = stride,
                    block = block, count = count, compoundAsDataFrame = compoundAsDataFrame, drop = drop, ...)
                try( { H5Dclose(h5dataset) } )
                ## optionally dispatch to a class-specific reader
                ## h5read.<class>() when the stored object carries a class
                cl <- attr(obj,"class")
                if (!is.null(cl) & callGeneric) {
                    if (exists(paste("h5read",cl,sep="."),mode="function")) {
                        obj <- do.call(paste("h5read",cl,sep="."), args=list(obj = obj))
                    }
                }
            } else {
                ## neither group nor dataset (e.g. named datatype)
                message("Reading of object type not supported.")
                obj <- NULL
            } ## DATASET
        } ## GROUP
        if (read.attributes & (num_attrs > 0) & !is.null(obj)) {
            ## copy HDF5 attributes onto the R object; "dim" is skipped so it
            ## cannot clobber the R dim attribute of the returned array
            for (i in seq_len(num_attrs)) {
                A = H5Aopen_by_idx(loc$H5Identifier, n = i-1, objname = name)
                attrname <- H5Aget_name(A)
                if (attrname != "dim") {
                    attr(obj, attrname) = H5Aread(A)
                }
                H5Aclose(A)
            }
        }
    } # !H5Lexists
    obj
}
|
# # 7章
# ## 7.2
# In[1]:
library(dplyr)
# In[2]:
library(tidyr)
# In[3]:
library(ggplot2)
# In[4]:
d.room <- data_frame(
kaiteki = c(6, 5, 6, 7, 2, 1, 1, 0, 7, 8, 8, 9, 8, 7, 6, 7),
size = c(rep("S", 8), rep("L", 8)),
student = c(rep(1, 4), rep(2, 4), rep(1, 4), rep(2, 4))
)
head(d.room)
# In[164]:
write_csv(d.room, "../data/table7-1.csv")
# In[5]:
str(d.room)
# p.98
#
# 誤
#
# ```r
# summary(aov(y ~ size + student + size:student))
# ```
#
# 正
#
# ```r
# summary(aov(kaiteki ~ size + student + size:student))
# ```
#
# In[8]:
d.room %>% aov(kaiteki ~ size + student + size:student, data = .) %>% summary()
# In[9]:
options(repr.plot.width = 3, repr.plot.height = 3)
# In[30]:
d.room %>% mutate(size=factor(size, levels=c("S", "L"), labels=c("S", "L")),
student=factor(student)) %>%
group_by(size, student) %>% summarise(kaiteki = mean(kaiteki)) %>%
ggplot(aes(x = size, y = kaiteki, group=student, colour=student)) + geom_line() + geom_point()
# In[31]:
d.room %>% mutate(size=factor(size, levels=c("S", "L"), labels=c("S", "L")),
student=factor(student)) %>%
group_by(student, size) %>% summarise(kaiteki = mean(kaiteki)) %>%
ggplot(aes(x = student, y = kaiteki, group=size, colour=size)) + geom_line() + geom_point()
# In[32]:
library(readr)
# In[50]:
d.soil <- read_csv("../samplecode/Rで学ぶ統計学入門図版作成用/table7-4.csv", col_names = FALSE)
str(d.soil)
# In[52]:
d.soil %>% select(plant=X1) %>% mutate(soil = c(rep(1, 10), rep(2, 10), rep(1, 10), rep(2, 10)),
ft = c(rep(1, 20), rep(2, 20))) -> d.soil
d.soil
# In[163]:
write_csv(d.soil, "../data/table7-2.csv")
# In[54]:
d.soil %>% aov(plant ~ soil + ft + soil:ft, data = .) %>% summary()
# In[59]:
d.soil %>% mutate(soil = factor(soil, levels=c(1, 2), labels = c("natural", "agricultural")), ft = factor(ft, levels = c(1, 2), labels = c("C", "F"))) %>%
group_by(soil, ft) %>% summarise(plant=mean(plant)) %>%
ggplot(aes(x = soil, y = plant, group=ft, colour=ft)) + geom_line() + geom_point()
# F値の計算
# In[61]:
d.soil %>% group_by(soil, ft) %>% summarise(plant=mean(plant))
# In[62]:
d.soil %>% select(-ft) %>% group_by(soil) %>% summarise(plant=mean(plant))
# In[63]:
d.soil %>% select(-soil) %>% group_by(ft) %>% summarise(plant=mean(plant))
# In[67]:
d.soil %>% summarise(plant=mean(plant))
# In[97]:
d.soil %>% group_by(soil, ft) %>% summarise(x = mean(plant), n = n()) %>%
mutate(x = n * (x - 32.21)**2) %>% ungroup() %>% summarise(sum(x))
# In[80]:
d.soil %>% group_by(soil) %>% summarise(x = mean(plant), n = n()) %>% mutate(x = n * (x - 32.21)**2) %>% summarise(sum(x))
# In[81]:
d.soil %>% group_by(ft) %>% summarise(x = mean(plant), n = n()) %>% mutate(x = n * (x - 32.21)**2) %>% summarise(sum(x))
# In[88]:
d.soil %>% group_by(soil, ft) %>% mutate(x = (plant - mean(plant))**2 ) %>% summarise(x = sum(x)) %>% ungroup() %>% summarise(x = sum(x))
# In[110]:
calc_f_value <- function(df){
    ## Print a two-way ANOVA table (soil x fertilizer) computed from the raw
    ## sums of squares.  Expects a data frame with columns 'plant' (response),
    ## 'soil' and 'ft' (the two factors), in a balanced design (equal
    ## replicates per cell).  Output is written with cat(); nothing returned.
    cat("要因", "SS", "df", "MS", "F", "\n")
    ## derive the design sizes from the data instead of hard-coding 2x2x10;
    ## assumes a balanced design
    n.soil <- length(unique(df$soil))
    n.ft <- length(unique(df$ft))
    n <- nrow(df) / (n.soil * n.ft)
    ## grand mean
    mean_ <- df %>% summarise(plant=mean(plant)) %>% .[["plant"]]
    ## total SS and its degrees of freedom
    Y.SS <- df %>% mutate(x = (plant - mean_)**2) %>% summarise(Y.SS = sum(x)) %>% .[["Y.SS"]]
    df.all <- n.soil * n.ft * n * 1
    ## between-cell ("group") SS
    among.SS <- df %>% group_by(soil, ft) %>% summarise(x = mean(plant), n = n()) %>%
        mutate(x = n * (x - mean_)**2) %>% ungroup() %>% summarise(among.SS = sum(x)) %>% .[["among.SS"]]
    df.among <- n.soil * n.ft - 1
    among.MS <- among.SS / df.among
    cat("グループ間", among.SS, df.among, "\n")
    ## main effect: soil
    soil.SS <- df %>% group_by(soil) %>% summarise(x = mean(plant), n = n()) %>%
        mutate(x = n * (x - mean_)**2) %>% summarise(soil.SS = sum(x)) %>% .[["soil.SS"]]
    df.soil <- n.soil - 1
    soil.MS <- soil.SS / df.soil
    ## main effect: fertilizer
    ft.SS <- df %>% group_by(ft) %>% summarise(x = mean(plant), n = n()) %>%
        mutate(x = n * (x - mean_)**2) %>% summarise(ft.SS = sum(x)) %>% .[["ft.SS"]]
    df.ft <- n.ft - 1
    ft.MS <- ft.SS / df.ft
    ## interaction SS obtained by subtraction
    inter.SS <- among.SS - soil.SS - ft.SS
    df.inter <- (n.soil - 1) * (n.ft - 1)
    inter.MS <- inter.SS / df.inter
    ## within-cell (residual) SS
    within.SS <- df %>% group_by(soil, ft) %>%
        mutate(x = (plant - mean(plant))**2 ) %>% summarise(x = sum(x)) %>% ungroup() %>% summarise(within.SS = sum(x)) %>% .[["within.SS"]]
    df.within <- n.soil * n.ft * (n - 1)
    within.MS <- within.SS / df.within
    F.soil <- soil.MS / within.MS
    cat("土壌効果", soil.SS, df.soil, soil.MS, F.soil, "\n")
    F.ft <- ft.MS / within.MS
    cat("施肥効果", ft.SS, df.ft, ft.MS, F.ft, "\n")
    F.inter <- inter.MS / within.MS
    ## BUG FIX: this row previously printed F.ft instead of F.inter
    cat("交互効果", inter.SS, df.inter, inter.MS, F.inter, "\n")
    ## BUG FIX: this row previously printed within.SS twice instead of
    ## within.SS followed by within.MS
    cat("グループ内", within.SS, df.within, within.MS, "\n")
    cat("総合計", Y.SS, df.all, "\n")
}
calc_f_value(d.soil)
# ## 7.4 線形混合モデル: 固定要因とランダム変量要因を取り込む
# In[111]:
d.pig <- read_csv("../samplecode/Rで学ぶ統計学入門図版作成用/table7-5.csv")
str(d.pig)
# In[162]:
write_csv(d.pig, "../data/table7-5.csv")
# In[112]:
head(d.pig)
# In[114]:
options(repr.plot.width=6)
# In[116]:
summary(d.pig)
# In[117]:
d.pig %>% group_by(treat) %>% summarise(mean(wt))
# In[133]:
d.pig %>% ggplot(aes(x = number, y = wt, colour=treat)) + geom_point() +
geom_hline(yintercept = 111.5, linetype=2, colour=2) +
geom_hline(yintercept = 125.06, linetype=2, colour=4) +
geom_hline(yintercept = 118.3, linetype=2)
# In[136]:
d.pig %>% mutate(block=factor(block), treat=factor(treat)) %>% aov(wt ~ treat + Error(block / treat), data=.) %>% summary()
# ## 演習問題
# In[139]:
d.mouse <- data_frame(
wt = c(55.4, 49.7, 52.1, 49.5, 53.2, 51.4, 54.3,
47.2, 49.4, 51.3, 54.5, 48.1, 50.8, 52.7,
50.5, 48.2, 48.4, 52.1, 51.8, 49.7, 49.2,
47.3, 46.2, 48.8, 50.1, 48.2, 47.0, 46.5),
feed = c(rep("A", 7*2), rep("B", 7*2)),
gender = c(rep("M", 7), rep("F", 7), rep("M", 7), rep("F", 7))
)
head(d.mouse)
# In[161]:
write_csv(d.mouse, "../data/table-ex7-2.csv")
# In[141]:
options(repr.plot.width=3)
# In[142]:
d.mouse %>% ggplot(aes(x = feed, y = wt, group=gender, colour=gender)) + geom_point()
# In[145]:
d.mouse %>% mutate(feed=factor(feed), gender=factor(gender)) %>% group_by(feed, gender) %>% summarise(wt=mean(wt)) %>%
ggplot(aes(x=feed, y=wt, group=gender, colour=gender)) + geom_line() + geom_point()
# In[146]:
d.mouse %>% aov(wt ~ feed + gender + feed:gender, data=.)
# In[147]:
d.mouse %>% aov(wt ~ feed + gender + feed:gender, data=.) %>% summary()
# ### 演習問題7.3
# In[149]:
d.exam <- data_frame(
indiv = c(1:36),
score = c(78, 72, 81, 71, 74, 72,
85, 83, 79, 77, 75, 77,
83, 86, 74, 76, 73, 71,
66, 71, 72, 60, 58, 55,
58, 49, 62, 69, 70, 63,
65, 59, 54, 67, 66, 67),
juku = c(rep(1, 18), rep(2, 18)),
class = rep(c(rep("A", 6), rep("B", 6), rep("C", 6)), 2)
)
head(d.exam)
# In[160]:
write_csv(d.exam, "../data/table-ex7-3.csv")
# In[151]:
options(repr.plot.width = 6)
# In[155]:
d.exam %>% ggplot(aes(x = indiv, y = score, shape=factor(juku), colour=class)) + geom_point()
# In[157]:
d.exam %>% mutate(juku=factor(juku), class=factor(class)) %>%
aov(score ~ juku + Error(class / juku), data=.)
# In[158]:
d.exam %>% mutate(juku=factor(juku), class=factor(class)) %>%
aov(score ~ juku + Error(class / juku), data=.) %>% summary()
# In[33]:
devtools::session_info()
| /notebooks/Chap07.r | no_license | whatalnk/stat-intro-by-r-tkd | R | false | false | 8,032 | r |
# # 7章
# ## 7.2
# In[1]:
library(dplyr)
# In[2]:
library(tidyr)
# In[3]:
library(ggplot2)
# In[4]:
d.room <- data_frame(
kaiteki = c(6, 5, 6, 7, 2, 1, 1, 0, 7, 8, 8, 9, 8, 7, 6, 7),
size = c(rep("S", 8), rep("L", 8)),
student = c(rep(1, 4), rep(2, 4), rep(1, 4), rep(2, 4))
)
head(d.room)
# In[164]:
write_csv(d.room, "../data/table7-1.csv")
# In[5]:
str(d.room)
# p.98
#
# 誤
#
# ```r
# summary(aov(y ~ size + student + size:student))
# ```
#
# 正
#
# ```r
# summary(aov(kaiteki ~ size + student + size:student))
# ```
#
# In[8]:
d.room %>% aov(kaiteki ~ size + student + size:student, data = .) %>% summary()
# In[9]:
options(repr.plot.width = 3, repr.plot.height = 3)
# In[30]:
d.room %>% mutate(size=factor(size, levels=c("S", "L"), labels=c("S", "L")),
student=factor(student)) %>%
group_by(size, student) %>% summarise(kaiteki = mean(kaiteki)) %>%
ggplot(aes(x = size, y = kaiteki, group=student, colour=student)) + geom_line() + geom_point()
# In[31]:
d.room %>% mutate(size=factor(size, levels=c("S", "L"), labels=c("S", "L")),
student=factor(student)) %>%
group_by(student, size) %>% summarise(kaiteki = mean(kaiteki)) %>%
ggplot(aes(x = student, y = kaiteki, group=size, colour=size)) + geom_line() + geom_point()
# In[32]:
library(readr)
# In[50]:
d.soil <- read_csv("../samplecode/Rで学ぶ統計学入門図版作成用/table7-4.csv", col_names = FALSE)
str(d.soil)
# In[52]:
d.soil %>% select(plant=X1) %>% mutate(soil = c(rep(1, 10), rep(2, 10), rep(1, 10), rep(2, 10)),
ft = c(rep(1, 20), rep(2, 20))) -> d.soil
d.soil
# In[163]:
write_csv(d.soil, "../data/table7-2.csv")
# In[54]:
d.soil %>% aov(plant ~ soil + ft + soil:ft, data = .) %>% summary()
# In[59]:
d.soil %>% mutate(soil = factor(soil, levels=c(1, 2), labels = c("natural", "agricultural")), ft = factor(ft, levels = c(1, 2), labels = c("C", "F"))) %>%
group_by(soil, ft) %>% summarise(plant=mean(plant)) %>%
ggplot(aes(x = soil, y = plant, group=ft, colour=ft)) + geom_line() + geom_point()
# F値の計算
# In[61]:
d.soil %>% group_by(soil, ft) %>% summarise(plant=mean(plant))
# In[62]:
d.soil %>% select(-ft) %>% group_by(soil) %>% summarise(plant=mean(plant))
# In[63]:
d.soil %>% select(-soil) %>% group_by(ft) %>% summarise(plant=mean(plant))
# In[67]:
d.soil %>% summarise(plant=mean(plant))
# In[97]:
d.soil %>% group_by(soil, ft) %>% summarise(x = mean(plant), n = n()) %>%
mutate(x = n * (x - 32.21)**2) %>% ungroup() %>% summarise(sum(x))
# In[80]:
d.soil %>% group_by(soil) %>% summarise(x = mean(plant), n = n()) %>% mutate(x = n * (x - 32.21)**2) %>% summarise(sum(x))
# In[81]:
d.soil %>% group_by(ft) %>% summarise(x = mean(plant), n = n()) %>% mutate(x = n * (x - 32.21)**2) %>% summarise(sum(x))
# In[88]:
d.soil %>% group_by(soil, ft) %>% mutate(x = (plant - mean(plant))**2 ) %>% summarise(x = sum(x)) %>% ungroup() %>% summarise(x = sum(x))
# In[110]:
calc_f_value <- function(df){
    ## Print a two-way ANOVA table (soil x fertilizer) computed from the raw
    ## sums of squares.  Expects a data frame with columns 'plant' (response),
    ## 'soil' and 'ft' (the two factors), in a balanced design (equal
    ## replicates per cell).  Output is written with cat(); nothing returned.
    cat("要因", "SS", "df", "MS", "F", "\n")
    ## derive the design sizes from the data instead of hard-coding 2x2x10;
    ## assumes a balanced design
    n.soil <- length(unique(df$soil))
    n.ft <- length(unique(df$ft))
    n <- nrow(df) / (n.soil * n.ft)
    ## grand mean
    mean_ <- df %>% summarise(plant=mean(plant)) %>% .[["plant"]]
    ## total SS and its degrees of freedom
    Y.SS <- df %>% mutate(x = (plant - mean_)**2) %>% summarise(Y.SS = sum(x)) %>% .[["Y.SS"]]
    df.all <- n.soil * n.ft * n * 1
    ## between-cell ("group") SS
    among.SS <- df %>% group_by(soil, ft) %>% summarise(x = mean(plant), n = n()) %>%
        mutate(x = n * (x - mean_)**2) %>% ungroup() %>% summarise(among.SS = sum(x)) %>% .[["among.SS"]]
    df.among <- n.soil * n.ft - 1
    among.MS <- among.SS / df.among
    cat("グループ間", among.SS, df.among, "\n")
    ## main effect: soil
    soil.SS <- df %>% group_by(soil) %>% summarise(x = mean(plant), n = n()) %>%
        mutate(x = n * (x - mean_)**2) %>% summarise(soil.SS = sum(x)) %>% .[["soil.SS"]]
    df.soil <- n.soil - 1
    soil.MS <- soil.SS / df.soil
    ## main effect: fertilizer
    ft.SS <- df %>% group_by(ft) %>% summarise(x = mean(plant), n = n()) %>%
        mutate(x = n * (x - mean_)**2) %>% summarise(ft.SS = sum(x)) %>% .[["ft.SS"]]
    df.ft <- n.ft - 1
    ft.MS <- ft.SS / df.ft
    ## interaction SS obtained by subtraction
    inter.SS <- among.SS - soil.SS - ft.SS
    df.inter <- (n.soil - 1) * (n.ft - 1)
    inter.MS <- inter.SS / df.inter
    ## within-cell (residual) SS
    within.SS <- df %>% group_by(soil, ft) %>%
        mutate(x = (plant - mean(plant))**2 ) %>% summarise(x = sum(x)) %>% ungroup() %>% summarise(within.SS = sum(x)) %>% .[["within.SS"]]
    df.within <- n.soil * n.ft * (n - 1)
    within.MS <- within.SS / df.within
    F.soil <- soil.MS / within.MS
    cat("土壌効果", soil.SS, df.soil, soil.MS, F.soil, "\n")
    F.ft <- ft.MS / within.MS
    cat("施肥効果", ft.SS, df.ft, ft.MS, F.ft, "\n")
    F.inter <- inter.MS / within.MS
    ## BUG FIX: this row previously printed F.ft instead of F.inter
    cat("交互効果", inter.SS, df.inter, inter.MS, F.inter, "\n")
    ## BUG FIX: this row previously printed within.SS twice instead of
    ## within.SS followed by within.MS
    cat("グループ内", within.SS, df.within, within.MS, "\n")
    cat("総合計", Y.SS, df.all, "\n")
}
calc_f_value(d.soil)
# ## 7.4 線形混合モデル: 固定要因とランダム変量要因を取り込む
# In[111]:
d.pig <- read_csv("../samplecode/Rで学ぶ統計学入門図版作成用/table7-5.csv")
str(d.pig)
# In[162]:
write_csv(d.pig, "../data/table7-5.csv")
# In[112]:
head(d.pig)
# In[114]:
options(repr.plot.width=6)
# In[116]:
summary(d.pig)
# In[117]:
d.pig %>% group_by(treat) %>% summarise(mean(wt))
# In[133]:
d.pig %>% ggplot(aes(x = number, y = wt, colour=treat)) + geom_point() +
geom_hline(yintercept = 111.5, linetype=2, colour=2) +
geom_hline(yintercept = 125.06, linetype=2, colour=4) +
geom_hline(yintercept = 118.3, linetype=2)
# In[136]:
d.pig %>% mutate(block=factor(block), treat=factor(treat)) %>% aov(wt ~ treat + Error(block / treat), data=.) %>% summary()
# ## 演習問題
# In[139]:
d.mouse <- data_frame(
wt = c(55.4, 49.7, 52.1, 49.5, 53.2, 51.4, 54.3,
47.2, 49.4, 51.3, 54.5, 48.1, 50.8, 52.7,
50.5, 48.2, 48.4, 52.1, 51.8, 49.7, 49.2,
47.3, 46.2, 48.8, 50.1, 48.2, 47.0, 46.5),
feed = c(rep("A", 7*2), rep("B", 7*2)),
gender = c(rep("M", 7), rep("F", 7), rep("M", 7), rep("F", 7))
)
head(d.mouse)
# In[161]:
write_csv(d.mouse, "../data/table-ex7-2.csv")
# In[141]:
options(repr.plot.width=3)
# In[142]:
d.mouse %>% ggplot(aes(x = feed, y = wt, group=gender, colour=gender)) + geom_point()
# In[145]:
d.mouse %>% mutate(feed=factor(feed), gender=factor(gender)) %>% group_by(feed, gender) %>% summarise(wt=mean(wt)) %>%
ggplot(aes(x=feed, y=wt, group=gender, colour=gender)) + geom_line() + geom_point()
# In[146]:
d.mouse %>% aov(wt ~ feed + gender + feed:gender, data=.)
# In[147]:
d.mouse %>% aov(wt ~ feed + gender + feed:gender, data=.) %>% summary()
# ### 演習問題7.3
# In[149]:
d.exam <- data_frame(
indiv = c(1:36),
score = c(78, 72, 81, 71, 74, 72,
85, 83, 79, 77, 75, 77,
83, 86, 74, 76, 73, 71,
66, 71, 72, 60, 58, 55,
58, 49, 62, 69, 70, 63,
65, 59, 54, 67, 66, 67),
juku = c(rep(1, 18), rep(2, 18)),
class = rep(c(rep("A", 6), rep("B", 6), rep("C", 6)), 2)
)
head(d.exam)
# In[160]:
write_csv(d.exam, "../data/table-ex7-3.csv")
# In[151]:
options(repr.plot.width = 6)
# In[155]:
d.exam %>% ggplot(aes(x = indiv, y = score, shape=factor(juku), colour=class)) + geom_point()
# In[157]:
d.exam %>% mutate(juku=factor(juku), class=factor(class)) %>%
aov(score ~ juku + Error(class / juku), data=.)
# In[158]:
d.exam %>% mutate(juku=factor(juku), class=factor(class)) %>%
aov(score ~ juku + Error(class / juku), data=.) %>% summary()
# In[33]:
devtools::session_info()
|
# CREATEBINNULL2.R
# Part of the FALCON (Framework of Adaptive ensembLes for the Comparison Of
# Nestedness) package: https://github.com/sjbeckett/FALCON
# Last updated: 11th July 2014
CREATEBINNULL2 <- function(MATRIX,numbernulls,measures,binNull,sortVar) { #FF
    ## Fixed-Fixed null model.
    ## Generates 'numbernulls' null matrices preserving the row and column
    ## sums (degrees) of the binary input MATRIX, using the curveball
    ## algorithm of Strona et al. (2014, Nature Communications 5:4114).
    ## Each null matrix is sorted with sortMATRIX() and evaluated with every
    ## function in 'measures'.
    ## Returns a length(measures) x numbernulls matrix of measure values.
    MEASURES <- array(0, dim = c(length(measures), numbernulls))  # results store
    r <- dim(MATRIX)[1]
    n.col <- dim(MATRIX)[2]          # renamed from 'c' to avoid shadowing base::c
    ROW <- rep(0, n.col)             # template empty row reused for each swap
    for (aa in seq_len(numbernulls)) {  # seq_len() is safe when numbernulls == 0
        TEST <- MATRIX  # start each randomisation from the input matrix
        for (rep in seq_len(5 * r)) {   # number of attempted pair swaps
            AB <- sample(1:r, 2)        # pick two distinct rows
            A <- TEST[AB[1], ]          # row 1 of the pair
            J <- A - TEST[AB[2], ]      # +1 / -1 mark presences unique to one row
            if ((max(J) - min(J)) == 2) {  # both rows have unique presences: swap possible
                tot <- which(abs(J) == 1)  # columns unique to either row
                l_tot <- length(tot)       # number of uniques (always >= 2 here)
                tot <- sample(tot, l_tot)  # shuffle the unique columns
                both <- which(J == 0 & A == 1)  # presences shared by both rows
                L <- sum(J == 1)           # how many uniques row 1 keeps
                ROW1 <- c(both, tot[1:L])            # new presences of row 1
                ROW2 <- c(both, tot[(L + 1):l_tot])  # new presences of row 2
                I <- ROW
                I[ROW1] <- 1
                K <- ROW
                K[ROW2] <- 1
                TEST[AB, ] <- rbind(I, K)  # write both rows back
            }
        }
        ## sort the null matrix, then apply every measure to it
        TEST <- sortMATRIX(TEST, binNull, sortVar)$sortMAT
        for (ww in seq_along(measures)) {
            MEASURES[ww, aa] <- measures[[ww]](TEST)
        }
    }
    return(MEASURES)
}
| /R/NULLS/CREATEBINNULL2.R | permissive | sjbeckett/FALCON | R | false | false | 2,248 | r | # CREATEBINNULL2.R
# Part of the FALCON (Framework of Adaptive ensembLes for the Comparison Of
# Nestedness) package: https://github.com/sjbeckett/FALCON
# Last updated: 11th July 2014
CREATEBINNULL2 <- function(MATRIX,numbernulls,measures,binNull,sortVar) { #FF
    ## Fixed-Fixed null model.
    ## Generates 'numbernulls' null matrices preserving the row and column
    ## sums (degrees) of the binary input MATRIX, using the curveball
    ## algorithm of Strona et al. (2014, Nature Communications 5:4114).
    ## Each null matrix is sorted with sortMATRIX() and evaluated with every
    ## function in 'measures'.
    ## Returns a length(measures) x numbernulls matrix of measure values.
    MEASURES <- array(0, dim = c(length(measures), numbernulls))  # results store
    r <- dim(MATRIX)[1]
    n.col <- dim(MATRIX)[2]          # renamed from 'c' to avoid shadowing base::c
    ROW <- rep(0, n.col)             # template empty row reused for each swap
    for (aa in seq_len(numbernulls)) {  # seq_len() is safe when numbernulls == 0
        TEST <- MATRIX  # start each randomisation from the input matrix
        for (rep in seq_len(5 * r)) {   # number of attempted pair swaps
            AB <- sample(1:r, 2)        # pick two distinct rows
            A <- TEST[AB[1], ]          # row 1 of the pair
            J <- A - TEST[AB[2], ]      # +1 / -1 mark presences unique to one row
            if ((max(J) - min(J)) == 2) {  # both rows have unique presences: swap possible
                tot <- which(abs(J) == 1)  # columns unique to either row
                l_tot <- length(tot)       # number of uniques (always >= 2 here)
                tot <- sample(tot, l_tot)  # shuffle the unique columns
                both <- which(J == 0 & A == 1)  # presences shared by both rows
                L <- sum(J == 1)           # how many uniques row 1 keeps
                ROW1 <- c(both, tot[1:L])            # new presences of row 1
                ROW2 <- c(both, tot[(L + 1):l_tot])  # new presences of row 2
                I <- ROW
                I[ROW1] <- 1
                K <- ROW
                K[ROW2] <- 1
                TEST[AB, ] <- rbind(I, K)  # write both rows back
            }
        }
        ## sort the null matrix, then apply every measure to it
        TEST <- sortMATRIX(TEST, binNull, sortVar)$sortMAT
        for (ww in seq_along(measures)) {
            MEASURES[ww, aa] <- measures[[ww]](TEST)
        }
    }
    return(MEASURES)
}
|
# this is a test
diabetes_data <- read.csv("Diabetes-md.csv",
na = "", stringsAsFactors=FALSE)
# what's the data type
class(diabetes_data)
# what's the structure
str(diabetes_data)
# what's the dimension
dim(diabetes_data)
#row(diabetes_data)
#install.packages("VIM")
library(VIM)
missing_values <- aggr(diabetes_data, prop = FALSE, numbers = TRUE)
# Show summary of the contents of missing_values
summary(missing_values)
# -------------------------------------------------------------------------------
# Dealing with missing data
# -------------------------------------------------------------------------------
# There are several methods we can use to extract out
# missing data before we decide whether to delete it
# We can remove all missing data with this code.
# Removes any rows that contains NA - listwise deletion
new_diabetes_data <- na.omit(diabetes_data)
new_diabetes_data
# We can use complete.cases to show rows where data is available
# Here's an example of how to do this
complete_diabetes_data <- complete.cases(diabetes_data)
complete_diabetes_data
# Show sum of missing rows
sum(complete_diabetes_data)
# missing details
diabetes_data[!complete.cases(diabetes_data), ]
# install mice
# install.packages("mice)
library("mice")
md.pattern(diabetes_data)
| /test.R | no_license | rachel0614/practicals | R | false | false | 1,311 | r | # this is a test
diabetes_data <- read.csv("Diabetes-md.csv",
na = "", stringsAsFactors=FALSE)
# what's the data type
class(diabetes_data)
# what's the structure
str(diabetes_data)
# what's the dimension
dim(diabetes_data)
#row(diabetes_data)
#install.packages("VIM")
library(VIM)
missing_values <- aggr(diabetes_data, prop = FALSE, numbers = TRUE)
# Show summary of the contents of missing_values
summary(missing_values)
# -------------------------------------------------------------------------------
# Dealing with missing data
# -------------------------------------------------------------------------------
# There are several methods we can use to extract out
# missing data before we decide whether to delete it
# We can remove all missing data with this code.
# Removes any rows that contains NA - listwise deletion
new_diabetes_data <- na.omit(diabetes_data)
new_diabetes_data
# We can use complete.cases to show rows where data is available
# Here's an example of how to do this
complete_diabetes_data <- complete.cases(diabetes_data)
complete_diabetes_data
# Show sum of missing rows
sum(complete_diabetes_data)
# missing details
diabetes_data[!complete.cases(diabetes_data), ]
# install mice
# install.packages("mice)
library("mice")
md.pattern(diabetes_data)
|
library(Sleuth2)
### Name: case0202
### Title: Anatomical Abnormalities Associated with Schizophrenia
### Aliases: case0202
### Keywords: datasets
### ** Examples
str(case0202)
with(case0202, stem(Unaffect-Affected, scale=2))
| /data/genthat_extracted_code/Sleuth2/examples/case0202.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 233 | r | library(Sleuth2)
### Name: case0202
### Title: Anatomical Abnormalities Associated with Schizophrenia
### Aliases: case0202
### Keywords: datasets
### ** Examples
str(case0202)
with(case0202, stem(Unaffect-Affected, scale=2))
|
## Download Data from source
if (!file.exists("./data/household_power_consumption.txt")) {
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl,destfile="./data/power_data.zip",method="curl")
  # Unzip the file
  unzip(zipfile="./data/power_data.zip",exdir="./data")
}
## Getting full dataset
## ("?" encodes missing values in the source file; nrows matches the
## full file so read.csv does not over-allocate)
data_full <- read.csv("./data/household_power_consumption.txt", header=T, sep=';', na.strings="?",
                      nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data to the two days of interest (2007-02-01 and 2007-02-02)
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)  # free the large full dataset
## Converting dates: combine Date and Time into a single POSIXct column
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 4: 2x2 panel of power / voltage / sub-metering time series
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
  plot(Global_active_power~Datetime, type="l",
       ylab="Global Active Power (kilowatts)", xlab="")
  plot(Voltage~Datetime, type="l",
       ylab="Voltage (volt)", xlab="")
  ## NOTE(review): this panel plots the three Sub_metering series but the
  ## y-axis label reads "Global Active Power" -- confirm the intended label
  plot(Sub_metering_1~Datetime, type="l",
       ylab="Global Active Power (kilowatts)", xlab="")
  lines(Sub_metering_2~Datetime,col='Red')
  lines(Sub_metering_3~Datetime,col='Blue')
  legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
         legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  plot(Global_reactive_power~Datetime, type="l",
       ylab="Global Rective Power (kilowatts)",xlab="")
})
## Saving to file (copy the screen device to a 480x480 px PNG)
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
| /plot4.R | no_license | flash55/ExData_Plotting1 | R | false | false | 1,635 | r | ## Download Data from source
if (!file.exists("./data/household_power_consumption.txt")) {
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl,destfile="./data/power_data.zip",method="curl")
# Unzip the file
unzip(zipfile="./data/power_data.zip",exdir="./data")
}
## Getting full dataset
data_full <- read.csv("./data/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting dates
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 4
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
plot(Voltage~Datetime, type="l",
ylab="Voltage (volt)", xlab="")
plot(Sub_metering_1~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power~Datetime, type="l",
ylab="Global Rective Power (kilowatts)",xlab="")
})
## Saving to file
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
|
return_keywords_and_thesaurus_as_data_frame <- function(all_subjects){
    ## Parse a subjects string of the form
    ##   "THESAURUS1=kw1,kw2\nTHESAURUS2=kw3,..."
    ## into a list with:
    ##   $all_keywords   data.frame(keyword, thesaurus), one row per keyword
    ##   $thesaurus      character vector of thesaurus names, in input order
    ##   $TopicCategory  fixed vector of ISO topic categories
    ## Returns NULL when the input contains no subject lines (same as before).
    keywords_metadata <- NULL
    all_keywords <- data.frame(keyword = character(), thesaurus = character(),
                               stringsAsFactors = FALSE)
    thesaurus <- NULL
    subject_rows <- strsplit(as.character(all_subjects), split = "\n")[[1]]
    for (subjects in subject_rows) {
        cat(subjects)
        cat("\n")
        parts <- strsplit(subjects, split = "=")[[1]]
        thesaurus_name <- parts[1]
        thesaurus <- c(thesaurus, thesaurus_name)
        keywords <- unlist(strsplit(as.character(parts[2]), split = ","))
        if (length(keywords) > 0) {
            ## append all keywords of this thesaurus in one batch instead of
            ## growing the data.frame one row at a time
            all_keywords <- rbind(all_keywords,
                                  data.frame(keyword = keywords,
                                             thesaurus = thesaurus_name,
                                             stringsAsFactors = FALSE))
        }
        ## assigned inside the loop so that an input with no subject lines
        ## still returns NULL, as the original implementation did
        keywords_metadata$all_keywords <- all_keywords
        keywords_metadata$thesaurus <- thesaurus
        keywords_metadata$TopicCategory <- c("biota", "oceans", "environment",
                                             "geoscientificInformation", "economy")
    }
    return(keywords_metadata)
}
################################################################################
add_contacts_and_roles_OGC_19115 <- function(config, metadata_identifier, contacts_roles, expected_role){
    ## Build a list of geometa ISOResponsibleParty objects for one dataset.
    ##
    ## config: pipeline configuration; contact details come from
    ##   config$gsheets$contacts (one row per person, keyed by e-mail).
    ## metadata_identifier: identifier of the dataset whose contacts we want.
    ## contacts_roles: data frame mapping dataset -> contact e-mail -> RoleCode.
    ## expected_role: character vector of RoleCode values to keep.
    ##
    ## Returns a (possibly empty) list of ISOResponsibleParty objects.
    contacts <- config$gsheets$contacts
    listContacts = list()
    if(is.null(contacts_roles)==FALSE){
        for(j in expected_role){
            number_row<-nrow(contacts_roles)
            for(i in 1:number_row){
                if(contacts_roles$dataset[i]==metadata_identifier & (contacts_roles$RoleCode[i]==j)){
                    ## contact record matching this role's e-mail address
                    the_contact <- contacts[contacts$electronicMailAddress%in%contacts_roles$contact[i],]
                    rp <- ISOResponsibleParty$new()
                    ## NOTE(review): the second setIndividualName() call
                    ## overwrites the first; the two lines look redundant --
                    ## confirm which form is intended
                    rp$setIndividualName(paste(as.character(the_contact$Name),as.character(the_contact$firstname),sep=" "))
                    rp$setIndividualName(paste(the_contact$Name,the_contact$firstname,sep=" "))
                    rp$setOrganisationName(as.character(the_contact$organisationName))
                    rp$setPositionName(as.character(the_contact$positionName))
                    ## override with the 'setPositionName' column when present
                    if(is.null(the_contact$setPositionName)==FALSE){
                        rp$setPositionName(as.character(the_contact$setPositionName))
                    }
                    ###########################################################################################################
                    ## map internal role codes to ISO 19115 CI_RoleCode values
                    if (contacts_roles$RoleCode[i]=="pointOfContact"){rp$setRole("pointOfContact")}
                    if (contacts_roles$RoleCode[i]=="metadata"){rp$setRole("pointOfContact")}
                    if (contacts_roles$RoleCode[i]=="publisher"){rp$setRole("publisher")}
                    if (contacts_roles$RoleCode[i]=="data_entry"){rp$setRole("originator")}
                    if (contacts_roles$RoleCode[i]=="data_collection"){rp$setRole("resourceProvider")}
                    if (contacts_roles$RoleCode[i]=="data"){rp$setRole("owner")}
                    if (contacts_roles$RoleCode[i]=="owner"){rp$setRole("author")}
                    if (contacts_roles$RoleCode[i]=="originator"){rp$setRole("originator")}
                    if (contacts_roles$RoleCode[i]=="principalInvestigator"){rp$setRole("originator")}
                    if (contacts_roles$RoleCode[i]=="data_structure_definition"){rp$setRole("processor")}
                    ## any role of the form "processor_stepN" maps to processor
                    if (grepl("processor_step",contacts_roles$RoleCode[i])){rp$setRole("processor")}
                    # if (contacts_roles$RoleCode[i]=="pointOfContact"){rp$setRole("originator")}
                    ###########################################################################################################
                    ## attach phone / postal address / e-mail details
                    contact <- ISOContact$new()
                    phone <- ISOTelephone$new()
                    phone$setVoice(as.character(the_contact$voice))
                    phone$setFacsimile(as.character(the_contact$facsimile))
                    contact$setPhone(phone)
                    address <- ISOAddress$new()
                    address$setDeliveryPoint(as.character(the_contact$deliveryPoint))
                    address$setCity(as.character(the_contact$city))
                    address$setPostalCode(the_contact$postalCode)
                    address$setCountry(the_contact$country)
                    address$setEmail(the_contact$electronicMailAddress)
                    contact$setAddress(address)
                    ## online resource (e.g. institutional web page)
                    res <- ISOOnlineResource$new()
                    res$setLinkage(the_contact$ISOOnlineResource)
                    res$setName(the_contact$setNameISOOnlineResource)
                    contact$setOnlineResource(res)
                    rp$setContactInfo(contact)
                    listContacts[[length(listContacts)+1]] <- rp
                }
            }
        }
    }
    return(listContacts)
    #Function over
}
#-----------------------------------------------------------------------------------------------------
#prepareDataQualityWithLineage
#@param config
#@param lineage_statement Statement to describe the overall lineage steps sequence
#@param lineage_steps a "list" object with the step description
#@param processors an object of class "list" giving a list "ISOResponsibleParty" corresponding to the processor(s) (for the time being common to each step)
#@param dataset_integration_date date of integration used commonly for all steps. 'ISOBaseDateTime' or POSIXct/POSIXt object
#@param contacts_roles
#@returns an object of class ISODataQuality
prepareDataQualityWithLineage <- function(config, lineage_statement, lineage_steps, dataset_integration_date,
                                          metadata_identifier, contacts_roles){
    ## Build a geometa ISODataQuality element whose lineage records each
    ## processing step, plus a final "publication in Tuna Atlas catalogue"
    ## step stamped with the current time.
    ##
    ## lineage_statement: overall description of the processing sequence.
    ## lineage_steps: list of per-step descriptions (see extractLineage()).
    ## dataset_integration_date: date applied to all processing steps.
    ## metadata_identifier / contacts_roles: used to look up the processor
    ##   contact(s) of each step via add_contacts_and_roles_OGC_19115().
    ##
    ## Returns an object of class ISODataQuality.
    dq <- ISODataQuality$new()
    scope <- ISOScope$new()
    scope$setLevel("dataset")
    dq$setScope(scope)
    #add lineage
    lineage <- ISOLineage$new()
    lineage$setStatement(lineage_statement)
    #add processing steps
    stepNb <- 1
    for(step in lineage_steps){
        ps <- ISOProcessStep$new()
        ps$setDescription(sprintf("Step %s - %s", stepNb, step))
        ps$setDateTime(dataset_integration_date)
        #TODO
        ## processors for step N carry the role code "processor_stepN"
        role=paste("processor_step",stepNb,sep="")
        expected_role=c(role)
        processors <- add_contacts_and_roles_OGC_19115(config, metadata_identifier, contacts_roles, expected_role)
        for(processor in processors){
            ps$addProcessor(processor)
        }
        lineage$addProcessStep(ps)
        stepNb <- stepNb+1
    }
    #process step N: Data Publication in Tuna Atlas Catalogue
    psN <- ISOProcessStep$new()
    psN$setDescription(sprintf("Step %s - Data Publication in Tuna Atlas catalogue", stepNb))
    psN$setDateTime(Sys.time())  # publication step uses "now", not the integration date
    #ONLY ONE PROCESSOR IN THIS CASE
    expected_role=c("data_structure_definition")
    processor <- add_contacts_and_roles_OGC_19115(config, metadata_identifier, contacts_roles, expected_role)
    #do we need a processor when there is no data_structure_definition? ie when it's not a detailed dataset, or a dataset not compliant with FDI
    if(length(processor)>0){psN$addProcessor(processor[[1]])}
    lineage$addProcessStep(psN)
    dq$setLineage(lineage)
    return(dq)
}
#-----------------------------------------------------------------------------------------------------
#extractLineage
#@param lineage Lineage information as string extracted from metadata table in the database
# ("step: text1. step: text2. .... step: textN.")
#@returns an object of class "list" with the step contents
extractLineage <- function(lineage){
  #Split a lineage string of the form "step1: text1. step2: text2. ..." into a list
  #of cleaned step descriptions (leading/trailing separator space and newlines removed).
  #@param lineage Lineage information as a single string from the metadata table
  #@returns a "list" object with one character element per step
  lineage_steps <- as.list(unlist(strsplit(lineage, "step[0-9]:"))) #@eblondel 12/08/2017 apply regular expression to detect step nb
  #drop empty fragments (e.g. the empty match before the first "step1:" marker)
  lineage_steps <- lineage_steps[sapply(lineage_steps, function(x){return(nchar(x)>0)})]
  lineage_steps <- lapply(lineage_steps, function(x){
    out <- x
    #strip the single leading space left over from the "stepN: " separator
    if(grepl("^ ", out)) out <- substr(out, 2, nchar(out))
    #strip a single trailing space (bug fix: previous code kept only the LAST
    #two characters of the step instead of removing the trailing space)
    if(grepl(" $", out)) out <- substr(out, 1, nchar(out)-1)
    #remove embedded newlines; doing it per element keeps the result a true list
    #(the previous whole-object gsub() silently coerced the list to a character vector)
    gsub("\n", "", out)
  })
  return(lineage_steps)
}
#write_metadata_OGC_19115_from_Dublin_Core
#Build an ISO 19115 metadata document (geometa::ISOMetadata) from Dublin Core style
#metadata elements plus the dynamic (spatial / temporal / keyword / URL) elements
#computed for the dataset. The returned object is ready to be pushed to a catalogue.
#@param config workflow configuration (db connection, loggers, SDI endpoints)
#@param metadata one-row set of Dublin Core elements (Identifier, Title, Description, Rights, ...)
#@param contacts_metadata list holding the contacts_roles table for this dataset
#@param spatial_metadata spatial elements (bbox, SRID, representation type, geographic identifiers, ...)
#@param temporal_metadata temporal extent elements (start_date / end_date)
#@param keywords_metadata dataframe with all (static & dynamic) keywords and their thesaurus
#@param urls_metadata list of dynamic / common URLs (online resources, thumbnails)
#@returns an object of class ISOMetadata
write_metadata_OGC_19115_from_Dublin_Core <- function(config = NULL,
                                                      metadata = NULL,
                                                      contacts_metadata = NULL,
                                                      spatial_metadata = NULL,
                                                      temporal_metadata = NULL,
                                                      keywords_metadata = NULL, # DATAFRAME WITH ALL (STATIC & DYNAMIC) KEYWORDS
                                                      urls_metadata= NULL # LIST OF DYNAMIC / COMMON URLs
)
{
  #config shortcuts
  con <- config$sdi$db$con
  logger <- config$logger
  logger.info <- config$logger.info
  logger.warn <- config$logger.warn
  logger.error <- config$logger.error
  # OGC 19115 SECTION => Metadata entity set information
  #-------------------------------------------------------------------------------------------------------------------
  logger.info("OGC 19115 SECTION => Metadata entity set information")
  #create the ISO Metadata sheet and fill it with all required elements
  md = ISOMetadata$new()
  md$setFileIdentifier(metadata$Permanent_Identifier)
  if(is.null(metadata$Parent_Metadata_Identifier)==FALSE){md$setParentIdentifier(metadata$Parent_Metadata_Identifier)} #=> TO BE DONE NEW ??? series software service
  md$setLanguage(metadata$Language)# if metadata and resource have the same language
  md$setCharacterSet("utf8")
  # md <- ISOBaseCharacterString$new(value = "utf8") ???
  # NOTE(review): 'metadata$addHierarchyLevel' looks like a copy/paste slip (a metadata
  # column named after the method) — confirm the intended column name
  md$addHierarchyLevel(metadata$addHierarchyLevel)# => not working ask Emmanuel Blondel
  # md$setHierarchyLevelName("This datasets is the result of a query in a SQL DataWarehouse")
  # TODO MANAGE "hierarchyLevelName" metadata element @julien
  logger.info("Add the contacts and roles for this METADATA sheet")
  #metadata-level contacts: roles "pointOfContact" and "metadata"
  expected_role=c("pointOfContact","metadata")
  listContacts <- add_contacts_and_roles_OGC_19115(config, metadata$Identifier, contacts_metadata$contacts_roles, expected_role)
  for(listContact in listContacts){
    md$addContact(listContact)
  }
  # TODO endless discussion to define what date should be used with @paul & @emmanuel & @julien
  #mdDate <- metadata$Date
  # md$setDateStamp(ISOdate(2015, 1, 1, 1))
  #the date stamp is the generation time; it is reused below for citation dates
  mdDate <- Sys.time()
  md$setDateStamp(mdDate)
  md$setMetadataStandardName("ISO 19115:2003/19139")
  md$setMetadataStandardVersion("1.0")
  # TODO decide if we should set up a URI @paul & @emmanuel & @julien
  # md$setDataSetURI(paste(metadata$Identifier,"use a DOI instead ?",sep=" / "))
  # md$setDataSetURI(metadata$Identifier)
  logger.info("MD_Metadata section is set")
  # OGC 19115 SECTION => Metadata entity set information
  logger.info("-------------------------------------------------------------------------------------------------------------------")
  logger.info("OGC 19115 SECTION => Spatial Representation")
  logger.info("-------------------------------------------------------------------------------------------------------------------")
  ###########################################################################################
  #VectorSpatialRepresentation: only written for vector datasets
  # ----------------
  if (!is.null(spatial_metadata$SpatialRepresentationType)){
    if (spatial_metadata$SpatialRepresentationType == "vector" ){
      VSR <- ISOVectorSpatialRepresentation$new()
      VSR$setTopologyLevel("geometryOnly")
      geomObject <- ISOGeometricObjects$new()
      geomObject$setGeometricObjectType(spatial_metadata$GeometricObjectType)
      if(is.null(spatial_metadata$dynamic_metadata_count_features)==FALSE){
        geomObject$setGeometricObjectCount(spatial_metadata$dynamic_metadata_count_features) #number of features
      }
      # VSR$setGeometricObjects(geomObject)
      VSR$addGeometricObjects(geomObject)
      # md$setSpatialRepresentationInfo(VSR)
      md$addSpatialRepresentationInfo(VSR)
    }
  }
  logger.info("SpatialRepresentation section is set !")
  # OGC 19115 SECTION => Reference System
  #-------------------------------------------------------------------------------------------------------------------
  logger.info("OGC 19115 SECTION => Reference System ")
  # TO BE DONE MANAGE VECTOR AND RASTER
  #reference system identified by its EPSG code (first SRID only)
  if(is.null(spatial_metadata$SRID)==FALSE){
    RS <- ISOReferenceSystem$new()
    RSId <- ISOReferenceIdentifier$new(code = spatial_metadata$SRID[[1]], codeSpace = "EPSG")
    RS$setReferenceSystemIdentifier(RSId)
    md$setReferenceSystemInfo(RS)
  }
  logger.info("ReferenceSystem section is set !")
  # OGC 19115 SECTION => Identification Section (MD_Identification)
  #-------------------------------------------------------------------------------------------------------------------
  logger.info("OGC 19115 SECTION => Identification Section (MD_Identification) ")
  IDENT <- ISODataIdentification$new()
  IDENT$setAbstract(metadata$Description)
  #TODO @julien @paul => ADAPT WITH THE CONTENT OF THE "DESCRIPTION" COLUMN
  # if(is.null(metadata$Purpose)==FALSE){
  #   IDENT$setPurpose(metadata$Purpose)
  # }
  # for(i in list_Credits){IDENT$addCredit(i)} # TO be done uncomment
  IDENT$setLanguage(metadata$Language)
  IDENT$setCharacterSet("utf8")
  #topic categories (one or more)
  # for(i in keywords_metadata$TopicCategory){IDENT$addTopicCategory(i)}
  #adding a point of contact for the identification
  #organization contact
  logger.info("Write contacts and roles for Data Identification Section")
  #identification-level contacts: roles "publisher" and "principalInvestigator"
  expected_role=c("publisher","principalInvestigator")
  listContacts <- add_contacts_and_roles_OGC_19115(config, metadata$Identifier, contacts_metadata$contacts_roles, expected_role)
  for(listContact in listContacts){
    # SERVICE$pointOfContact <- c(SERVICE$pointOfContact, serviceContact)
    IDENT$addPointOfContact(listContact)
  }
  logger.info("Contacts for IDENTIFICATION SECTION added")
  #citation of the resource (title, dates, edition, identifier)
  ct <- ISOCitation$new()
  ct$setTitle(metadata$Title)
  d <- ISODate$new()
  d$setDate(mdDate)
  d$setDateType("revision")
  ct$addDate(d)
  ct$setEdition("1.0")
  ct$setEditionDate(as.Date(mdDate)) #EditionDate should be of Date type
  # ct$setIdentifier(mdId)
  ct$setIdentifier(ISOMetaIdentifier$new(code = metadata$Permanent_Identifier)) #Julien: code to verify
  ct$setPresentationForm("mapDigital") # @julien check relevant value in ISO 19115 CODE LIST
  # TODO @julien CHECK IF ADDED CONTACT IS CORRECT IN THIS CONTEXT (SHOULD NOT => LAST FROM LIST ABOVE)
  ct$setCitedResponsibleParty(listContact)
  IDENT$setCitation(ct)
  #graphic overviews: every URL whose name starts with "thumbnail"
  if(is.null(urls_metadata$http_urls)==FALSE){
    logger.info("Add list of graphic overview")
    number_row<-nrow(urls_metadata$http_urls)
    for (i in 1:number_row){
      if (startsWith(urls_metadata$http_urls$http_URLs_names[i],"thumbnail")){
        go <- ISOBrowseGraphic$new(
          fileName = urls_metadata$http_urls$http_URLs_links[i],
          fileDescription = urls_metadata$http_urls$http_URLs_descriptions[i],
          fileType = urls_metadata$http_urls$http_URLs_protocols[i]
        )
        IDENT$addGraphicOverview(go)
      }
    }
  }
  # Constraint information
  #maintenance information
  mi <- ISOMaintenanceInformation$new()
  mi$setMaintenanceFrequency(metadata$Update_frequency)
  IDENT$setResourceMaintenance(mi)
  #adding legal constraint(s)
  # Julien => information to be stored in the metadata table (not hard coded)
  lc <- ISOLegalConstraints$new()
  lc$addUseLimitation(metadata$Rights)
  # lc$addUseLimitation("Use limitation 2 e.g. Citation guidelines")
  # lc$addUseLimitation("Use limitation 3 e.g. Disclaimer")
  # lc$addAccessConstraint("copyright")
  # lc$addAccessConstraint("license")
  # lc$addUseConstraint("copyright")
  # lc$addUseConstraint("license")
  IDENT$setResourceConstraints(lc)
  #adding security constraints
  # sc <- ISOSecurityConstraints$new()
  # sc$setClassification("secret")
  # sc$setUserNote("ultra secret")
  # sc$setClassificationSystem("no classification in particular")
  # sc$setHandlingDescription("description")
  # IDENT$addResourceConstraints(sc)
  # MD_Constraints
  #adding extent: https://github.com/eblondel/geometa/blob/master/tests/testthat/test_ISOExtent.R
  #adding SPATIAL extent: WHERE ?
  extent <- ISOExtent$new()
  # extent <- ISOSpatialTemporalExtent$new() #=> TO BE DONE REPLACE PREVIOUS VERSION ? https://github.com/eblondel/geometa/blob/master/tests/testthat/test_ISOSpatialTemporalExtent.R
  spatialExtent <- ISOGeographicBoundingBox$new(minx = spatial_metadata$dynamic_metadata_spatial_Extent$xmin, miny=spatial_metadata$dynamic_metadata_spatial_Extent$ymin, maxx=spatial_metadata$dynamic_metadata_spatial_Extent$xmax, maxy=spatial_metadata$dynamic_metadata_spatial_Extent$ymax) #or use bbox parameter instead for specifying output of bbox(sp)
  extent$addGeographicElement(spatialExtent)
  logger.info("Spatial extent added!")
  #named geographic areas, one ISOGeographicDescription per identifier
  if(is.null(spatial_metadata$geographic_identifier)==FALSE){
    for (i in 1:length(unique(spatial_metadata$geographic_identifier))) { # to be done with emmanuel => parentidentifier instead !!!
      geographicIdentifier <- ISOGeographicDescription$new()
      geographicIdentifier$setGeographicIdentifier(ISOMetaIdentifier$new(code = spatial_metadata$geographic_identifier[i] ))
      extent$addGeographicElement(geographicIdentifier)
    }
  }
  #adding TEMPORAL extent: WHEN ? https://github.com/eblondel/geometa/blob/master/tests/testthat/test_ISOTemporalExtent.R
  if(is.null(temporal_metadata$dynamic_metadata_temporal_Extent)==FALSE){
    time <- ISOTemporalExtent$new()
    start_date <- temporal_metadata$dynamic_metadata_temporal_Extent$start_date
    end_date <- temporal_metadata$dynamic_metadata_temporal_Extent$end_date
    cat("start_date")
    cat(start_date)
    cat("end_date")
    cat(end_date)
    temporalExtent <- GMLTimePeriod$new(beginPosition = start_date, endPosition = end_date)
    time$setTimePeriod(temporalExtent)
    extent$setTemporalElement(time)
  }
  IDENT$setExtent(extent)
  logger.info("Temporal extent added!")
  #add keywords: WHAT ?
  #you can associate many "ISOKeywords" which is a group of keyword
  #giving to each group a specific thematic thesaurus (e.g. gears, species, etc)
  #add general static keywords for this dataset
  #--------------------------------------------
  #one ISOKeywords group is created per distinct thesaurus found in the keywords table
  different_thesaurus <- unique(keywords_metadata$all_keywords$thesaurus)
  number_thesaurus<-length(unique(different_thesaurus))
  for(t in 1:number_thesaurus){
    logger.info(sprintf("Creating a new thesarus keywords set '%s'",different_thesaurus[t]))
    if(is.null(keywords_metadata$all_keywords)==FALSE){
      dynamic_keywords <- ISOKeywords$new()
      number_row<-nrow(keywords_metadata$all_keywords)
      for (i in 1:number_row) {
        if(keywords_metadata$all_keywords$thesaurus[i]==different_thesaurus[t]){dynamic_keywords$addKeyword(keywords_metadata$all_keywords$keyword[i])}
      }
      dynamic_keywords$setKeywordType("theme") # to be done "place" for spatial Thesaurus..
      # Specifiy Thesaurus
      th_general_keywords <- ISOCitation$new()
      th_general_keywords$setTitle(different_thesaurus[t])
      th_general_keywords$addDate(d)
      dynamic_keywords$setThesaurusName(th_general_keywords)
      IDENT$addKeywords(dynamic_keywords)
    }
  }
  #supplementalInformation
  IDENT$setSupplementalInformation("to add in case additional information")
  #spatial representation type
  # TODO check the difference with the other spatial_metadata$SpatialRepresentationType usage above
  if (!is.null(spatial_metadata$SpatialRepresentationType)){
    if (spatial_metadata$SpatialRepresentationType == "vector" ){
      IDENT$addSpatialRepresentationType(spatial_metadata$SpatialRepresentationType)
    }
  }
  md$addIdentificationInfo(IDENT)
  logger.info("Identification information (MD_Identification) section is set!")
  # OGC 19115 SECTION => Identification with ISO 19119 Service Identification
  #-------------------------------------------------------------------------------------------------------------------
  # TODO @julien => REMOVED FROM TUNA ATLAS (NOT GENERIC)
  # OGC 19115 SECTION => Distribution
  #-------------------------------------------------------------------------------------------------------------------
  distrib <- ISODistribution$new()
  dto <- ISODigitalTransferOptions$new()
  logger.info("Select the set of URLs to be displayed as OnlineResource")
  #all non-thumbnail URLs become online resources of the distribution section
  if(is.null(urls_metadata$http_urls)==FALSE){
    number_row<-nrow(urls_metadata$http_urls)
    for (i in 1:number_row) {
      if (startsWith(urls_metadata$http_urls$http_URLs_names[i],"thumbnail")==FALSE){
        newURL <- ISOOnlineResource$new()
        newURL$setLinkage(urls_metadata$http_urls$http_URLs_links[i])
        newURL$setName(urls_metadata$http_urls$http_URLs_names[i])
        newURL$setDescription(urls_metadata$http_urls$http_URLs_descriptions[i])
        newURL$setProtocol(urls_metadata$http_urls$http_URLs_protocols[i])
        dto$addOnlineResource(newURL)
      }
    }
  }
  distrib$setDigitalTransferOptions(dto)
  format <- ISOFormat$new()
  format$setName(metadata$Format)
  format$setVersion("Postgres 9 and Postgis 2") # to be done => stored in the spreadsheet
  # format$setAmendmentNumber("2")
  # format$setSpecification("specification")
  distrib$addFormat(format)
  logger.info("Write DistributionInfo section")
  #add as many online resources you need (WMS, WFS, website link, etc)
  md$setDistributionInfo(distrib)
  # OGC 19115 SECTION => Data Quality
  #-------------------------------------------------------------------------------------------------------------------
  #add Data / lineage for steps
  # TODO @julien @paul @emmanuel => REPLACE THIS TEXT BY REAL DESCRIPTION WHEN READY
  #example of lineage
  lineage_statement <- "Data management workflow description"
  lineage_steps <- list()
  lineage <- metadata$Lineage
  #NetCDF and google_doc datasets do not carry a parsable lineage
  if(!is.na(lineage) & metadata$Dataset_Type!="NetCDF" & metadata$Dataset_Type!="google_doc"){
    logger.info("Add Lineage process steps")
    #create lineage
    lineage_steps <- extractLineage(lineage)
    DQ1 <- prepareDataQualityWithLineage(config, lineage_statement, lineage_steps, mdDate, metadata$Identifier, contacts_metadata$contacts_roles)
    md$addDataQualityInfo(DQ1)
  }
  #Data Quality Genealogy => @julien REMOVED (TUNA ATLAS SPECIFIC)
  #----------------------
  logger.info("Data Quality Info section added")
  # OGC 19115 SECTION => Content Info -> FeatureCatalogueDescription => REMOVED FROM GENERIC WORKFLOW AT THIS STAGE
  #-------------------------------------------------------------------------------------------------------------------
  return(md)
}
#push_metadata_in_geonetwork
#Insert (or update) a metadata record in the configured Geonetwork node, set its
#publication privileges, and return the public catalogue URL of the record.
#@param config workflow configuration (loggers + config$sdi$geonetwork api/url)
#@param metadata_permanent_id permanent (uuid) identifier of the metadata record
#@param md metadata object to publish (e.g. ISOMetadata); must support $encode()
#@returns the catalogue search URL of the published record
push_metadata_in_geonetwork <- function(config, metadata_permanent_id, md){
  #config shortcuts (previously this block was duplicated verbatim)
  logger <- config$logger
  logger.info <- config$logger.info
  logger.warn <- config$logger.warn
  logger.error <- config$logger.error
  GN <- config$sdi$geonetwork$api
  #capture the catalogue base URL up-front: below we create a privilege
  #configuration object which previously reused (and shadowed) the name 'config',
  #so the final URL was built from the wrong object and lost its base URL
  geonetwork_base_url <- config$sdi$geonetwork$url
  logger.info("-------------------------------------------------------------------------------------------------------------------")
  logger.info("set shortcuts for Geonetwork config")
  logger.info("-------------------------------------------------------------------------------------------------------------------")
  #to insert or update a metadata into a geonetwork.
  #An insert has to be done in 2 operations (the insert itself, and the privilege setting to "publish" it either to a restrained group or to public)
  #An update has to be done based on the internal Geonetwork id (that can be queried as well)
  privileges <- c("view","dynamic")
  if(is(md, "ISOMetadata")){
    privileges <- c(privileges, "featured")
  }
  #NULL when the record does not exist yet in this Geonetwork node
  metaId <- GN$get(metadata_permanent_id, by = "uuid", output = "id")
  if(is.null(metaId)){
    logger.info("-------------------------------------------------------------------------------------------------------------------")
    logger.info("insert metadata (once inserted only visible to the publisher)")
    logger.info("-------------------------------------------------------------------------------------------------------------------")
    created = GN$insertMetadata(xml = md$encode(), group = "1", category = "datasets")
    logger.info("-------------------------------------------------------------------------------------------------------------------")
    logger.info("config privileges")
    priv_config <- GNPrivConfiguration$new()
    priv_config$setPrivileges("all", privileges)
    GN$setPrivConfiguration(id = created, config = priv_config)
  }else{
    logger.info("-------------------------------------------------------------------------------------------------------------------")
    logger.info("update the metadata")
    logger.info("-------------------------------------------------------------------------------------------------------------------")
    updated = GN$updateMetadata(id = metaId, xml = md$encode())
    logger.info("-------------------------------------------------------------------------------------------------------------------")
    logger.info("config privileges")
    priv_config <- GNPrivConfiguration$new()
    priv_config$setPrivileges("all", privileges)
    GN$setPrivConfiguration(id = metaId, config = priv_config)
  }
  md_url <- paste(geonetwork_base_url, "/srv/eng/catalog.search#/metadata/",metadata_permanent_id,sep="")
  return(md_url)
}
#Insert or update a metadata record on the configured CSW-T server.
#Returns the transaction object produced by ows4R (insert or update).
push_metadata_in_csw_server <- function(config,metadata_identifier,md){
  #logger shortcuts
  logger <- config$logger
  logger.info <- config$logger.info
  logger.warn <- config$logger.warn
  logger.error <- config$logger.error
  #CSW-T endpoint and credentials
  csw_url <- config$sdi$csw_server$url
  csw_user <- config$sdi$csw_server$user
  csw_pwd <- config$sdi$csw_server$pwd
  csw_client <- CSWClient$new(csw_url, "2.0.2", user = csw_user, csw_pwd, logger="INFO")
  #look up any existing record carrying this identifier
  cql_filter <- paste0("dc:identifier = '", metadata_identifier, "'")
  csw_constraint <- CSWConstraint$new(cqlText = cql_filter)
  csw_query <- CSWQuery$new(constraint = csw_constraint)
  existing_records <- csw_client$getRecords(query = csw_query)
  if(length(existing_records) > 0){
    logger.info("The metadata already exists: updating it !")
    update <- csw_client$updateRecord(record = md)
    update$getResult() #TRUE if updated, FALSE otherwise
    return(update)
  }
  logger.info("The metadata doesn't exist: creating it !")
  insert <- csw_client$insertRecord(record = md)
  return(insert)
}
#Parse a Dublin Core "subject" string of the form
#  "thesaurus1=kw1,kw2\nthesaurus2=kw3,..."
#into keyword / thesaurus structures usable by the ISO 19115 writer.
#@param all_subjects single string: one "thesaurus=kw,kw,..." declaration per line
#@returns NULL when no subject line is present; otherwise a list with:
#  $all_keywords  data.frame(keyword, thesaurus), one row per keyword
#  $thesaurus     the thesaurus names, in order of appearance
#  $TopicCategory static vector of ISO topic categories
return_keywords_and_thesaurus_as_data_frame <- function(all_subjects){
  keywords_metadata <- NULL
  thesaurus <- c()
  all_keywords <- data.frame(keyword = character(), thesaurus = character(), stringsAsFactors = FALSE)
  #one subject declaration per line
  subject_lines <- strsplit(as.character(all_subjects), split = "\n")[[1]]
  for (subject in subject_lines) {
    #split "thesaurus=kw1,kw2,..." into the thesaurus name and its keyword list
    subject_parts <- strsplit(subject, split = "=")[[1]]
    thesaurus_name <- subject_parts[1]
    thesaurus <- c(thesaurus, thesaurus_name)
    keyword_list <- unlist(strsplit(as.character(subject_parts[2]), split = ","))
    for (k in keyword_list) {
      all_keywords[nrow(all_keywords) + 1, ] <- c(k, thesaurus_name)
    }
  }
  #build the result once, after the loop (these assignments were previously
  #re-executed on every iteration); keep returning NULL for empty input,
  #matching the historical behaviour relied upon by callers
  if (length(subject_lines) > 0) {
    keywords_metadata$all_keywords <- all_keywords
    keywords_metadata$thesaurus <- thesaurus
    keywords_metadata$TopicCategory <- c("biota", "oceans", "environment", "geoscientificInformation", "economy")
  }
  return(keywords_metadata)
}
################################################################################
#add_contacts_and_roles_OGC_19115
#Build the list of ISO 19115 responsible parties (ISOResponsibleParty) declared for a
#dataset, keeping only the contacts whose role code matches one of 'expected_role'.
#@param config workflow configuration; contact details are read from config$gsheets$contacts
#@param metadata_identifier dataset identifier used to filter the contacts_roles table
#@param contacts_roles data.frame linking dataset identifiers, contact emails and role codes
#@param expected_role character vector of role codes to select
#@returns a (possibly empty) list of ISOResponsibleParty objects
add_contacts_and_roles_OGC_19115 <- function(config, metadata_identifier, contacts_roles, expected_role){
  contacts <- config$gsheets$contacts
  listContacts = list()
  if(is.null(contacts_roles)==FALSE){
    #invariant row count, hoisted out of the role loop; seq_len() below is safe
    #for a 0-row table (1:nrow would wrongly iterate over c(1, 0))
    number_row <- nrow(contacts_roles)
    for(j in expected_role){
      for(i in seq_len(number_row)){
        if(contacts_roles$dataset[i]==metadata_identifier && contacts_roles$RoleCode[i]==j){
          #look up the full contact record by email address
          the_contact <- contacts[contacts$electronicMailAddress%in%contacts_roles$contact[i],]
          rp <- ISOResponsibleParty$new()
          #single call (a duplicate call without as.character() previously overrode this one)
          rp$setIndividualName(paste(as.character(the_contact$Name),as.character(the_contact$firstname),sep=" "))
          rp$setOrganisationName(as.character(the_contact$organisationName))
          rp$setPositionName(as.character(the_contact$positionName))
          #an explicit 'setPositionName' column, when present, overrides the default position
          if(is.null(the_contact$setPositionName)==FALSE){
            rp$setPositionName(as.character(the_contact$setPositionName))
          }
          ###########################################################################################################
          #map the spreadsheet role codes onto ISO 19115 CI_RoleCode values
          if (contacts_roles$RoleCode[i]=="pointOfContact"){rp$setRole("pointOfContact")}
          if (contacts_roles$RoleCode[i]=="metadata"){rp$setRole("pointOfContact")}
          if (contacts_roles$RoleCode[i]=="publisher"){rp$setRole("publisher")}
          if (contacts_roles$RoleCode[i]=="data_entry"){rp$setRole("originator")}
          if (contacts_roles$RoleCode[i]=="data_collection"){rp$setRole("resourceProvider")}
          if (contacts_roles$RoleCode[i]=="data"){rp$setRole("owner")}
          #NOTE(review): 'owner' -> "author" looks swapped with 'data' -> "owner"; confirm the intended mapping
          if (contacts_roles$RoleCode[i]=="owner"){rp$setRole("author")}
          if (contacts_roles$RoleCode[i]=="originator"){rp$setRole("originator")}
          if (contacts_roles$RoleCode[i]=="principalInvestigator"){rp$setRole("originator")}
          if (contacts_roles$RoleCode[i]=="data_structure_definition"){rp$setRole("processor")}
          if (grepl("processor_step",contacts_roles$RoleCode[i])){rp$setRole("processor")}
          ###########################################################################################################
          #contact details: phone, postal address, online resource
          contact <- ISOContact$new()
          phone <- ISOTelephone$new()
          phone$setVoice(as.character(the_contact$voice))
          phone$setFacsimile(as.character(the_contact$facsimile))
          contact$setPhone(phone)
          address <- ISOAddress$new()
          address$setDeliveryPoint(as.character(the_contact$deliveryPoint))
          address$setCity(as.character(the_contact$city))
          address$setPostalCode(the_contact$postalCode)
          address$setCountry(the_contact$country)
          address$setEmail(the_contact$electronicMailAddress)
          contact$setAddress(address)
          res <- ISOOnlineResource$new()
          res$setLinkage(the_contact$ISOOnlineResource)
          res$setName(the_contact$setNameISOOnlineResource)
          contact$setOnlineResource(res)
          rp$setContactInfo(contact)
          listContacts[[length(listContacts)+1]] <- rp
        }
      }
    }
  }
  return(listContacts)
  #Function over
}
#-----------------------------------------------------------------------------------------------------
#prepareDataQualityWithLineage
#Build an ISODataQuality object (dataset scope) whose lineage holds one process step
#per entry of 'lineage_steps', plus a final "catalogue publication" step dated now.
#Processors for step N are resolved via the role code "processor_step<N>".
#@param config workflow configuration used to resolve the processor contacts
#@param lineage_statement Statement to describe the overall lineage steps sequence
#@param lineage_steps a "list" object with the step descriptions
#@param dataset_integration_date date of integration used commonly for all steps. 'ISOBaseDateTime' or POSIXct/POSIXt object
#@param metadata_identifier dataset identifier used to look up the contacts/roles
#@param contacts_roles data.frame linking dataset identifiers, contacts and role codes
#@returns an object of class ISODataQuality
prepareDataQualityWithLineage <- function(config, lineage_statement, lineage_steps, dataset_integration_date,
                                          metadata_identifier, contacts_roles){
  dq <- ISODataQuality$new()
  #quality report applies to the whole dataset
  scope <- ISOScope$new()
  scope$setLevel("dataset")
  dq$setScope(scope)
  #add lineage
  lineage <- ISOLineage$new()
  lineage$setStatement(lineage_statement)
  #add processing steps
  stepNb <- 1
  for(step in lineage_steps){
    ps <- ISOProcessStep$new()
    ps$setDescription(sprintf("Step %s - %s", stepNb, step))
    ps$setDateTime(dataset_integration_date)
    #TODO
    #processors for this step are registered under the role "processor_step<N>"
    role=paste("processor_step",stepNb,sep="")
    expected_role=c(role)
    processors <- add_contacts_and_roles_OGC_19115(config, metadata_identifier, contacts_roles, expected_role)
    for(processor in processors){
      ps$addProcessor(processor)
    }
    lineage$addProcessStep(ps)
    stepNb <- stepNb+1
  }
  #process step N: Data Publication in Tuna Atlas Catalogue, dated at generation time
  psN <- ISOProcessStep$new()
  psN$setDescription(sprintf("Step %s - Data Publication in Tuna Atlas catalogue", stepNb))
  psN$setDateTime(Sys.time())
  #ONLY ONE PROCESSOR IN THIS CASE
  expected_role=c("data_structure_definition")
  processor <- add_contacts_and_roles_OGC_19115(config, metadata_identifier, contacts_roles, expected_role)
  #do we need a processor when there is no data_structure_definition? ie when it's not a detailed dataset, or a dataset not compliant with FDI
  if(length(processor)>0){psN$addProcessor(processor[[1]])}
  lineage$addProcessStep(psN)
  dq$setLineage(lineage)
  return(dq)
}
#-----------------------------------------------------------------------------------------------------
#extractLineage
#@param lineage Lineage information as string extracted from metadata table in the database
# ("step: text1. step: text2. .... step: textN.")
#@returns an object of class "list" with the step contents
extractLineage <- function(lineage){
  #Split a lineage string of the form "step1: text1. step2: text2. ..." into a list
  #of cleaned step descriptions (leading/trailing separator space and newlines removed).
  #@param lineage Lineage information as a single string from the metadata table
  #@returns a "list" object with one character element per step
  lineage_steps <- as.list(unlist(strsplit(lineage, "step[0-9]:"))) #@eblondel 12/08/2017 apply regular expression to detect step nb
  #drop empty fragments (e.g. the empty match before the first "step1:" marker)
  lineage_steps <- lineage_steps[sapply(lineage_steps, function(x){return(nchar(x)>0)})]
  lineage_steps <- lapply(lineage_steps, function(x){
    out <- x
    #strip the single leading space left over from the "stepN: " separator
    if(grepl("^ ", out)) out <- substr(out, 2, nchar(out))
    #strip a single trailing space (bug fix: previous code kept only the LAST
    #two characters of the step instead of removing the trailing space)
    if(grepl(" $", out)) out <- substr(out, 1, nchar(out)-1)
    #remove embedded newlines; doing it per element keeps the result a true list
    #(the previous whole-object gsub() silently coerced the list to a character vector)
    gsub("\n", "", out)
  })
  return(lineage_steps)
}
#write_metadata_OGC_19115
write_metadata_OGC_19115_from_Dublin_Core <- function(config = NULL,
metadata = NULL,
contacts_metadata = NULL,
spatial_metadata = NULL,
temporal_metadata = NULL,
keywords_metadata = NULL, # DATAFRAME WITH ALL (STATIC & DYNAMIC) KEYWORDS
urls_metadata= NULL # LIST OF DYNAMIC / COMMON URLs
)
{
#config shortcuts
con <- config$sdi$db$con
logger <- config$logger
logger.info <- config$logger.info
logger.warn <- config$logger.warn
logger.error <- config$logger.error
# OGC 19115 SECTION => Metadata entity set information
#-------------------------------------------------------------------------------------------------------------------
logger.info("OGC 19115 SECTION => Metadata entity set information")
#create the ISO Metadata sheet and fill it with all required elements
md = ISOMetadata$new()
md$setFileIdentifier(metadata$Permanent_Identifier)
if(is.null(metadata$Parent_Metadata_Identifier)==FALSE){md$setParentIdentifier(metadata$Parent_Metadata_Identifier)} #=> TO BE DONE NEW ??? series software service
md$setLanguage(metadata$Language)# if metadata and resource have the same language
md$setCharacterSet("utf8")
# md <- ISOBaseCharacterString$new(value = "utf8") ???
md$addHierarchyLevel(metadata$addHierarchyLevel)# => not working ask Emmanuel Blondel
# md$setHierarchyLevelName("This datasets is the result of a query in a SQL DataWarehouse")
# TODO MANAGE "hierarchyLevelName" metadata element @julien
logger.info("Add the contacts and roles for this METADATA sheet")
expected_role=c("pointOfContact","metadata")
listContacts <- add_contacts_and_roles_OGC_19115(config, metadata$Identifier, contacts_metadata$contacts_roles, expected_role)
for(listContact in listContacts){
md$addContact(listContact)
}
# TODO endless discussion to define what date should be used with @paul & @emmanuel & @julien
#mdDate <- metadata$Date
# md$setDateStamp(ISOdate(2015, 1, 1, 1))
mdDate <- Sys.time()
md$setDateStamp(mdDate)
md$setMetadataStandardName("ISO 19115:2003/19139")
md$setMetadataStandardVersion("1.0")
# TODO decide if we should set up a URI @paul & @emmanuel & @julien
# md$setDataSetURI(paste(metadata$Identifier,"use a DOI instead ?",sep=" / "))
# md$setDataSetURI(metadata$Identifier)
logger.info("MD_Metadata section is set")
# OGC 19115 SECTION => Metadata entity set information
logger.info("-------------------------------------------------------------------------------------------------------------------")
logger.info("OGC 19115 SECTION => Spatial Representation")
logger.info("-------------------------------------------------------------------------------------------------------------------")
###########################################################################################
#VectorSpatialRepresentation
# ----------------
if (!is.null(spatial_metadata$SpatialRepresentationType)){
if (spatial_metadata$SpatialRepresentationType == "vector" ){
VSR <- ISOVectorSpatialRepresentation$new()
VSR$setTopologyLevel("geometryOnly")
geomObject <- ISOGeometricObjects$new()
geomObject$setGeometricObjectType(spatial_metadata$GeometricObjectType)
if(is.null(spatial_metadata$dynamic_metadata_count_features)==FALSE){
geomObject$setGeometricObjectCount(spatial_metadata$dynamic_metadata_count_features) #number of features
}
# VSR$setGeometricObjects(geomObject)
VSR$addGeometricObjects(geomObject)
# md$setSpatialRepresentationInfo(VSR)
md$addSpatialRepresentationInfo(VSR)
}
}
logger.info("SpatialRepresentation section is set !")
# OGC 19115 SECTION => Reference System
#-------------------------------------------------------------------------------------------------------------------
logger.info("OGC 19115 SECTION => Reference System ")
# TO BE DONE MANAGE VECTOR AND RASTER
if(is.null(spatial_metadata$SRID)==FALSE){
RS <- ISOReferenceSystem$new()
RSId <- ISOReferenceIdentifier$new(code = spatial_metadata$SRID[[1]], codeSpace = "EPSG")
RS$setReferenceSystemIdentifier(RSId)
md$setReferenceSystemInfo(RS)
}
logger.info("ReferenceSystem section is set !")
# OGC 19115 SECTION => Identification Section (MD_Identification)
#-------------------------------------------------------------------------------------------------------------------
logger.info("OGC 19115 SECTION => Identification Section (MD_Identification) ")
IDENT <- ISODataIdentification$new()
IDENT$setAbstract(metadata$Description)
#TODO @julien @paul => ADAPT WITH THE CONTENT OF THE "DESCRIPTION" COLUMN
# if(is.null(metadata$Purpose)==FALSE){
# IDENT$setPurpose(metadata$Purpose)
# }
# for(i in list_Credits){IDENT$addCredit(i)} # TO be done uncomment
IDENT$setLanguage(metadata$Language)
IDENT$setCharacterSet("utf8")
#topic categories (one or more)
# for(i in keywords_metadata$TopicCategory){IDENT$addTopicCategory(i)}
#adding a point of contact for the identificaiton
#organization contact
logger.info("Write contacts and roles for Data Identification Section")
expected_role=c("publisher","principalInvestigator")
listContacts <- add_contacts_and_roles_OGC_19115(config, metadata$Identifier, contacts_metadata$contacts_roles, expected_role)
for(listContact in listContacts){
# SERVICE$pointOfContact <- c(SERVICE$pointOfContact, serviceContact)
IDENT$addPointOfContact(listContact)
}
logger.info("Contacts for IDENTIFICATION SECTION added")
ct <- ISOCitation$new()
ct$setTitle(metadata$Title)
d <- ISODate$new()
d$setDate(mdDate)
d$setDateType("revision")
ct$addDate(d)
ct$setEdition("1.0")
ct$setEditionDate(as.Date(mdDate)) #EditionDate should be of Date type
# ct$setIdentifier(mdId)
ct$setIdentifier(ISOMetaIdentifier$new(code = metadata$Permanent_Identifier)) #Julien code à vérifier
ct$setPresentationForm("mapDigital") # @julien check relevant value in ISO 19115 CODE LIST
# TODO @julien CHECK IF ADDED CONTACT IS CORRECT IN THIS CONTEXT (SHOULD NOT => LAST FROM LIST ABOVE)
ct$setCitedResponsibleParty(listContact)
IDENT$setCitation(ct)
if(is.null(urls_metadata$http_urls)==FALSE){
logger.info("Add list of graphic overview")
number_row<-nrow(urls_metadata$http_urls)
for (i in 1:number_row){
if (startsWith(urls_metadata$http_urls$http_URLs_names[i],"thumbnail")){
go <- ISOBrowseGraphic$new(
fileName = urls_metadata$http_urls$http_URLs_links[i],
fileDescription = urls_metadata$http_urls$http_URLs_descriptions[i],
fileType = urls_metadata$http_urls$http_URLs_protocols[i]
)
IDENT$addGraphicOverview(go)
}
}
}
# Constraint information
#maintenance information
mi <- ISOMaintenanceInformation$new()
mi$setMaintenanceFrequency(metadata$Update_frequency)
IDENT$setResourceMaintenance(mi)
#adding legal constraint(s)
# Julien => information to be stored in the metadata table (not hard coded)
lc <- ISOLegalConstraints$new()
lc$addUseLimitation(metadata$Rights)
# lc$addUseLimitation("Use limitation 2 e.g. Citation guidelines")
# lc$addUseLimitation("Use limitation 3 e.g. Disclaimer")
# lc$addAccessConstraint("copyright")
# lc$addAccessConstraint("license")
# lc$addUseConstraint("copyright")
# lc$addUseConstraint("license")
IDENT$setResourceConstraints(lc)
#adding security constraints
# sc <- ISOSecurityConstraints$new()
# sc$setClassification("secret")
# sc$setUserNote("ultra secret")
# sc$setClassificationSystem("no classification in particular")
# sc$setHandlingDescription("description")
# IDENT$addResourceConstraints(sc)
# MD_Constraints
#adding extent: https://github.com/eblondel/geometa/blob/master/tests/testthat/test_ISOExtent.R
#adding SPATIAL extent: WHERE ?
extent <- ISOExtent$new()
# extent <- ISOSpatialTemporalExtent$new() #=> TO BE DONE REPLACE PREVIOUS VERSION ? https://github.com/eblondel/geometa/blob/master/tests/testthat/test_ISOSpatialTemporalExtent.R
spatialExtent <- ISOGeographicBoundingBox$new(minx = spatial_metadata$dynamic_metadata_spatial_Extent$xmin, miny=spatial_metadata$dynamic_metadata_spatial_Extent$ymin, maxx=spatial_metadata$dynamic_metadata_spatial_Extent$xmax, maxy=spatial_metadata$dynamic_metadata_spatial_Extent$ymax) #or use bbox parameter instead for specifying output of bbox(sp)
extent$addGeographicElement(spatialExtent)
logger.info("Spatial extent added!")
if(is.null(spatial_metadata$geographic_identifier)==FALSE){
for (i in 1:length(unique(spatial_metadata$geographic_identifier))) { # to be done with emmanuel => parentidentifier instead !!!
geographicIdentifier <- ISOGeographicDescription$new()
geographicIdentifier$setGeographicIdentifier(ISOMetaIdentifier$new(code = spatial_metadata$geographic_identifier[i] ))
extent$addGeographicElement(geographicIdentifier)
}
}
#adding TEMPORAL extent: WHEN ? https://github.com/eblondel/geometa/blob/master/tests/testthat/test_ISOTemporalExtent.R
if(is.null(temporal_metadata$dynamic_metadata_temporal_Extent)==FALSE){
time <- ISOTemporalExtent$new()
start_date <- temporal_metadata$dynamic_metadata_temporal_Extent$start_date
end_date <- temporal_metadata$dynamic_metadata_temporal_Extent$end_date
cat("start_date")
cat(start_date)
cat("end_date")
cat(end_date)
temporalExtent <- GMLTimePeriod$new(beginPosition = start_date, endPosition = end_date)
time$setTimePeriod(temporalExtent)
extent$setTemporalElement(time)
}
IDENT$setExtent(extent)
logger.info("Temporal extent added!")
#add keywords: WHAT ?
#you can associate many "ISOKeywords" which is a group of keyword
#giving to each group a specific thematic thesaurus (e.g. gears, species, etc)
#add general static keywords for this dataset
#--------------------------------------------
different_thesaurus <- unique(keywords_metadata$all_keywords$thesaurus)
number_thesaurus<-length(unique(different_thesaurus))
for(t in 1:number_thesaurus){
logger.info(sprintf("Creating a new thesarus keywords set '%s'",different_thesaurus[t]))
if(is.null(keywords_metadata$all_keywords)==FALSE){
dynamic_keywords <- ISOKeywords$new()
number_row<-nrow(keywords_metadata$all_keywords)
for (i in 1:number_row) {
if(keywords_metadata$all_keywords$thesaurus[i]==different_thesaurus[t]){dynamic_keywords$addKeyword(keywords_metadata$all_keywords$keyword[i])}
}
dynamic_keywords$setKeywordType("theme") # to be done "place" for spatial Thesaurus..
# Specifiy Thesaurus
th_general_keywords <- ISOCitation$new()
th_general_keywords$setTitle(different_thesaurus[t])
th_general_keywords$addDate(d)
dynamic_keywords$setThesaurusName(th_general_keywords)
IDENT$addKeywords(dynamic_keywords)
}
}
#supplementalInformation
IDENT$setSupplementalInformation("to add in case additional information")
#spatial representation type
# TODO voir la différence avec l'autre spatial_metadata$SpatialRepresentationType
if (!is.null(spatial_metadata$SpatialRepresentationType)){
if (spatial_metadata$SpatialRepresentationType == "vector" ){
IDENT$addSpatialRepresentationType(spatial_metadata$SpatialRepresentationType)
}
}
md$addIdentificationInfo(IDENT)
logger.info("Identification information (MD_Identification) section is set!")
# OGC 19115 SECTION => Identification with ISO 19119 Service Identification
#-------------------------------------------------------------------------------------------------------------------
# TODO @julien => REMOVED FROM TUNA ATLAS (NOT GENERIC)
# OGC 19115 SECTION => Distribution
#-------------------------------------------------------------------------------------------------------------------
distrib <- ISODistribution$new()
dto <- ISODigitalTransferOptions$new()
logger.info("Select the set of URLs to be displayed as OnlineResource")
if(is.null(urls_metadata$http_urls)==FALSE){
number_row<-nrow(urls_metadata$http_urls)
for (i in 1:number_row) {
if (startsWith(urls_metadata$http_urls$http_URLs_names[i],"thumbnail")==FALSE){
newURL <- ISOOnlineResource$new()
newURL$setLinkage(urls_metadata$http_urls$http_URLs_links[i])
newURL$setName(urls_metadata$http_urls$http_URLs_names[i])
newURL$setDescription(urls_metadata$http_urls$http_URLs_descriptions[i])
newURL$setProtocol(urls_metadata$http_urls$http_URLs_protocols[i])
dto$addOnlineResource(newURL)
}
}
}
distrib$setDigitalTransferOptions(dto)
format <- ISOFormat$new()
format$setName(metadata$Format)
format$setVersion("Postgres 9 and Postgis 2") # to be done => stored in the spreadsheet
# format$setAmendmentNumber("2")
# format$setSpecification("specification")
distrib$addFormat(format)
logger.info("Write DistributionInfo section")
#add as many online resources you need (WMS, WFS, website link, etc)
md$setDistributionInfo(distrib)
# OGC 19115 SECTION => Data Quality
#-------------------------------------------------------------------------------------------------------------------
#add Data / lineage for steps
# TODO @julien @paul @emmanuel => REPLACE THIS TEXT BY REAL DESCRIPTION WHEN READY
#example of lineage
lineage_statement <- "Data management workflow description"
lineage_steps <- list()
lineage <- metadata$Lineage
if(!is.na(lineage) & metadata$Dataset_Type!="NetCDF" & metadata$Dataset_Type!="google_doc"){
logger.info("Add Lineage process steps")
#create lineage
lineage_steps <- extractLineage(lineage)
DQ1 <- prepareDataQualityWithLineage(config, lineage_statement, lineage_steps, mdDate, metadata$Identifier, contacts_metadata$contacts_roles)
md$addDataQualityInfo(DQ1)
}
#Data Quality Genealogy => @julien REMOVED (TUNA ATLAS SPECIFIC)
#----------------------
logger.info("Data Quality Info section added")
# OGC 19115 SECTION => Content Info -> FeatureCatalogueDescription => REMOVED FROM GENERIC WORKFLOW AT THIS STAGE
#-------------------------------------------------------------------------------------------------------------------
return(md)
}
#push_metadata_in_geonetwork
#@param config
#@param metadata$Permanent_Identifier
#@param md
push_metadata_in_geonetwork <- function(config, metadata_permanent_id, md){
  # Publish (insert or update) a geometa metadata object into Geonetwork and
  # return the public catalog URL of the record.
  #
  # config                : workflow configuration; provides logger shortcuts and
  #                         config$sdi$geonetwork (api client + public url)
  # metadata_permanent_id : permanent identifier (uuid) of the metadata record
  # md                    : geometa metadata object (e.g. ISOMetadata) to publish
  #
  # Returns the Geonetwork catalog.search URL of the published record.

  # config shortcuts (the original declared this block twice; once is enough)
  logger <- config$logger
  logger.info <- config$logger.info
  logger.warn <- config$logger.warn
  logger.error <- config$logger.error
  GN <- config$sdi$geonetwork$api
  logger.info("-------------------------------------------------------------------------------------------------------------------")
  logger.info("set shortcuts for Geonetwork config")
  logger.info("-------------------------------------------------------------------------------------------------------------------")
  # To insert or update a metadata into a geonetwork:
  # - an insert has to be done in 2 operations (the insert itself, then the
  #   privilege setting to "publish" it either to a restrained group or to public)
  # - an update has to be done based on the internal Geonetwork id (queried by uuid)
  privileges <- c("view","dynamic")
  if(is(md, "ISOMetadata")){
    privileges <- c(privileges, "featured")
  }
  metaId <- GN$get(metadata_permanent_id, by = "uuid", output = "id")
  if(is.null(metaId)){
    logger.info("-------------------------------------------------------------------------------------------------------------------")
    logger.info("insert metadata (once inserted only visible to the publisher)")
    logger.info("-------------------------------------------------------------------------------------------------------------------")
    created = GN$insertMetadata(xml = md$encode(), group = "1", category = "datasets")
    logger.info("-------------------------------------------------------------------------------------------------------------------")
    logger.info("config privileges")
    # FIX: use a dedicated variable for the privilege configuration. The
    # original assigned it to 'config', shadowing the workflow config and
    # breaking the md_url construction below.
    priv_config <- GNPrivConfiguration$new()
    priv_config$setPrivileges("all", privileges)
    GN$setPrivConfiguration(id = created, config = priv_config)
  }else{
    logger.info("-------------------------------------------------------------------------------------------------------------------")
    logger.info("update the metadata")
    logger.info("-------------------------------------------------------------------------------------------------------------------")
    updated = GN$updateMetadata(id = metaId, xml = md$encode())
    logger.info("-------------------------------------------------------------------------------------------------------------------")
    logger.info("config privileges")
    priv_config <- GNPrivConfiguration$new()
    priv_config$setPrivileges("all", privileges)
    GN$setPrivConfiguration(id = metaId, config = priv_config)
  }
  # public catalog URL of the record (requires the unshadowed workflow config)
  md_url <- paste(config$sdi$geonetwork$url, "/srv/eng/catalog.search#/metadata/", metadata_permanent_id, sep="")
  return(md_url)
}
push_metadata_in_csw_server <- function(config,metadata_identifier,md){
  # Insert or update a metadata record on a transactional CSW (CSW-T) server.
  # Returns the ows4R transaction object (insert or update result).
  #
  # config              : workflow configuration (logger shortcuts + csw_server settings)
  # metadata_identifier : dc:identifier used to look up an existing record
  # md                  : metadata object to insert/update

  # logger shortcuts taken from the workflow configuration
  logger <- config$logger
  logger.info <- config$logger.info
  logger.warn <- config$logger.warn
  logger.error <- config$logger.error

  # CSW-T server connection settings
  csw_url <- config$sdi$csw_server$url
  csw_user <- config$sdi$csw_server$user
  csw_pwd <- config$sdi$csw_server$pwd
  csw_client <- CSWClient$new(csw_url, "2.0.2", user = csw_user, csw_pwd, logger="INFO")

  # look up the record by its dc:identifier through a CQL constraint
  cql_filter <- paste0("dc:identifier = '", metadata_identifier, "'")
  constraint <- CSWConstraint$new(cqlText = cql_filter)
  search_query <- CSWQuery$new(constraint = constraint)
  matches <- csw_client$getRecords(query = search_query)

  if(length(matches) == 0){
    # no existing record: create it
    logger.info("The metadata doesn't exist: creating it !")
    return(csw_client$insertRecord(record = md))
  }

  # record already present: update it in place
  logger.info("The metadata already exists: updating it !")
  update <- csw_client$updateRecord(record = md)
  update$getResult() #TRUE if updated, FALSE otherwise
  return(update)
}
# global.R — bootstrap for the Shiny app: attach packages, refresh the SSI
# hospital-admission data, and fit the per-lag prediction models used by the UI.

# Packages. The original script attached rvest and data.table several times;
# library() is a no-op on an already-attached package, so each package is
# attached exactly once here, in the original first-occurrence order (this
# preserves the search-path masking of the original).
library(rlang)
library(shiny)
library(data.table)
library(DT)
library(stringi)
library(reshape2) # was require(); library() fails loudly if the package is missing
library(ggcorrplot)
library(shinydashboardPlus)
library(shinyFiles)
library(rvest)

# NOTE(review): clearing the workspace mid-script is fragile, but kept for
# compatibility with the original workflow (sourced scripts below expect it).
rm(list = ls())

library(plotly) # attached after rm(), as in the original; re-exports %>%

#setwd("../.")
# update with latest data
source("./scripts/join_zip_raw.R") # deletes workspace
# source functions
source("./scripts/source_func_download_SSI_Data.R")
source("./scripts/source_model.R")

# read processed raw figures
admissions <- fread("./data/SSI_daily_hosp_processed/fulltable_joined.csv")
# lagged design matrix: columns x1..x6, with x6 as the target and x1..x5 as lags
# (assumes admission2trainX from source_model.R produces these columns — TODO confirm)
X <- admission2trainX(admissions, 6)
#setwd("./shiny")

# standard linear regression models for each lag
models_reg_ss <- list(
  m1 = lm(x6 ~ x1, data = X),
  m2 = lm(x6 ~ x2, data = X),
  m3 = lm(x6 ~ x3, data = X),
  m4 = lm(x6 ~ x4, data = X),
  m5 = lm(x6 ~ x5, data = X)
) %>%
  lapply(add_class, newclass = "simple_univariate_lm") %>% # tag each model with non-formula interface
  add_class("model_list") # interface for pair-wise prediction of model_list and input data list

# OLR (ordinary least roots: OLS with a mean average deviation loss)
models_lin_mad <- list(
  m1 = OLR(X$x1, X$x6),
  m2 = OLR(X$x2, X$x6),
  m3 = OLR(X$x3, X$x6),
  m4 = OLR(X$x4, X$x6),
  m5 = OLR(X$x5, X$x6)
) %>% add_class("model_list")

# coefficient matrices consumed by the app
reg_models <- do.call(rbind, lapply(models_reg_ss, coef))
# coef() method for OLR fits: no intercept, slope stored in $minimum
coef.OLR_model <- function(x) c("(intercept)" = 0, b = x$minimum)
mad_models <- do.call(rbind, lapply(models_lin_mad, coef))
| /global.R | permissive | covid19dk/fastRestimation | R | false | false | 1,447 | r | library(rlang)
library(shiny)
library(data.table)
library(DT)
library(stringi)
require(reshape2)
library(ggcorrplot)
library(shinydashboardPlus)
library(shinyFiles)
library(rvest)
library(data.table)
rm(list=ls())
library(rvest)
library(data.table)
library(plotly)
#setwd("../.")
#update with latest data
source("./scripts/join_zip_raw.R") #deletes workspace
#source functions
source("./scripts/source_func_download_SSI_Data.R")
source("./scripts/source_model.R")
#read processed raw figures
admissions = fread("./data/SSI_daily_hosp_processed/fulltable_joined.csv")
X = admission2trainX(admissions,6)
#setwd("./shiny")
#standard linear regression models for each lag
models_reg_ss = list(
m1 = lm(x6~x1 ,data=X),
m2 = lm(x6~x2 ,data=X),
m3 = lm(x6~x3 ,data=X),
m4 = lm(x6~x4 ,data=X),
m5 = lm(x6~x5 ,data=X)
) %>%
lapply(add_class,newclass="simple_univariate_lm") %>% #tag each model with non-formula interface
add_class("model_list") #interface for pair wise prediction of model_list and input data list
#OLR (ordinary least roots. OLS with a mean average deviation loss)
models_lin_mad = list(
m1 = OLR(X$x1,X$x6),
m2 = OLR(X$x2,X$x6),
m3 = OLR(X$x3,X$x6),
m4 = OLR(X$x4,X$x6),
m5 = OLR(X$x5,X$x6)
) %>% add_class("model_list")
reg_models = (do.call(rbind,lapply(models_reg_ss,coef)))
coef.OLR_model = function(x) c("(intercept)"=0,b = x$minimum)
mad_models=do.call(rbind,lapply(models_lin_mad,coef))
|
# VAR.R — fit a VAR(2) on monthly macroeconomic dummy data, run residual
# diagnostics, and evaluate out-of-sample forecast accuracy.
# NOTE(review): install.packages() calls inside an analysis script re-install
# on every run; consider moving installation out of the script.
install.packages("vars")
library(vars)
install.packages("mFilter")
library(mFilter)
library(tseries)
install.packages("TSstudio")
library(TSstudio)
library(forecast)
library(tidyverse)
# monthly macro indicators, one column per series
data <- read_csv('DummyData.csv')
head(data)
str(data)
# build monthly ts objects starting January 2000
GDP <- ts(data$`GDP Growth`, start = c(2000,1,1), frequency = 12)
AvSP <- ts(data$`Monthly Average Stock Price`, start = c(2000,1,1), frequency = 12)
infrat <- ts(data$`Monthly Inflation Rate`, start = c(2000,1,1), frequency = 12)
exports <- ts(data$`Monthly Exports`, start = c(2000,1,1), frequency = 12)
imports <- ts(data$`Monthly Imports`, start = c(2000,1,1), frequency = 12)
M1 <- ts(data$`Money Supply M1`, start = c(2000,1,1), frequency = 12)
oilprod <- ts(data$`Crude Oil Production`, start = c(2000,1,1), frequency = 12)
unempdata <- ts(data$`Unemployment Rate`, start = c(2000,1,1), frequency = 12)
emprat <- ts(data$`Employment-to-Population Ratio`, start = c(2000,1,1), frequency = 12)
# quick visual inspection of each series
ts_plot(GDP)
ts_plot(AvSP)
ts_plot(infrat)
ts_plot(exports)
ts_plot(imports)
ts_plot(M1)
ts_plot(oilprod)
ts_plot(unempdata)
ts_plot(emprat)
#Use Phillips-Perron Test to determine whether to reject the null hypothesis of unit root
#Stationary if reject null hypothesis
pp.test(GDP)
pp.test(AvSP) #not stationary
pp.test(infrat)
pp.test(exports)
pp.test(imports)
pp.test(M1) #not stationary
pp.test(oilprod)
pp.test(unempdata)
pp.test(emprat)
# NOTE(review): df_AvSP and df_oilprod are never defined in this script —
# presumably differenced series (e.g. diff(AvSP)) were intended; these two
# calls fail as written. TODO confirm and define them.
pp.test(df_AvSP)
pp.test(df_oilprod)
# VAR system built from the stationary series (AvSP and M1 excluded)
v1 <- cbind(GDP, infrat, exports,imports, oilprod, unempdata, emprat)
colnames(v1) <- cbind("GDP", 'infrat', 'exports','imports', 'oilprod', 'unempdata', 'emprat')
# information-criteria based lag selection, up to 24 monthly lags
lagselect <- VARselect(v1, lag.max = 24, type = "const")
lagselect$selection
Model1 <- VAR(v1, p = 2, type = "const", season = NULL, exog = NULL)
#summary(Model1)
# Portmanteau test for residual serial correlation at increasing lag orders
for (i in 1:60)
{
  Serial1 <- serial.test(Model1, lags.pt = i, type = "PT.asymptotic")
  print(Serial1)
}
# multivariate ARCH-LM test for residual heteroskedasticity
for (i in 1:10)
{
  Arch1 <- arch.test(Model1, lags.multi = i, multivariate.only = TRUE)
  print(Arch1)
}
# shrink plot margins so the multi-panel stability plot fits the device
par("mar")
par(mar=c(1,1,1,1))
# OLS-CUSUM structural-stability test of the fitted equations
Stability1 <- stability(Model1, type = "OLS-CUSUM")
plot(Stability1)
# train/test split: fit on 2000-2017, hold out 2018-2020 (36 months)
trainingdata <- window(v1, c(2000,1), c(2017,12))
testingdata <- window(v1, c(2018,1), c(2020,12))
v <- VAR(trainingdata, p=2)
p <- predict(v, n.ahead=36)
res <- residuals(v)
fits <- fitted(v)
# wrap each variable's forecast as a minimal "forecast" object so
# forecast::accuracy() can compare it against the held-out data;
# the two leading NAs in 'fitted' account for the p = 2 presample
for(i in 1:7)
{
  fc <- structure(list(mean=p$fcst[[i]][,"fcst"], x=trainingdata[,i],
                       fitted=c(NA,NA,fits[,i])),class="forecast")
  print(accuracy(fc,testingdata[,i]))
}
# same wrapper for the first variable (GDP) on its own
fc <- structure(list(mean=p$fcst[[1]][,"fcst"], x=trainingdata[,1],
                     fitted=c(NA,NA,fits[,1])),class="forecast")
fc
print(accuracy(fc,testingdata[,1]))
# NOTE(review): predict.varest takes (object, ..., n.ahead, ci, dumvar); the
# c(2000,1) / c(2020,12) arguments fall into '...' and are silently ignored —
# they look like window()-style dates. TODO confirm intent.
forecast <- predict(Model1, c(2000,1), c(2020,12), n.ahead = 12)
forecast
| /VAR.R | no_license | jaylum94/econometrics_deep_learning_comparison | R | false | false | 2,824 | r | install.packages("vars")
library(vars)
install.packages("mFilter")
library(mFilter)
library(tseries)
install.packages("TSstudio")
library(TSstudio)
library(forecast)
library(tidyverse)
data <- read_csv('DummyData.csv')
head(data)
str(data)
GDP <- ts(data$`GDP Growth`, start = c(2000,1,1), frequency = 12)
AvSP <- ts(data$`Monthly Average Stock Price`, start = c(2000,1,1), frequency = 12)
infrat <- ts(data$`Monthly Inflation Rate`, start = c(2000,1,1), frequency = 12)
exports <- ts(data$`Monthly Exports`, start = c(2000,1,1), frequency = 12)
imports <- ts(data$`Monthly Imports`, start = c(2000,1,1), frequency = 12)
M1 <- ts(data$`Money Supply M1`, start = c(2000,1,1), frequency = 12)
oilprod <- ts(data$`Crude Oil Production`, start = c(2000,1,1), frequency = 12)
unempdata <- ts(data$`Unemployment Rate`, start = c(2000,1,1), frequency = 12)
emprat <- ts(data$`Employment-to-Population Ratio`, start = c(2000,1,1), frequency = 12)
ts_plot(GDP)
ts_plot(AvSP)
ts_plot(infrat)
ts_plot(exports)
ts_plot(imports)
ts_plot(M1)
ts_plot(oilprod)
ts_plot(unempdata)
ts_plot(emprat)
#Use Phillips-Perron Test to determine whether to reject the null hypothesis of unit root
#Stationary if reject null hypothesis
pp.test(GDP)
pp.test(AvSP) #not stationary
pp.test(infrat)
pp.test(exports)
pp.test(imports)
pp.test(M1) #not stationary
pp.test(oilprod)
pp.test(unempdata)
pp.test(emprat)
pp.test(df_AvSP)
pp.test(df_oilprod)
v1 <- cbind(GDP, infrat, exports,imports, oilprod, unempdata, emprat)
colnames(v1) <- cbind("GDP", 'infrat', 'exports','imports', 'oilprod', 'unempdata', 'emprat')
lagselect <- VARselect(v1, lag.max = 24, type = "const")
lagselect$selection
Model1 <- VAR(v1, p = 2, type = "const", season = NULL, exog = NULL)
#summary(Model1)
for (i in 1:60)
{
Serial1 <- serial.test(Model1, lags.pt = i, type = "PT.asymptotic")
print(Serial1)
}
for (i in 1:10)
{
Arch1 <- arch.test(Model1, lags.multi = i, multivariate.only = TRUE)
print(Arch1)
}
par("mar")
par(mar=c(1,1,1,1))
Stability1 <- stability(Model1, type = "OLS-CUSUM")
plot(Stability1)
trainingdata <- window(v1, c(2000,1), c(2017,12))
testingdata <- window(v1, c(2018,1), c(2020,12))
v <- VAR(trainingdata, p=2)
p <- predict(v, n.ahead=36)
res <- residuals(v)
fits <- fitted(v)
for(i in 1:7)
{
fc <- structure(list(mean=p$fcst[[i]][,"fcst"], x=trainingdata[,i],
fitted=c(NA,NA,fits[,i])),class="forecast")
print(accuracy(fc,testingdata[,i]))
}
fc <- structure(list(mean=p$fcst[[1]][,"fcst"], x=trainingdata[,1],
fitted=c(NA,NA,fits[,1])),class="forecast")
fc
print(accuracy(fc,testingdata[,1]))
forecast <- predict(Model1, c(2000,1), c(2020,12), n.ahead = 12)
forecast
|
def main(rule_args, callback, rei):
    # iRODS Python rule entry point: copy one data object to another path.
    # global_vars values carry surrounding quotes (e.g. '"/tempZone/..."');
    # strip them with [1:-1] before use.
    source_file = global_vars['*SourceFile'][1:-1]
    dest_file = global_vars['*DestFile'][1:-1]
    # msiDataObjCopy microservice performs the copy; 'forceFlag=' overwrites an
    # existing destination object
    callback.msiDataObjCopy(source_file, dest_file, 'forceFlag=', 0)
    callback.writeLine('stdout', 'File ' + source_file + ' copied to ' + dest_file)
INPUT *SourceFile="/tempZone/home/rods/sub1/foo1", *DestFile="/tempZone/home/rods/sub2/foo1"
OUTPUT ruleExecOut
| /python_rules/rulemsiDataObjCopy.r | no_license | irods/irods_rule_engine_plugin_python | R | false | false | 401 | r | def main(rule_args, callback, rei):
source_file = global_vars['*SourceFile'][1:-1]
dest_file = global_vars['*DestFile'][1:-1]
callback.msiDataObjCopy(source_file, dest_file, 'forceFlag=', 0)
callback.writeLine('stdout', 'File ' + source_file + ' copied to ' + dest_file)
INPUT *SourceFile="/tempZone/home/rods/sub1/foo1", *DestFile="/tempZone/home/rods/sub2/foo1"
OUTPUT ruleExecOut
|
# 7_2Europe.R — build a European "master" mortality standard (average HMD life
# table + pooled cause-of-death proportions) and decompose the life-expectancy
# gap of each Latin American country against it.
library(data.table)
library(reshape2)
# NOTE(review): hard-coded absolute setwd() ties the script to one machine
setwd('C:/Users/jmaburto/Documents/GitHub Backup 2/CoD-burden-on-LA')
#source('R/7_1Europe.R')
load('Data/Europe_Data.RData')
### group causes of death by 2010-2015 or +
CoD.data <- Deaths.data
unique(CoD.data$Year)
# collapse all years into a single 2010-2015 period label
CoD.data$Period <- 2010
CoD.data$Period2 <- CoD.data$Period
CoD.data$Period2 <- factor(CoD.data$Period2,levels=2010,labels=c('2010-2015'))
# aggregate deaths by sex over the pooled period
CoD.data2 <- CoD.data[, list(Female=sum(Female)),by= list(X,Cause,Age,Country,Age2,Period2)]
CoD.data2$Male <- CoD.data[, list(Male=sum(Male)),by= list(X,Cause,Age,Country,Age2,Period2)]$Male
### Add code to HMD to have comparable datasets by country
Country.code.vec <- c(4010,4020,4050,4070,4080,4085,4170,4180,4190,4210,4240,4280,4290,4308,4140)
Country.code.vec2 <- c(4010,4020,4050,4070,4080,4085,4170,4180,4190,4210,4240,4280,4290,4308,4140)
Country.name.vec <- toupper(c('Austria','Belgium','Denmark','Finland','France','Germany','Ireland','Italy',
                              'Luxembourg','Netherlands','Portugal','Spain','Sweden','UK','Greece'))
Country.HMD <- c('AUT','BEL','DNK','FIN','FRATNP','DEUTNP','IRL','ITA','LUX','NLD','PRT','ESP','SWE','GBR_NP','GRC')
names(Country.code.vec2) <- Country.HMD
# map HMD population codes onto numeric country codes used elsewhere
HMDL$X <- Country.code.vec2[as.character(HMDL$PopName)]
### Now create a master lifetable for Europe and CoD proportions (remember to add category 15)
Countries <- unique(HMDL$X)
Sex <- c('f','m')
# master mx = unweighted cross-country mean of age-specific death rates
Master_mx.f <- rowMeans(acast(HMDL[HMDL$Sex == 'f'],Age ~ X, value.var = 'mx'),na.rm = T)
Master_mx.m <- rowMeans(acast(HMDL[HMDL$Sex == 'm'],Age ~ X, value.var = 'mx'),na.rm = T)
# collapse the open ages into a single last (19th) age group
Master_mx.f[19] <- mean(Master_mx.f[19:24],na.rm = T)
Master_mx.f <- Master_mx.f[1:19]
Master_mx.m[19] <- mean(Master_mx.m[19:24],na.rm = T)
Master_mx.m <- Master_mx.m[1:19]
### Now a master CoD matrix
# cause 1 is the all-cause total; the residual category 15 is total minus the
# sum of causes 2..14
CoD.data3.f <- CoD.data2[,sum(Female),by = list(Cause,Age2)]
new.cat <- acast(CoD.data3.f, Age2~Cause, value.var = "V1")[,1]-rowSums(acast(CoD.data3.f, Age2~Cause, value.var = "V1")[,2:14])
Master.CoD.f <- cbind(acast(CoD.data3.f, Age2~Cause, value.var = "V1")[,2:14],new.cat)
colnames(Master.CoD.f) <- 2:15
CoD.data3.m <- CoD.data2[,sum(Male),by = list(Cause,Age2)]
new.cat <- acast(CoD.data3.m, Age2~Cause, value.var = "V1")[,1]-rowSums(acast(CoD.data3.m, Age2~Cause, value.var = "V1")[,2:14])
Master.CoD.m <- cbind(acast(CoD.data3.m, Age2~Cause, value.var = "V1")[,2:14],new.cat)
colnames(Master.CoD.m) <- 2:15
# now convert to proportions (each row/age group sums to 1)
t1 <- rowSums(Master.CoD.f)
t2 <- rowSums(Master.CoD.m)
Master.CoD.f <- Master.CoD.f/t1
Master.CoD.m <- Master.CoD.m/t2
#### Now do decomposition
load("Outcomes/Harmonized_CoDData.RData")
load('Outcomes/Data_Lifetables.RData')
# drop everything except the objects needed below
gdata::keep(Rest,Data.LT,Master.CoD.f,Master.CoD.m,Master_mx.f,Master_mx.m,sure=T)
source('R/Functions_LT.R')
Decomp.comparision <- NULL
# keep the Latin American / Caribbean countries of interest (by numeric code)
UND <- Rest[(Rest$X == 2170|Rest$X == 2140|Rest$X == 2190|Rest$X == 2380|Rest$X == 2440|
             Rest$X == 2250|Rest$X == 2310|Rest$X == 2340|Rest$X == 2280|Rest$X == 2350|Rest$X == 2290|
             Rest$X == 2020|Rest$X == 2120|Rest$X == 2130|
             Rest$X == 2180|Rest$X == 2360|Rest$X == 2370|
             Rest$X == 2460|Rest$X == 2070|Rest$X == 2470|
             Rest$X == 2150),]
# matching UN life tables for the same countries
UNLT <- Data.LT[(Data.LT$Code == 2170|Data.LT$Code == 2140|Data.LT$Code == 2190|Data.LT$Code == 2380|Data.LT$Code == 2440|
                 Data.LT$Code == 2280|Data.LT$Code == 2350|Data.LT$Code == 2290|
                 Data.LT$Code == 2250|Data.LT$Code == 2310|Data.LT$Code == 2340|
                 Data.LT$Code == 2020|Data.LT$Code == 2120|Data.LT$Code == 2130|
                 Data.LT$Code == 2180|Data.LT$Code == 2360|Data.LT$Code == 2370|
                 Data.LT$Code == 2460|Data.LT$Code == 2070|Data.LT$Code == 2470|
                 Data.LT$Code == 2150) & Data.LT$Source == 'UN',]
unique(UND$Country)
unique(UNLT$Country)
unique(UNLT$Year)
unique(UND$Year)
## Aggregate deaths in periods of 5 years to get more robust CoD decomp
Periods <- seq(1990,2010,5)
Period.labels <- unique(UNLT$Period)
UND$Period5 <- (cut(UND$Year+1, breaks=c(Periods,Inf),labels=Period.labels))
UND2 <- UND[,list(Female=sum(Female)), by = list(X,Cause,Age,Country,Age2,Cause.name,Source,Period5)]
UND2$Male <- UND[,list(Male=sum(Male)), by = list(X,Cause,Age,Country,Age2,Cause.name,Source,Period5)]$Male
unique(UND2$Period5)
# keep only the most recent 5-year period
UND3 <- UND2[UND2$Period5=='2010-2015',]
### Now a master CoD matrix for Latin America pooled across countries
CoD.data.LAC.f <- UND3[,sum(Female),by = list(Cause,Age2)]
Master.CoD.fLAC <- acast(CoD.data.LAC.f, Age2~Cause, value.var = "V1")
CoD.data.LAC.m <- UND3[,sum(Male),by = list(Cause,Age2)]
Master.CoD.mLAC <- acast(CoD.data.LAC.m, Age2~Cause, value.var = "V1")
# now convert to proportions
t1.LAC <- rowSums(Master.CoD.fLAC)
t2.LAC <- rowSums(Master.CoD.mLAC)
Master.CoD.FLAC <- Master.CoD.fLAC/t1.LAC
Master.CoD.MLAC <- Master.CoD.mLAC/t2.LAC
### Ok, we can perform decomp for UN
UNLT$Sex <- as.numeric(UNLT$Sex)
Sex <- unique(UNLT$Sex)
Countries <- unique(UND2$X)
UND2$Period5 <- as.character(UND2$Period5)
#k <- 2
#l <-3
#j <-2280
#j <-2290
#j <-2350
## Because everything is different and we need separate files for each country, I'll do this with loops
# For each country and sex: take the latest period's mx and cause-of-death
# proportions, decompose the e0 gap against the European master by age
# (AgeDecomp), then split each age contribution across causes.
for (j in Countries){
  print(j)
  for (k in Sex){
    # sex.col indexes the Male/Female deaths column of new.CoD below
    # (k == 1 is male, k == 2 is female, per the coding used throughout)
    if (k == 1) sex.col <- 10
    if (k == 2) sex.col <- 9
    if (k == 1) Master.mx <- Master_mx.m
    if (k == 2) Master.mx <- Master_mx.f
    if (k == 1) Master.CoD <- Master.CoD.m
    if (k == 2) Master.CoD <- Master.CoD.f
    # subset data with info requireMethods
    new.data <- UNLT[UNLT$Code==j & UNLT$Sex==k,]
    new.CoD <- UND2[UND2$X==j,]
    # find out for which periods we have information
    P1 <- unique(new.data$Period)
    P2 <- unique(new.CoD$Period5)
    Periods <- P1
    # sanity check: life tables and CoD data should cover the same periods
    if ((length(P2) - length(P1))!= 0) print('Problem')
    l <- length(Periods)
    print(l)
    # get vectors of mx for the most recent period only
    p1.data <- new.data[new.data$Period == Periods[l],]
    # get proportions of causes of death for these periods in matrix format
    CoD.1 <- new.CoD[new.CoD$Period5 == Periods[l],]
    R1 <- acast(CoD.1, Age2 ~ Cause, value.var = colnames(CoD.1)[sex.col],fill = 0,drop = F)
    # guard against empty oldest age groups: force all deaths into the last
    # cause so the row still sums to 1 after normalization
    if (rowSums(R1)[dim(R1)[1]]==0) R1[dim(R1)[1],dim(R1)[2]] <- 1
    if (rowSums(R1)[dim(R1)[1]-1]==0) R1[dim(R1)[1]-1,dim(R1)[2]] <- 1
    R1.1 <- R1/rowSums(R1)
    # 19 age groups, each row summing to 1
    if (sum(rowSums(R1.1)) != 19) print('Dimension R1')
    # calculate age decomposition of the e0 gap (country vs master)
    mx1 <- p1.data$mx
    mx2 <- Master.mx
    Decomp.age <- AgeDecomp(mx1=mx1,mx2=mx2,Age=p1.data$Age,Sex=k)
    # check: age contributions should sum to the e0 difference
    print(sum(Decomp.age)-(e0.from.mx(mx2,p1.data$Age,k)-e0.from.mx(mx1,p1.data$Age,k)))
    # calculate cause-specific contributions (standard CoD split of the
    # age contribution, weighted by cause-specific rate differences)
    R2.1 <- Master.CoD
    Decomp.CoD <- Decomp.age*((R2.1*mx2)-(R1.1*mx1))/(mx2-mx1)
    # ages where mx1 == mx2 produce 0/0 or Inf; zero them out
    Decomp.CoD[is.infinite(Decomp.CoD)] <- 0
    Decomp.CoD[is.na(Decomp.CoD)] <- 0
    print(sum(Decomp.CoD)-sum(Decomp.age))
    # Dataframe with decomposition results (long format: Age x Cause)
    Results <- melt(Decomp.CoD,varnames = c('Age','Cause'),value.name = 'Contribution')
    Results$Period1 <- Periods[l]
    Results$Age <- rep(p1.data$Age,14)
    Results$Sex <- k
    Results$Country <- j
    Results$Sources <- 'UN'
    Results$e01 <- e0.from.mx(mx = mx1,Ages=p1.data$Age,Sex=k)
    Results$e02 <- e0.from.mx(mx = mx2,Ages=p1.data$Age,Sex=k)
    Decomp.comparision <- rbind(Decomp.comparision,Results)
  }
}
Results <- NULL
# Same decomposition, but for pooled Latin America (country code 9999) against
# the European master, by sex.
# NOTE(review): this loop reuses p1.data$Age left over from the last iteration
# of the country loop above as the age vector — fragile; presumably all
# series share the same age grid. TODO confirm.
for (k in Sex){
  if (k == 1) Master.mx <- Master_mx.m
  if (k == 2) Master.mx <- Master_mx.f
  if (k == 1) Master.CoD <- Master.CoD.m
  if (k == 2) Master.CoD <- Master.CoD.f
  # pooled LAC life table for 2010-2015 from the UN source
  if (k == 1) Master.mx.LAC <- Data.LT[Data.LT$Country=='LATIN AMERICA' & Data.LT$Source=='UN' &
                                       Data.LT$Sex =='1' & Data.LT$Period=='2010-2015']
  if (k == 2) Master.mx.LAC <- Data.LT[Data.LT$Country=='LATIN AMERICA' & Data.LT$Source=='UN' &
                                       Data.LT$Sex =='2' & Data.LT$Period=='2010-2015']
  if (k == 1) Master.CoD.LAC <- Master.CoD.MLAC
  if (k == 2) Master.CoD.LAC <- Master.CoD.FLAC
  # R1.1 = LAC cause proportions, R2.1 = European master proportions
  R1.1 <- Master.CoD.LAC
  R2.1 <- Master.CoD
  mx1 <- Master.mx.LAC$mx
  mx2 <- Master.mx
  Decomp.age <- AgeDecomp(mx1=mx1,mx2=mx2,Age=p1.data$Age,Sex=k)
  # check: age contributions should sum to the e0 difference
  print(sum(Decomp.age)-(e0.from.mx(mx2,p1.data$Age,k)-e0.from.mx(mx1,p1.data$Age,k)))
  # calculate cause-specific contributions
  Decomp.CoD <- Decomp.age*((R2.1*mx2)-(R1.1*mx1))/(mx2-mx1)
  # ages where mx1 == mx2 produce 0/0 or Inf; zero them out
  Decomp.CoD[is.infinite(Decomp.CoD)] <- 0
  Decomp.CoD[is.na(Decomp.CoD)] <- 0
  print(sum(Decomp.CoD)-sum(Decomp.age))
  # Dataframe with decomposition results (long format: Age x Cause)
  Results <- melt(Decomp.CoD,varnames = c('Age','Cause'),value.name = 'Contribution')
  Results$Period1 <- '2010-2015'
  Results$Age <- rep(Master.mx.LAC$Age,14)
  Results$Sex <- k
  Results$Country <- 9999
  Results$Sources <- 'UN'
  Results$e01 <- e0.from.mx(mx = mx1,Ages=p1.data$Age,Sex=k)
  Results$e02 <- e0.from.mx(mx = mx2,Ages=p1.data$Age,Sex=k)
  Decomp.comparision <- rbind(Decomp.comparision,Results)
}
## Now code mortality from age 80 as 'Rest' since it is unreliable
Decomp.comparision <- data.table(Decomp.comparision)
Decomp.comparision[Decomp.comparision$Age >= 80]$Cause <- 15
# re-aggregate after the age-80 recode so each (Age, Cause, ...) cell is unique
Decomp.comparision <- Decomp.comparision[,list(Contribution=sum(Contribution)), by = list(Age,Cause,Period1,Sex,Country,Sources,e01,e02)]
unique(Decomp.comparision$Country)
# human-readable cause labels for codes 1..15
# NOTE(review): label 'Total' maps to cause code 1, which the decomposition
# output should not contain (causes are 2..15) — TODO confirm it never appears.
cause.name.vec <- c('Total', 'Infectious','Neoplasms', 'Circulatory','Abnormal', 'Mental',
                    'Nervous','Endocrine','Digestive',
                    'Genitourinary','Perinatal','Respiratory','External','Homicide','Rest')
Decomp.comparision$Cause.name <- Decomp.comparision$Cause
Decomp.comparision$Cause.name <- factor(Decomp.comparision$Cause.name, levels = 1:15, labels = cause.name.vec)
# country labels; 9999 is the pooled 'LATIN AMERICA' aggregate added above
Country.name.vec <- toupper(c('Cuba','Dominican Republic','Jamaica','Puerto Rico',
                              'Trinidad and Tobago','Costa Rica','El Salvador','Guatemala',
                              'Honduras','Mexico','Nicaragua','Panama','Argentina',
                              'Chile','Colombia','Ecuador','Paraguay','Peru','Uruguay','Venezuela',
                              'Haiti','Bolivia','Brazil', 'Latin America'))
# Create a vector with the countries' codes according to WHO
Country.code.vec <- c(2150,2170,2290,2380,2440,2140,2190,2250,2280,2310,
                      2340,2350,2020,2120,2130,2180,2360,2370,2460,2470,2270,2060,2070,9999)
names(Country.code.vec) <- Country.name.vec
Decomp.comparision$Country.name <- Decomp.comparision$Country
Decomp.comparision$Country.name <- factor(Decomp.comparision$Country.name, levels = Country.code.vec,
                                          labels = Country.name.vec)
unique(Decomp.comparision$Country.name)
# (no-op assignment retained from the original)
Decomp.comparision <- Decomp.comparision
Decomp.comparision[Decomp.comparision$Country.name=='PARAGUAY',]
# relabel the period for display purposes
Decomp.comparision[Decomp.comparision$Period1 == '2010-2015']$Period1 <- '2010-2014'
# save once for the analysis outputs and once per Shiny app
save(Decomp.comparision, file = 'Outcomes/Decomp_results_Europe.RData')
save(Decomp.comparision, file = 'R/Decomp_App/Decomp_results_Europe.RData')
save(Decomp.comparision, file = 'R/Decomp_Ranking_App/Decomp_results_Europe.RData')
| /WB Outcomes/RCode/7_2Europe.R | no_license | jmaburto/CoD-burden-on-LA | R | false | false | 11,456 | r | library(data.table)
library(reshape2)
setwd('C:/Users/jmaburto/Documents/GitHub Backup 2/CoD-burden-on-LA')
#source('R/7_1Europe.R')
load('Data/Europe_Data.RData')
### group causes of death by 2010-2015 or +
CoD.data <- Deaths.data
unique(CoD.data$Year)
CoD.data$Period <- 2010
CoD.data$Period2 <- CoD.data$Period
CoD.data$Period2 <- factor(CoD.data$Period2,levels=2010,labels=c('2010-2015'))
CoD.data2 <- CoD.data[, list(Female=sum(Female)),by= list(X,Cause,Age,Country,Age2,Period2)]
CoD.data2$Male <- CoD.data[, list(Male=sum(Male)),by= list(X,Cause,Age,Country,Age2,Period2)]$Male
### Add code to HMD to have comparable datasets by country
Country.code.vec <- c(4010,4020,4050,4070,4080,4085,4170,4180,4190,4210,4240,4280,4290,4308,4140)
Country.code.vec2 <- c(4010,4020,4050,4070,4080,4085,4170,4180,4190,4210,4240,4280,4290,4308,4140)
Country.name.vec <- toupper(c('Austria','Belgium','Denmark','Finland','France','Germany','Ireland','Italy',
'Luxembourg','Netherlands','Portugal','Spain','Sweden','UK','Greece'))
Country.HMD <- c('AUT','BEL','DNK','FIN','FRATNP','DEUTNP','IRL','ITA','LUX','NLD','PRT','ESP','SWE','GBR_NP','GRC')
names(Country.code.vec2) <- Country.HMD
HMDL$X <- Country.code.vec2[as.character(HMDL$PopName)]
### Now create a master lifetable for Europe and CoD proportions (remember to add category 15)
Countries <- unique(HMDL$X)
Sex <- c('f','m')
Master_mx.f <- rowMeans(acast(HMDL[HMDL$Sex == 'f'],Age ~ X, value.var = 'mx'),na.rm = T)
Master_mx.m <- rowMeans(acast(HMDL[HMDL$Sex == 'm'],Age ~ X, value.var = 'mx'),na.rm = T)
Master_mx.f[19] <- mean(Master_mx.f[19:24],na.rm = T)
Master_mx.f <- Master_mx.f[1:19]
Master_mx.m[19] <- mean(Master_mx.m[19:24],na.rm = T)
Master_mx.m <- Master_mx.m[1:19]
### Now a master CoD matrix
CoD.data3.f <- CoD.data2[,sum(Female),by = list(Cause,Age2)]
new.cat <- acast(CoD.data3.f, Age2~Cause, value.var = "V1")[,1]-rowSums(acast(CoD.data3.f, Age2~Cause, value.var = "V1")[,2:14])
Master.CoD.f <- cbind(acast(CoD.data3.f, Age2~Cause, value.var = "V1")[,2:14],new.cat)
colnames(Master.CoD.f) <- 2:15
CoD.data3.m <- CoD.data2[,sum(Male),by = list(Cause,Age2)]
new.cat <- acast(CoD.data3.m, Age2~Cause, value.var = "V1")[,1]-rowSums(acast(CoD.data3.m, Age2~Cause, value.var = "V1")[,2:14])
Master.CoD.m <- cbind(acast(CoD.data3.m, Age2~Cause, value.var = "V1")[,2:14],new.cat)
colnames(Master.CoD.m) <- 2:15
# now convert to proportions
t1 <- rowSums(Master.CoD.f)
t2 <- rowSums(Master.CoD.m)
Master.CoD.f <- Master.CoD.f/t1
Master.CoD.m <- Master.CoD.m/t2
#### Now do decomposition
load("Outcomes/Harmonized_CoDData.RData")
load('Outcomes/Data_Lifetables.RData')
gdata::keep(Rest,Data.LT,Master.CoD.f,Master.CoD.m,Master_mx.f,Master_mx.m,sure=T)
source('R/Functions_LT.R')
Decomp.comparision <- NULL
UND <- Rest[(Rest$X == 2170|Rest$X == 2140|Rest$X == 2190|Rest$X == 2380|Rest$X == 2440|
Rest$X == 2250|Rest$X == 2310|Rest$X == 2340|Rest$X == 2280|Rest$X == 2350|Rest$X == 2290|
Rest$X == 2020|Rest$X == 2120|Rest$X == 2130|
Rest$X == 2180|Rest$X == 2360|Rest$X == 2370|
Rest$X == 2460|Rest$X == 2070|Rest$X == 2470|
Rest$X == 2150),]
UNLT <- Data.LT[(Data.LT$Code == 2170|Data.LT$Code == 2140|Data.LT$Code == 2190|Data.LT$Code == 2380|Data.LT$Code == 2440|
Data.LT$Code == 2280|Data.LT$Code == 2350|Data.LT$Code == 2290|
Data.LT$Code == 2250|Data.LT$Code == 2310|Data.LT$Code == 2340|
Data.LT$Code == 2020|Data.LT$Code == 2120|Data.LT$Code == 2130|
Data.LT$Code == 2180|Data.LT$Code == 2360|Data.LT$Code == 2370|
Data.LT$Code == 2460|Data.LT$Code == 2070|Data.LT$Code == 2470|
Data.LT$Code == 2150) & Data.LT$Source == 'UN',]
unique(UND$Country)
unique(UNLT$Country)
unique(UNLT$Year)
unique(UND$Year)
## Aggregate deaths in periods of 5 years to get more robust CoD decomp
Periods <- seq(1990,2010,5)
Period.labels <- unique(UNLT$Period)
UND$Period5 <- (cut(UND$Year+1, breaks=c(Periods,Inf),labels=Period.labels))
UND2 <- UND[,list(Female=sum(Female)), by = list(X,Cause,Age,Country,Age2,Cause.name,Source,Period5)]
UND2$Male <- UND[,list(Male=sum(Male)), by = list(X,Cause,Age,Country,Age2,Cause.name,Source,Period5)]$Male
unique(UND2$Period5)
UND3 <- UND2[UND2$Period5=='2010-2015',]
### Now a master CoD matrix
CoD.data.LAC.f <- UND3[,sum(Female),by = list(Cause,Age2)]
Master.CoD.fLAC <- acast(CoD.data.LAC.f, Age2~Cause, value.var = "V1")
CoD.data.LAC.m <- UND3[,sum(Male),by = list(Cause,Age2)]
Master.CoD.mLAC <- acast(CoD.data.LAC.m, Age2~Cause, value.var = "V1")
# now convert to proportions
t1.LAC <- rowSums(Master.CoD.fLAC)
t2.LAC <- rowSums(Master.CoD.mLAC)
Master.CoD.FLAC <- Master.CoD.fLAC/t1.LAC
Master.CoD.MLAC <- Master.CoD.mLAC/t2.LAC
### Ok, we can perform decomp for UN
UNLT$Sex <- as.numeric(UNLT$Sex)
Sex <- unique(UNLT$Sex)
Countries <- unique(UND2$X)
UND2$Period5 <- as.character(UND2$Period5)
#k <- 2
#l <-3
#j <-2280
#j <-2290
#j <-2350
## Because everything is different and we need separate files for each country, I'll do this with loops
for (j in Countries){
print(j)
for (k in Sex){
if (k == 1) sex.col <- 10
if (k == 2) sex.col <- 9
if (k == 1) Master.mx <- Master_mx.m
if (k == 2) Master.mx <- Master_mx.f
if (k == 1) Master.CoD <- Master.CoD.m
if (k == 2) Master.CoD <- Master.CoD.f
# subset data with info requireMethods
new.data <- UNLT[UNLT$Code==j & UNLT$Sex==k,]
new.CoD <- UND2[UND2$X==j,]
# find out for which periods we have information
P1 <- unique(new.data$Period)
P2 <- unique(new.CoD$Period5)
Periods <- P1
if ((length(P2) - length(P1))!= 0) print('Problem')
l <- length(Periods)
print(l)
# get vectors of mx
p1.data <- new.data[new.data$Period == Periods[l],]
# get proportions of causes of death for these periods in matrix format
CoD.1 <- new.CoD[new.CoD$Period5 == Periods[l],]
R1 <- acast(CoD.1, Age2 ~ Cause, value.var = colnames(CoD.1)[sex.col],fill = 0,drop = F)
if (rowSums(R1)[dim(R1)[1]]==0) R1[dim(R1)[1],dim(R1)[2]] <- 1
if (rowSums(R1)[dim(R1)[1]-1]==0) R1[dim(R1)[1]-1,dim(R1)[2]] <- 1
R1.1 <- R1/rowSums(R1)
if (sum(rowSums(R1.1)) != 19) print('Dimension R1')
# calculate age decomposition
mx1 <- p1.data$mx
mx2 <- Master.mx
Decomp.age <- AgeDecomp(mx1=mx1,mx2=mx2,Age=p1.data$Age,Sex=k)
print(sum(Decomp.age)-(e0.from.mx(mx2,p1.data$Age,k)-e0.from.mx(mx1,p1.data$Age,k)))
# calculate cause-specific contributions
R2.1 <- Master.CoD
Decomp.CoD <- Decomp.age*((R2.1*mx2)-(R1.1*mx1))/(mx2-mx1)
Decomp.CoD[is.infinite(Decomp.CoD)] <- 0
Decomp.CoD[is.na(Decomp.CoD)] <- 0
print(sum(Decomp.CoD)-sum(Decomp.age))
# Dataframe with decomposition results
Results <- melt(Decomp.CoD,varnames = c('Age','Cause'),value.name = 'Contribution')
Results$Period1 <- Periods[l]
Results$Age <- rep(p1.data$Age,14)
Results$Sex <- k
Results$Country <- j
Results$Sources <- 'UN'
Results$e01 <- e0.from.mx(mx = mx1,Ages=p1.data$Age,Sex=k)
Results$e02 <- e0.from.mx(mx = mx2,Ages=p1.data$Age,Sex=k)
Decomp.comparision <- rbind(Decomp.comparision,Results)
}
}
Results <- NULL
for (k in Sex){
if (k == 1) Master.mx <- Master_mx.m
if (k == 2) Master.mx <- Master_mx.f
if (k == 1) Master.CoD <- Master.CoD.m
if (k == 2) Master.CoD <- Master.CoD.f
if (k == 1) Master.mx.LAC <- Data.LT[Data.LT$Country=='LATIN AMERICA' & Data.LT$Source=='UN' &
Data.LT$Sex =='1' & Data.LT$Period=='2010-2015']
if (k == 2) Master.mx.LAC <- Data.LT[Data.LT$Country=='LATIN AMERICA' & Data.LT$Source=='UN' &
Data.LT$Sex =='2' & Data.LT$Period=='2010-2015']
if (k == 1) Master.CoD.LAC <- Master.CoD.MLAC
if (k == 2) Master.CoD.LAC <- Master.CoD.FLAC
R1.1 <- Master.CoD.LAC
R2.1 <- Master.CoD
mx1 <- Master.mx.LAC$mx
mx2 <- Master.mx
Decomp.age <- AgeDecomp(mx1=mx1,mx2=mx2,Age=p1.data$Age,Sex=k)
print(sum(Decomp.age)-(e0.from.mx(mx2,p1.data$Age,k)-e0.from.mx(mx1,p1.data$Age,k)))
# calculate cause-specific contributions
Decomp.CoD <- Decomp.age*((R2.1*mx2)-(R1.1*mx1))/(mx2-mx1)
Decomp.CoD[is.infinite(Decomp.CoD)] <- 0
Decomp.CoD[is.na(Decomp.CoD)] <- 0
print(sum(Decomp.CoD)-sum(Decomp.age))
# Dataframe with decomposition results
Results <- melt(Decomp.CoD,varnames = c('Age','Cause'),value.name = 'Contribution')
Results$Period1 <- '2010-2015'
Results$Age <- rep(Master.mx.LAC$Age,14)
Results$Sex <- k
Results$Country <- 9999
Results$Sources <- 'UN'
Results$e01 <- e0.from.mx(mx = mx1,Ages=p1.data$Age,Sex=k)
Results$e02 <- e0.from.mx(mx = mx2,Ages=p1.data$Age,Sex=k)
Decomp.comparision <- rbind(Decomp.comparision,Results)
}
## Now code mortality from age 80 as 'Rest' since it is unreliable
Decomp.comparision <- data.table(Decomp.comparision)
Decomp.comparision[Decomp.comparision$Age >= 80]$Cause <- 15
Decomp.comparision <- Decomp.comparision[,list(Contribution=sum(Contribution)), by = list(Age,Cause,Period1,Sex,Country,Sources,e01,e02)]
unique(Decomp.comparision$Country)
cause.name.vec <- c('Total', 'Infectious','Neoplasms', 'Circulatory','Abnormal', 'Mental',
'Nervous','Endocrine','Digestive',
'Genitourinary','Perinatal','Respiratory','External','Homicide','Rest')
Decomp.comparision$Cause.name <- Decomp.comparision$Cause
Decomp.comparision$Cause.name <- factor(Decomp.comparision$Cause.name, levels = 1:15, labels = cause.name.vec)
Country.name.vec <- toupper(c('Cuba','Dominican Republic','Jamaica','Puerto Rico',
'Trinidad and Tobago','Costa Rica','El Salvador','Guatemala',
'Honduras','Mexico','Nicaragua','Panama','Argentina',
'Chile','Colombia','Ecuador','Paraguay','Peru','Uruguay','Venezuela',
'Haiti','Bolivia','Brazil', 'Latin America'))
# Create a vector with the countries' codes according to WHO
Country.code.vec <- c(2150,2170,2290,2380,2440,2140,2190,2250,2280,2310,
2340,2350,2020,2120,2130,2180,2360,2370,2460,2470,2270,2060,2070,9999)
names(Country.code.vec) <- Country.name.vec
Decomp.comparision$Country.name <- Decomp.comparision$Country
Decomp.comparision$Country.name <- factor(Decomp.comparision$Country.name, levels = Country.code.vec,
labels = Country.name.vec)
unique(Decomp.comparision$Country.name)
Decomp.comparision <- Decomp.comparision
Decomp.comparision[Decomp.comparision$Country.name=='PARAGUAY',]
Decomp.comparision[Decomp.comparision$Period1 == '2010-2015']$Period1 <- '2010-2014'
save(Decomp.comparision, file = 'Outcomes/Decomp_results_Europe.RData')
save(Decomp.comparision, file = 'R/Decomp_App/Decomp_results_Europe.RData')
save(Decomp.comparision, file = 'R/Decomp_Ranking_App/Decomp_results_Europe.RData')
|
#--- acf and msd functions -------------------------------------------------
#' Autocorrelation of fBM Increments
#'
#' @param alpha Subdiffusion exponent.
#' @param dT Interobservation time.
#' @param N Number of increment observations.
#' @return An autocorrelation vector of length \eqn{N}.
#' @details
#' The fBM increment autocorrelation is given by:
#' \deqn{\frac{1}{2} \Delta t^\alpha \left[|n-1|^\alpha + |n+1|^\alpha - 2n^\alpha \right]}
#' @examples
#' fbm.acf(alpha = 0.5, dT = 1/60, N = 10)
#' @export
fbm.acf <- function(alpha, dT, N) {
  if (N == 1) {
    return(dT^alpha)
  }
  # fBM MSD evaluated at lags 0, 1, ..., N (in time units of dT)
  msd <- (dT * (0:N))^alpha
  fwd <- msd[2:(N + 1)]              # |n + 1|^alpha term
  bwd <- c(msd[2], msd[1:(N - 1)])   # |n - 1|^alpha term (lag 0 uses |-1| = 1)
  0.5 * (fwd + bwd - 2 * msd[1:N])
}
#' Autocorrelation of Squared-Exponential
#'
#' @param lambda timescale.
#' @param dT interobservation time.
#' @param N Number of increment observations.
#' @param incr logical; whether or not to return increments.
#' @return An autocorrelation vector of length \eqn{N}.
#' @details
#' The Squared-Exponential autocorrelation is given by:
#' \deqn{\exp \{-(\frac{n\Delta t}{\lambda})^2\}}
#' @examples
#' exp2.acf(lambda = 1, dT = 1/60, N = 200, incr = FALSE)
#' @export
exp2.acf <- function(lambda, dT, N, incr = TRUE) {
  # squared-exponential process autocorrelation at lags 0, ..., N
  gam <- exp(-((0:N) * dT / lambda)^2)
  # return either the increment ACF or the first N position-ACF values
  if (incr) acf2incr(gam) else gam[1:N]
}
#' Autocorrelation of Exponential
#'
#' @param lambda timescale.
#' @param dT interobservation time.
#' @param N Number of increment observations.
#' @param incr logical; whether or not to return increments.
#' @return An autocorrelation vector of length \eqn{N}.
#' @details
#' The Exponential Autocorrelation is given by:
#' \deqn{\exp \{-\frac{n\Delta t}{\lambda}\}}
#' @examples
#' exp1.acf(lambda = 1, dT = 1/60, N = 200, incr = FALSE)
#' @export
exp1.acf <- function(lambda, dT, N, incr = TRUE) {
  # exponential autocorrelation at lags 0, ..., N
  gam <- exp(-(0:N) * dT / lambda)
  # return either the increment ACF or the first N position-ACF values
  if (incr) acf2incr(gam) else gam[1:N]
}
#' Convert the Position ACF to Increment ACF
#'
#' Converts the autocorrelation of a stationary sequence \eqn{\{X_0, X_1, ..., X_N\}} to those of
#' its increments \eqn{\{X_1-X_0, X_2-X_1, ..., X_N - X_{N-1}\} }.
#' @param gam An autocorrelation sequence of length \eqn{N}.
#' @return An increment autocorrelation sequence of length \eqn{N-1}.
#' @examples
#' acf1 <- runif(10)
#' acf2incr(acf1)
#' @export
acf2incr <- function(gam) {
  n <- length(gam) - 1L
  if (n == 1) {
    return(2 * (gam[1] - gam[2]))
  }
  # increment ACF at lag h: 2*gamma(h) - gamma(h+1) - gamma(h-1),
  # with gamma(-1) taken as gamma(1) by symmetry
  gam.fwd <- gam[2:(n + 1)]
  gam.bwd <- gam[c(2, 1:(n - 1))]
  2 * gam[1:n] - gam.fwd - gam.bwd
}
#' Convert the Position MSD to Increment ACF
#'
#' Converts the MSD of a regularly sampled stationary-increments process \eqn{ \{X_0, X_1, ..., X_N\} }
#' into the ACF of its increments, \eqn{ \{X_1-X_0, X_2-X_1, ..., X_N - X_{N-1}\} }.
#' @param eta the MSD at \eqn{N} regular time points, \eqn{ \{\Delta t, 2\Delta t, ..., N\Delta t\} }.
#' @return the ACF at lags \eqn{ \{0, 1, ..., N-1\} }.
#' @examples
#' msd1 <- runif(10)
#' msd2acf(msd1)
#' @export
msd2acf <- function(eta) {
  # With eta(0) = 0, the increment ACF at positive lags is half the second
  # difference of the MSD; lag 0 is the one-step MSD itself.
  msd.incr <- diff(c(0, eta))
  c(eta[1], 0.5 * diff(msd.incr))
}
#' Convert the Increment ACF to Position MSD
#'
#' Converts the ACF of the increment of a regularly sampled stationary-increments process
#' \eqn{ \{X_1-X_0, X_2-X_1, ..., X_N - X_{N-1} \} } into the MSD of original process \eqn{ \{X_0, X_1, ..., X_N\} }.
#' @param gam the ACF at \eqn{N} lags, \eqn{ \{0, 1, ..., N-1\} }.
#' @return the MSD at regular time points \eqn{ \{\Delta t, 2\Delta t, ..., N\Delta t\} }.
#' @examples
#' acf1 <- runif(10)
#' acf2msd(acf1)
#' @export
acf2msd <- function(gam){
  # Invert msd2acf: eta[1] = gam[1] and, for n >= 2,
  # eta[n] = 2*(gam[n] + eta[n-1]) - eta[n-2] with eta(0) = 0.
  # Guards on N fix short inputs: the unguarded `eta[2] <- ...` and
  # `for (ii in 3:N)` read past the end of `gam` (NA) and, for N <= 2,
  # `3:N` runs backwards and errors on a zero-length replacement.
  N <- length(gam)
  eta <- numeric(N)
  eta[1] <- gam[1]
  if (N >= 2) {
    eta[2] <- 2 * (gam[2] + eta[1])
  }
  if (N >= 3) {
    for (ii in 3:N) {
      eta[ii] <- 2 * (gam[ii] + eta[ii - 1]) - eta[ii - 2]
    }
  }
  eta
}
#' MSD of fBM + Dynamic Error
#'
#' @param alpha Subdiffusion exponent
#' @param tau Width of averaging time-window.
#' @param t Vector of time points \eqn{ \{\Delta t, 2\Delta t, ..., N\Delta t\} }
#' @details
#' this function returns the MSD of \eqn{Y_t}, the integral of fBM process \eqn{X_t} with subdiffusion
#' exponent \eqn{\alpha} \deqn{Y_t = \int_{0}^{\tau} X(t-s)ds}. The expression of the MSD is
#' \deqn{\frac{(t+\tau)^\alpha + (t-\tau)^\alpha - 2t^\alpha - 2\tau^\alpha}{(\alpha+1)(\alpha+2)}}
#' @examples
#' fdyn.msd(alpha = 0.8, tau = 1/600, t = (1:200) * 1/60)
#' @export
fdyn.msd <- function(alpha, tau, t){
  # work in time units of the averaging window: s = t / tau
  s <- t/tau
  a2 <- alpha + 2
  # NOTE(review): (s - 1)^a2 is NaN for t < tau (negative base, fractional
  # exponent) — assumes all t >= tau; confirm with callers.
  eta <- ((s + 1)^a2 + (s - 1)^a2 - 2*s^a2 - 2)/a2
  eta * s^alpha/(alpha + 1)
}
#' ACF of fBM + Dynamic Error Increments
#'
#' @param alpha Subdiffusion exponent
#' @param tau Width of averaging time-window.
#' @param dT interobservation time.
#' @param N Number of increment observations.
#' @details this function returns the autocorrelation of the increment of \eqn{Y_t}, the integral of
#' fBM process \eqn{X_t} with subdiffusion exponent \eqn{\alpha} \deqn{Y_t = \int_{0}^{\tau} X(t-s)ds}
#' @examples
#' fdyn.acf(alpha = 0.8, tau = 1/600, dT = 1/60, N = 200)
#' @export
fdyn.acf <- function(alpha, tau, dT, N) {
  # seq_len(N) (rather than 1:N) degrades gracefully to an empty time grid
  # when N == 0 instead of producing c(dT, 0).
  eta <- fdyn.msd(alpha, tau, dT * seq_len(N))
  msd2acf(eta)
}
| /R/old/acf-functions.R | no_license | neery1218/SuperGauss | R | false | false | 5,312 | r | #--- acf and msd functions -------------------------------------------------
#' Autocorrelation of fBM Increments
#'
#' @param alpha Subdiffusion exponent.
#' @param dT Interobservation time.
#' @param N Number of increment observations.
#' @return An autocorrelation vector of length \eqn{N}.
#' @details
#' The fBM increment autocorrelation is given by:
#' \deqn{\frac{1}{2} \Delta t^\alpha \left[|n-1|^\alpha + |n+1|^\alpha - 2n^\alpha \right]}
#' @examples
#' fbm.acf(alpha = 0.5, dT = 1/60, N = 10)
#' @export
fbm.acf <- function(alpha, dT, N) {
  if (N == 1) {
    return(dT^alpha)
  }
  # fBM MSD evaluated at lags 0, 1, ..., N (in time units of dT)
  msd <- (dT * (0:N))^alpha
  fwd <- msd[2:(N + 1)]              # |n + 1|^alpha term
  bwd <- c(msd[2], msd[1:(N - 1)])   # |n - 1|^alpha term (lag 0 uses |-1| = 1)
  0.5 * (fwd + bwd - 2 * msd[1:N])
}
#' Autocorrelation of Squared-Exponential
#'
#' @param lambda timescale.
#' @param dT interobservation time.
#' @param N Number of increment observations.
#' @param incr logical; whether or not to return increments.
#' @return An autocorrelation vector of length \eqn{N}.
#' @details
#' The Squared-Exponential autocorrelation is given by:
#' \deqn{\exp \{-(\frac{n\Delta t}{\lambda})^2\}}
#' @examples
#' exp2.acf(lambda = 1, dT = 1/60, N = 200, incr = FALSE)
#' @export
exp2.acf <- function(lambda, dT, N, incr = TRUE) {
  # squared-exponential process autocorrelation at lags 0, ..., N
  gam <- exp(-((0:N) * dT / lambda)^2)
  # return either the increment ACF or the first N position-ACF values
  if (incr) acf2incr(gam) else gam[1:N]
}
#' Autocorrelation of Exponential
#'
#' @param lambda timescale.
#' @param dT interobservation time.
#' @param N Number of increment observations.
#' @param incr logical; whether or not to return increments.
#' @return An autocorrelation vector of length \eqn{N}.
#' @details
#' The Exponential Autocorrelation is given by:
#' \deqn{\exp \{-\frac{n\Delta t}{\lambda}\}}
#' @examples
#' exp1.acf(lambda = 1, dT = 1/60, N = 200, incr = FALSE)
#' @export
exp1.acf <- function(lambda, dT, N, incr = TRUE) {
  # exponential autocorrelation at lags 0, ..., N
  gam <- exp(-(0:N) * dT / lambda)
  # return either the increment ACF or the first N position-ACF values
  if (incr) acf2incr(gam) else gam[1:N]
}
#' Convert the Position ACF to Increment ACF
#'
#' Converts the autocorrelation of a stationary sequence \eqn{\{X_0, X_1, ..., X_N\}} to those of
#' its increments \eqn{\{X_1-X_0, X_2-X_1, ..., X_N - X_{N-1}\} }.
#' @param gam An autocorrelation sequence of length \eqn{N}.
#' @return An increment autocorrelation sequence of length \eqn{N-1}.
#' @examples
#' acf1 <- runif(10)
#' acf2incr(acf1)
#' @export
acf2incr <- function(gam) {
  n <- length(gam) - 1L
  if (n == 1) {
    return(2 * (gam[1] - gam[2]))
  }
  # increment ACF at lag h: 2*gamma(h) - gamma(h+1) - gamma(h-1),
  # with gamma(-1) taken as gamma(1) by symmetry
  gam.fwd <- gam[2:(n + 1)]
  gam.bwd <- gam[c(2, 1:(n - 1))]
  2 * gam[1:n] - gam.fwd - gam.bwd
}
#' Convert the Position MSD to Increment ACF
#'
#' Converts the MSD of a regularly sampled stationary-increments process \eqn{ \{X_0, X_1, ..., X_N\} }
#' into the ACF of its increments, \eqn{ \{X_1-X_0, X_2-X_1, ..., X_N - X_{N-1}\} }.
#' @param eta the MSD at \eqn{N} regular time points, \eqn{ \{\Delta t, 2\Delta t, ..., N\Delta t\} }.
#' @return the ACF at lags \eqn{ \{0, 1, ..., N-1\} }.
#' @examples
#' msd1 <- runif(10)
#' msd2acf(msd1)
#' @export
msd2acf <- function(eta) {
  # With eta(0) = 0, the increment ACF at positive lags is half the second
  # difference of the MSD; lag 0 is the one-step MSD itself.
  msd.incr <- diff(c(0, eta))
  c(eta[1], 0.5 * diff(msd.incr))
}
#' Convert the Increment ACF to Position MSD
#'
#' Converts the ACF of the increment of a regularly sampled stationary-increments process
#' \eqn{ \{X_1-X_0, X_2-X_1, ..., X_N - X_{N-1} \} } into the MSD of original process \eqn{ \{X_0, X_1, ..., X_N\} }.
#' @param gam the ACF at \eqn{N} lags, \eqn{ \{0, 1, ..., N-1\} }.
#' @return the MSD at regular time points \eqn{ \{\Delta t, 2\Delta t, ..., N\Delta t\} }.
#' @examples
#' acf1 <- runif(10)
#' acf2msd(acf1)
#' @export
acf2msd <- function(gam){
  # Invert msd2acf: eta[1] = gam[1] and, for n >= 2,
  # eta[n] = 2*(gam[n] + eta[n-1]) - eta[n-2] with eta(0) = 0.
  # Guards on N fix short inputs: the unguarded `eta[2] <- ...` and
  # `for (ii in 3:N)` read past the end of `gam` (NA) and, for N <= 2,
  # `3:N` runs backwards and errors on a zero-length replacement.
  N <- length(gam)
  eta <- numeric(N)
  eta[1] <- gam[1]
  if (N >= 2) {
    eta[2] <- 2 * (gam[2] + eta[1])
  }
  if (N >= 3) {
    for (ii in 3:N) {
      eta[ii] <- 2 * (gam[ii] + eta[ii - 1]) - eta[ii - 2]
    }
  }
  eta
}
#' MSD of fBM + Dynamic Error
#'
#' @param alpha Subdiffusion exponent
#' @param tau Width of averaging time-window.
#' @param t Vector of time points \eqn{ \{\Delta t, 2\Delta t, ..., N\Delta t\} }
#' @details
#' this function returns the MSD of \eqn{Y_t}, the integral of fBM process \eqn{X_t} with subdiffusion
#' exponent \eqn{\alpha} \deqn{Y_t = \int_{0}^{\tau} X(t-s)ds}. The expression of the MSD is
#' \deqn{\frac{(t+\tau)^\alpha + (t-\tau)^\alpha - 2t^\alpha - 2\tau^\alpha}{(\alpha+1)(\alpha+2)}}
#' @examples
#' fdyn.msd(alpha = 0.8, tau = 1/600, t = (1:200) * 1/60)
#' @export
fdyn.msd <- function(alpha, tau, t){
  # work in time units of the averaging window: s = t / tau
  s <- t/tau
  a2 <- alpha + 2
  # NOTE(review): (s - 1)^a2 is NaN for t < tau (negative base, fractional
  # exponent) — assumes all t >= tau; confirm with callers.
  eta <- ((s + 1)^a2 + (s - 1)^a2 - 2*s^a2 - 2)/a2
  eta * s^alpha/(alpha + 1)
}
#' ACF of fBM + Dynamic Error Increments
#'
#' @param alpha Subdiffusion exponent
#' @param tau Width of averaging time-window.
#' @param dT interobservation time.
#' @param N Number of increment observations.
#' @details this function returns the autocorrelation of the increment of \eqn{Y_t}, the integral of
#' fBM process \eqn{X_t} with subdiffusion exponent \eqn{\alpha} \deqn{Y_t = \int_{0}^{\tau} X(t-s)ds}
#' @examples
#' fdyn.acf(alpha = 0.8, tau = 1/600, dT = 1/60, N = 200)
#' @export
fdyn.acf <- function(alpha, tau, dT, N) {
  # seq_len(N) (rather than 1:N) degrades gracefully to an empty time grid
  # when N == 0 instead of producing c(dT, 0).
  eta <- fdyn.msd(alpha, tau, dT * seq_len(N))
  msd2acf(eta)
}
|
# WARNING(review): clears the whole global environment on source() — fine for
# a standalone batch script, hostile if sourced from an interactive session.
rm(list=ls())
## Set directory
# Working directory is guessed from which Dropbox layout exists on this machine.
setwd("~/")
if("XSub"%in%dir()){setwd("~/XSub/Data/")}
if("Dropbox2"%in%dir()){setwd("~/Dropbox2/Dropbox (Zhukov research team)/XSub/Data/")}
# setwd("F:/Dropbox (Zhukov research team)/XSub/Data/")
## Install & load packages (all at once)
# Install any packages not yet present, then attach them all; require() (not
# library()) inside lapply keeps one failed attach from aborting the loop.
list.of.packages <- c("gdata","countrycode","maptools","foreign","plotrix","sp","raster","rgeos","gdata","parallel")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]; if(length(new.packages)){install.packages(new.packages,dependencies=TRUE)}
lapply(list.of.packages, require, character.only = TRUE)
## Load custom functions
source("Code/functions.R")
#############################
## Create event-level data
#############################
## Load event type dictionary
load("Dictionaries/EventTypes/Combined_EventDictionary.RData")
source("Code/step2_eventcode/step2x_event_types_list.R")
source("Code/step2_eventcode/step2x_eventType_function.R")
# Load events
# # Clean up
# ged.raw <- read.csv("Input/Events/UCDP_GED/ged171.csv")
# countrylist <- sort(unique(as.character(ged.raw$country)))
# countrylist <- data.frame(country=countrylist,iso3=countrycode(countrylist,origin="country.name",destination="iso3c"))
# for(j in 1:2){countrylist[,j]<-as.character(countrylist[,j])}
# countrylist[countrylist$country=="Yemen (North Yemen)","iso3"] <- countrycode("Yemen","country.name","iso3c")
# countrylist[countrylist$country=="Serbia (Yugoslavia)","iso3"] <- countrycode("Serbia","country.name","iso3c")
# ged.raw <- merge(ged.raw,countrylist,by="country",all.x=T,all.y=T)
# save(ged.raw,file="Input/Events/UCDP_GED/ged171.RData")
# Load Syria data (for some reason missing from v 17.1)
load("Input/Events/UCDP_GED/ged30.RData")
data <- ged.raw[ged.raw$iso3%in%"SYR",]
classez<-c();for(j in 1:ncol(data)){classez[j]<-class(data[,j]);if(classez[j]=="factor"){data[,j]<-as.character(data[,j])}}
names(data) <- toupper(names(data))
syr.raw <- data
head(syr.raw); rm(data)
## Load raw data
load("Input/Events/UCDP_GED/ged171.RData")
data <- ged.raw
classez<-c();for(j in 1:ncol(data)){classez[j]<-class(data[,j]);if(classez[j]=="factor"){data[,j]<-as.character(data[,j])}}
names(data) <- toupper(names(data))
head(data)
# Merge
commonvars <- intersect(names(data),names(syr.raw))
data <- rbind(data[,commonvars],syr.raw[,commonvars])
head(data); rm(syr.raw,commonvars)
# Precision codes
names(data)
sort(unique(data$WHERE_PREC))
# Map the UCDP-GED WHERE_PREC code onto coarse geographic precision labels.
# The default "settlement" covers code 1 (and any unlisted value); larger
# codes are coarser — presumably per the GED codebook scale, confirm there.
data$GEOPRECISION0 <- "settlement"
data$GEOPRECISION0[which(data$WHERE_PREC%in%c(2,3))] <- "adm2"
data$GEOPRECISION0[which(data$WHERE_PREC%in%c(4,5))] <- "adm1"
data$GEOPRECISION0[which(data$WHERE_PREC%in%c(6,7))] <- "adm0"
# Same idea for DATE_PREC: default "day", degrading to week/month/year.
data$TIMEPRECISION0 <- "day"
data$TIMEPRECISION0[which(data$DATE_PREC%in%c(2,3))] <- "week"
data$TIMEPRECISION0[which(data$DATE_PREC%in%c(4))] <- "month"
data$TIMEPRECISION0[which(data$DATE_PREC%in%c(5))] <- "year"
tail(data)
# Initiator classification
init.type <- "_init"
## By country
disag <- sort(unique(data$ISO3))
## Check missing dictionaries
disag[!disag%in%sapply(strsplit(dir("Dictionaries/GED/Combined/"),"_"),"[",2)]
j <- 1; disag[j]
# # Open loop, Single-Core
# ged.list <- lapply(1:length(disag),function(j){print(j)
# Open loop, Multi-Core
ged.list <- mclapply(1:length(disag),function(j){print(j)
# Subset
subdata <- data[data$ISO3==disag[j],]
head(subdata)
# Dates & locations
sub.datez <- gsub("-","",subdata$DATE_START)
sub.lat <- subdata$LATITUDE
sub.long <- subdata$LONGITUDE
sub.precis <- subdata$GEOPRECISION0
sub.tprecis <- subdata$TIMEPRECISION0
sub0 <- data.frame(SOURCE=paste0("GED_v171"),CONFLICT=countrycode(disag[j],"iso3c","country.name"),COWN=countrycode(disag[j],origin = "iso3c",destination = "cown"),COWC=countrycode(disag[j],origin = "iso3c",destination = "cowc"),ISO3=countrycode(disag[j],origin = "iso3c",destination = "iso3c"),DATE=as.character(sub.datez),LAT=sub.lat,LONG=sub.long,GEOPRECISION=sub.precis,TIMEPRECISION=sub.tprecis)
# Event Types (use dictionary)
head(subdata)
subdata$ID_TEMP <- 1:nrow(subdata)
subdata$TEXT <- subdata$SOURCE_ARTICLE
subdata$TEXT <- iconv(subdata$TEXT,"WINDOWS-1252","UTF-8")
subdata$TEXT <- tolower(subdata$TEXT)
textvar <- "TEXT"
idvar <- "ID_TEMP"
length(unique(subdata[,idvar]))==nrow(subdata)
events0 <- eventType(subdata=subdata,idvar=idvar,textvar=textvar,term.type=term.type,types.specific=types.specific,types.general=types.general)
summary(events0)
# Tactic-based actor coding
if(init.type%in%c("_init")){
load("DataCensus/TacticsActors_v1.RData")
# tactic.ix <- data.frame(ACTION=types.specific,INITIATOR="",stringsAsFactors = FALSE)
# tactic.ix[tactic.ix$ACTION%in%c("AIRSTRIKE","ARMOR","ARREST","RIOTCONTROL"),"INITIATOR"] <- "SIDEA"
# tactic.ix[tactic.ix$ACTION%in%c("PROTEST","PROTEST_V","RIOT","SUICIDE","TERROR"),"INITIATOR"] <- "SIDEB"
}
# Actors (use pre-existing dictionaries)
dir("Dictionaries/GED")
if(paste0("GED_",toupper(disag[j]),"_Actors.RData")%in%dir("Dictionaries/GED/Combined/")){load(paste0("Dictionaries/GED/Combined/GED_",toupper(disag[j]),"_Actors.RData"))}
if(length(actorlist$actors_GOV)>0){actorlist$actors_GOV <- trim(sapply(strsplit(actorlist$actors_GOV,split="\\(\\d{4}|\\(Inf"), '[', 1))}
if(length(actorlist$actors_REB)>0){actorlist$actors_REB <- trim(sapply(strsplit(actorlist$actors_REB,split="\\(\\d{4}|\\(Inf"), '[', 1))}
if(length(actorlist$actors_CIV)>0){actorlist$actors_CIV <- trim(sapply(strsplit(actorlist$actors_CIV,split="\\(\\d{4}|\\(Inf"), '[', 1))}
if(length(actorlist$actors_OTH)>0){actorlist$actors_OTH <- trim(sapply(strsplit(actorlist$actors_OTH,split="\\(\\d{4}|\\(Inf"), '[', 1))}
sub0$INITIATOR_SIDEA <- 1*(subdata$SIDE_A%in%actorlist$actors_GOV)
sub0$INITIATOR_SIDEB <- 1*(subdata$SIDE_A%in%actorlist$actors_REB)
sub0$INITIATOR_SIDEC <- 1*(subdata$SIDE_A%in%actorlist$actors_CIV)
sub0$INITIATOR_SIDED <- 1*((subdata$SIDE_A%in%actorlist$actors_OTH)|(subdata$SIDE_A%in%actorlist$actors&(!subdata$SIDE_A%in%c(actorlist$actors_GOV,actorlist$actors_REB,actorlist$actors_CIV))))
sub0$TARGET_SIDEA <- 1*(subdata$SIDE_B%in%actorlist$actors_GOV)
sub0$TARGET_SIDEB <- 1*(subdata$SIDE_B%in%actorlist$actors_REB)
sub0$TARGET_SIDEC <- 1*(subdata$SIDE_B%in%actorlist$actors_CIV)
sub0$TARGET_SIDED <- 1*((subdata$SIDE_B%in%actorlist$actors_OTH)|(subdata$SIDE_B%in%actorlist$actors&(!subdata$SIDE_B%in%c(actorlist$actors_GOV,actorlist$actors_REB,actorlist$actors_CIV))))
# Tactic-based event intiator fix
if(init.type%in%c("_init")){
sub2 <- sub0
flipz.a <- apply(events0[,paste0("ACTION_",tactic.ix$ACTION[tactic.ix$INITIATOR%in%"SIDEA"])],1,function(x){sum(x,na.rm=T)})>0
flipz.ab <- flipz.a&sub0$INITIATOR_SIDEB==1
flipz.aba <- flipz.ab&flipz.a&sub0$TARGET_SIDEA==1
flipz.b <- apply(events0[,paste0("ACTION_",tactic.ix$ACTION[tactic.ix$INITIATOR%in%"SIDEB"])],1,function(x){sum(x,na.rm=T)})>0
flipz.ba <- flipz.b&sub0$INITIATOR_SIDEA==1
flipz.bab <- flipz.ba&flipz.b&sub0$TARGET_SIDEB==1
sub2$INITIATOR_SIDEA[flipz.ab] <- 1
sub2$INITIATOR_SIDEB[flipz.ab] <- 0
sub2$TARGET_SIDEA[flipz.aba] <- 0
sub2$TARGET_SIDEB[flipz.aba] <- 1
sub2$INITIATOR_SIDEA[flipz.ba] <- 0
sub2$INITIATOR_SIDEB[flipz.ba] <- 1
sub2$TARGET_SIDEA[flipz.bab] <- 1
sub2$TARGET_SIDEB[flipz.bab] <- 0
sub0 <- sub2
}
# Dyads
sub0$DYAD_A_A <- sub0$INITIATOR_SIDEA*sub0$TARGET_SIDEA
sub0$DYAD_A_B <- sub0$INITIATOR_SIDEA*sub0$TARGET_SIDEB
sub0$DYAD_A_C <- sub0$INITIATOR_SIDEA*sub0$TARGET_SIDEC
sub0$DYAD_A_D <- sub0$INITIATOR_SIDEA*sub0$TARGET_SIDED
sub0$DYAD_B_A <- sub0$INITIATOR_SIDEB*sub0$TARGET_SIDEA
sub0$DYAD_B_B <- sub0$INITIATOR_SIDEB*sub0$TARGET_SIDEB
sub0$DYAD_B_C <- sub0$INITIATOR_SIDEB*sub0$TARGET_SIDEC
sub0$DYAD_B_D <- sub0$INITIATOR_SIDEB*sub0$TARGET_SIDED
sub0$DYAD_C_A <- sub0$INITIATOR_SIDEC*sub0$TARGET_SIDEA
sub0$DYAD_C_B <- sub0$INITIATOR_SIDEC*sub0$TARGET_SIDEB
sub0$DYAD_C_C <- sub0$INITIATOR_SIDEC*sub0$TARGET_SIDEC
sub0$DYAD_C_D <- sub0$INITIATOR_SIDEC*sub0$TARGET_SIDED
sub0$DYAD_D_A <- sub0$INITIATOR_SIDED*sub0$TARGET_SIDEA
sub0$DYAD_D_B <- sub0$INITIATOR_SIDED*sub0$TARGET_SIDEB
sub0$DYAD_D_C <- sub0$INITIATOR_SIDED*sub0$TARGET_SIDEC
sub0$DYAD_D_D <- sub0$INITIATOR_SIDED*sub0$TARGET_SIDED
# Undirected
if(!init.type%in%c("_init")){
sub0$DYAD_B_A <- sub0$DYAD_A_B
sub0$DYAD_C_A <- sub0$DYAD_A_C
sub0$DYAD_C_B <- sub0$DYAD_B_C
sub0$DYAD_D_A <- sub0$DYAD_A_D
sub0$DYAD_D_B <- sub0$DYAD_B_D
sub0$DYAD_D_C <- sub0$DYAD_C_D
}
# Actions (indiscriminate = violence vs. civilians)
sub0$ACTION_ANY <- events0$ACTION_ANY
sub0$ACTION_IND <- events0$ACTION_IND
sub0$ACTION_DIR <- events0$ACTION_DIR
sub0$ACTION_PRT <- events0$ACTION_PRT
# Actor-action
sub0$SIDEA_ANY <- sub0$INITIATOR_SIDEA*sub0$ACTION_ANY
sub0$SIDEA_IND <- sub0$INITIATOR_SIDEA*sub0$ACTION_IND
sub0$SIDEA_DIR <- sub0$INITIATOR_SIDEA*sub0$ACTION_DIR
sub0$SIDEA_PRT <- sub0$INITIATOR_SIDEA*sub0$ACTION_PRT
sub0$SIDEB_ANY <- sub0$INITIATOR_SIDEB*sub0$ACTION_ANY
sub0$SIDEB_IND <- sub0$INITIATOR_SIDEB*sub0$ACTION_IND
sub0$SIDEB_DIR <- sub0$INITIATOR_SIDEB*sub0$ACTION_DIR
sub0$SIDEB_PRT <- sub0$INITIATOR_SIDEB*sub0$ACTION_PRT
sub0$SIDEC_ANY <- sub0$INITIATOR_SIDEC*sub0$ACTION_ANY
sub0$SIDEC_IND <- sub0$INITIATOR_SIDEC*sub0$ACTION_IND
sub0$SIDEC_DIR <- sub0$INITIATOR_SIDEC*sub0$ACTION_DIR
sub0$SIDEC_PRT <- sub0$INITIATOR_SIDEC*sub0$ACTION_PRT
sub0$SIDED_ANY <- sub0$INITIATOR_SIDED*sub0$ACTION_ANY
sub0$SIDED_IND <- sub0$INITIATOR_SIDED*sub0$ACTION_IND
sub0$SIDED_DIR <- sub0$INITIATOR_SIDED*sub0$ACTION_DIR
sub0$SIDED_PRT <- sub0$INITIATOR_SIDED*sub0$ACTION_PRT
events <- sub0
# Multi-day events
sub0$ID_TEMP <- events0$ID_TEMP
head(subdata)
end.dates <- gsub("-","",subdata$DATE_END)
t <- 3
max(end.dates)
summary(as.numeric(end.dates)-as.numeric(sub.datez))
md.list <- lapply(1:nrow(sub0),function(t){#print(t)
sub.t <- sub0[t,]
if(as.character(sub0[t,"DATE"])<end.dates[t]){
start.t <- paste(substr(sub0[t,"DATE"],1,4),substr(sub0[t,"DATE"],5,6),substr(sub0[t,"DATE"],7,8),sep="-")
end.t <- paste(substr(end.dates[t],1,4),substr(end.dates[t],5,6),substr(end.dates[t],7,8),sep="-")
spells <- seq(as.Date(start.t), as.Date(end.t), by="1 day")
spells <- gsub("-","",spells)
sub.t <- sub.t[rep(1,length(spells)),]
sub.t$DATE <- spells
row.names(sub.t) <- 1:nrow(sub.t)}
sub.t
})
events <- do.call(rbind,md.list)
summary(events)
# Conform events0
events0.temp <- data.frame(ID_TEMP=events$ID_TEMP)
events0 <- merge(events0.temp,events0,by="ID_TEMP",all.x=T,all.y=T)
mean(events$ID_TEMP==events0$ID_TEMP)
events$ID_TEMP <- NULL
# Save
save(events,file=paste0("Output/Output_GED0/Events/GED0_Events_",countrycode(disag[j],origin = "iso3c",destination = "iso3c"),".RData"))
events0 <- events0[,names(events0)[!names(events0)%in%names(events)]]
save(events0,file=paste0("Output/Output_GED0/Events/EventType/GED0_EventType_",countrycode(disag[j],origin = "iso3c",destination = "iso3c"),".RData"))
events
# # Close loop, Single-core
# })
# Close loop, Multi-core
},mc.preschedule = FALSE, mc.set.seed = TRUE,mc.silent = FALSE, mc.cores = detectCores())
| /Code/step2_eventcode/step2_eventcode_GED0.R | no_license | zhukovyuri/xSub_ReplicationCode | R | false | false | 11,352 | r | rm(list=ls())
## Set directory
# Working directory resolution: default to HOME, then prefer a local "XSub"
# checkout, then a Dropbox team folder if present (the last match wins).
setwd("~/")
if("XSub"%in%dir()){setwd("~/XSub/Data/")}
if("Dropbox2"%in%dir()){setwd("~/Dropbox2/Dropbox (Zhukov research team)/XSub/Data/")}
# setwd("F:/Dropbox (Zhukov research team)/XSub/Data/")
## Install & load packages (all at once)
# NOTE(review): "gdata" is listed twice; harmless but redundant.
list.of.packages <- c("gdata","countrycode","maptools","foreign","plotrix","sp","raster","rgeos","gdata","parallel")
# Install anything that is missing, then attach everything.
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]; if(length(new.packages)){install.packages(new.packages,dependencies=TRUE)}
# NOTE(review): require() returns FALSE instead of erroring if a package is
# missing, so a failed attach here would only surface later.
lapply(list.of.packages, require, character.only = TRUE)
## Load custom functions
source("Code/functions.R")
#############################
## Create event-level data
#############################
## Load event type dictionary
# Provides the term dictionary plus event-type lists and the eventType()
# function used inside the per-country loop below.
load("Dictionaries/EventTypes/Combined_EventDictionary.RData")
source("Code/step2_eventcode/step2x_event_types_list.R")
source("Code/step2_eventcode/step2x_eventType_function.R")
# Load events
# # Clean up
# ged.raw <- read.csv("Input/Events/UCDP_GED/ged171.csv")
# countrylist <- sort(unique(as.character(ged.raw$country)))
# countrylist <- data.frame(country=countrylist,iso3=countrycode(countrylist,origin="country.name",destination="iso3c"))
# for(j in 1:2){countrylist[,j]<-as.character(countrylist[,j])}
# countrylist[countrylist$country=="Yemen (North Yemen)","iso3"] <- countrycode("Yemen","country.name","iso3c")
# countrylist[countrylist$country=="Serbia (Yugoslavia)","iso3"] <- countrycode("Serbia","country.name","iso3c")
# ged.raw <- merge(ged.raw,countrylist,by="country",all.x=T,all.y=T)
# save(ged.raw,file="Input/Events/UCDP_GED/ged171.RData")
# Load Syria data (for some reason missing from v 17.1)
# ged30.RData defines `ged.raw`; keep only Syrian events from it.
load("Input/Events/UCDP_GED/ged30.RData")
data <- ged.raw[ged.raw$iso3%in%"SYR",]
# Coerce every factor column to character and uppercase the column names.
classez<-c();for(j in 1:ncol(data)){classez[j]<-class(data[,j]);if(classez[j]=="factor"){data[,j]<-as.character(data[,j])}}
names(data) <- toupper(names(data))
syr.raw <- data
head(syr.raw); rm(data)
## Load raw data
# ged171.RData redefines `ged.raw` with the main (v17.1) event data.
load("Input/Events/UCDP_GED/ged171.RData")
data <- ged.raw
# Same factor-to-character / uppercase normalization as for the Syria data.
classez<-c();for(j in 1:ncol(data)){classez[j]<-class(data[,j]);if(classez[j]=="factor"){data[,j]<-as.character(data[,j])}}
names(data) <- toupper(names(data))
head(data)
# Merge
# Stack the two sources on the columns they share.
commonvars <- intersect(names(data),names(syr.raw))
data <- rbind(data[,commonvars],syr.raw[,commonvars])
head(data); rm(syr.raw,commonvars)
# Precision codes
names(data)
sort(unique(data$WHERE_PREC))
# Map UCDP WHERE_PREC codes to coarse geographic-precision labels:
# 1 -> settlement (default), 2-3 -> adm2, 4-5 -> adm1, 6-7 -> adm0.
data$GEOPRECISION0 <- "settlement"
data$GEOPRECISION0[which(data$WHERE_PREC%in%c(2,3))] <- "adm2"
data$GEOPRECISION0[which(data$WHERE_PREC%in%c(4,5))] <- "adm1"
data$GEOPRECISION0[which(data$WHERE_PREC%in%c(6,7))] <- "adm0"
# Map DATE_PREC codes to temporal precision: 1 -> day (default),
# 2-3 -> week, 4 -> month, 5 -> year.
data$TIMEPRECISION0 <- "day"
data$TIMEPRECISION0[which(data$DATE_PREC%in%c(2,3))] <- "week"
data$TIMEPRECISION0[which(data$DATE_PREC%in%c(4))] <- "month"
data$TIMEPRECISION0[which(data$DATE_PREC%in%c(5))] <- "year"
tail(data)
# Initiator classification
# "_init" switches on the tactic-based initiator recoding inside the loop.
init.type <- "_init"
## By country
disag <- sort(unique(data$ISO3))
## Check missing dictionaries
# Dictionary files are named "GED_<ISO3>_Actors.RData"; list ISO3 codes
# for which no actor dictionary exists yet.
disag[!disag%in%sapply(strsplit(dir("Dictionaries/GED/Combined/"),"_"),"[",2)]
# Debug leftover: lets the loop body be stepped through interactively.
j <- 1; disag[j]
# # Open loop, Single-Core
# ged.list <- lapply(1:length(disag),function(j){print(j)
# Open loop, Multi-Core
# Per-country event coding: for each ISO3 code in `disag`, build an
# event-level data frame (initiators, targets, dyads, actions), expand
# multi-day events to one row per day, and save the result to disk.
ged.list <- mclapply(1:length(disag),function(j){print(j)
# Subset
subdata <- data[data$ISO3==disag[j],]
head(subdata)
# Dates & locations
# Dates are normalized to "YYYYMMDD" strings.
sub.datez <- gsub("-","",subdata$DATE_START)
sub.lat <- subdata$LATITUDE
sub.long <- subdata$LONGITUDE
sub.precis <- subdata$GEOPRECISION0
sub.tprecis <- subdata$TIMEPRECISION0
# One row per event: source tag, country identifiers (NOTE(review): the
# iso3c -> iso3c conversion is an identity and could just be disag[j]),
# date, coordinates, and precision codes.
sub0 <- data.frame(SOURCE=paste0("GED_v171"),CONFLICT=countrycode(disag[j],"iso3c","country.name"),COWN=countrycode(disag[j],origin = "iso3c",destination = "cown"),COWC=countrycode(disag[j],origin = "iso3c",destination = "cowc"),ISO3=countrycode(disag[j],origin = "iso3c",destination = "iso3c"),DATE=as.character(sub.datez),LAT=sub.lat,LONG=sub.long,GEOPRECISION=sub.precis,TIMEPRECISION=sub.tprecis)
# Event Types (use dictionary)
head(subdata)
subdata$ID_TEMP <- 1:nrow(subdata)
# Lower-cased, UTF-8 source text is what the dictionary matcher scans.
subdata$TEXT <- subdata$SOURCE_ARTICLE
subdata$TEXT <- iconv(subdata$TEXT,"WINDOWS-1252","UTF-8")
subdata$TEXT <- tolower(subdata$TEXT)
textvar <- "TEXT"
idvar <- "ID_TEMP"
# Interactive sanity check (value discarded inside the function).
length(unique(subdata[,idvar]))==nrow(subdata)
events0 <- eventType(subdata=subdata,idvar=idvar,textvar=textvar,term.type=term.type,types.specific=types.specific,types.general=types.general)
summary(events0)
# Tactic-based actor coding
if(init.type%in%c("_init")){
# Loads `tactic.ix`, mapping action types to the side that initiates them.
load("DataCensus/TacticsActors_v1.RData")
# tactic.ix <- data.frame(ACTION=types.specific,INITIATOR="",stringsAsFactors = FALSE)
# tactic.ix[tactic.ix$ACTION%in%c("AIRSTRIKE","ARMOR","ARREST","RIOTCONTROL"),"INITIATOR"] <- "SIDEA"
# tactic.ix[tactic.ix$ACTION%in%c("PROTEST","PROTEST_V","RIOT","SUICIDE","TERROR"),"INITIATOR"] <- "SIDEB"
}
# Actors (use pre-existing dictionaries)
dir("Dictionaries/GED")
if(paste0("GED_",toupper(disag[j]),"_Actors.RData")%in%dir("Dictionaries/GED/Combined/")){load(paste0("Dictionaries/GED/Combined/GED_",toupper(disag[j]),"_Actors.RData"))}
# Strip trailing "(yyyy" / "(Inf" date suffixes from actor names.
# NOTE(review): trim() is presumably gdata::trim (loaded above) -- confirm.
if(length(actorlist$actors_GOV)>0){actorlist$actors_GOV <- trim(sapply(strsplit(actorlist$actors_GOV,split="\\(\\d{4}|\\(Inf"), '[', 1))}
if(length(actorlist$actors_REB)>0){actorlist$actors_REB <- trim(sapply(strsplit(actorlist$actors_REB,split="\\(\\d{4}|\\(Inf"), '[', 1))}
if(length(actorlist$actors_CIV)>0){actorlist$actors_CIV <- trim(sapply(strsplit(actorlist$actors_CIV,split="\\(\\d{4}|\\(Inf"), '[', 1))}
if(length(actorlist$actors_OTH)>0){actorlist$actors_OTH <- trim(sapply(strsplit(actorlist$actors_OTH,split="\\(\\d{4}|\\(Inf"), '[', 1))}
# 0/1 indicators: SIDE_A is the initiator, SIDE_B the target.
# SIDED catches "other" actors plus known actors not in GOV/REB/CIV.
sub0$INITIATOR_SIDEA <- 1*(subdata$SIDE_A%in%actorlist$actors_GOV)
sub0$INITIATOR_SIDEB <- 1*(subdata$SIDE_A%in%actorlist$actors_REB)
sub0$INITIATOR_SIDEC <- 1*(subdata$SIDE_A%in%actorlist$actors_CIV)
sub0$INITIATOR_SIDED <- 1*((subdata$SIDE_A%in%actorlist$actors_OTH)|(subdata$SIDE_A%in%actorlist$actors&(!subdata$SIDE_A%in%c(actorlist$actors_GOV,actorlist$actors_REB,actorlist$actors_CIV))))
sub0$TARGET_SIDEA <- 1*(subdata$SIDE_B%in%actorlist$actors_GOV)
sub0$TARGET_SIDEB <- 1*(subdata$SIDE_B%in%actorlist$actors_REB)
sub0$TARGET_SIDEC <- 1*(subdata$SIDE_B%in%actorlist$actors_CIV)
sub0$TARGET_SIDED <- 1*((subdata$SIDE_B%in%actorlist$actors_OTH)|(subdata$SIDE_B%in%actorlist$actors&(!subdata$SIDE_B%in%c(actorlist$actors_GOV,actorlist$actors_REB,actorlist$actors_CIV))))
# Tactic-based event initiator fix
# If an event contains a tactic characteristic of one side, flip the
# initiator (and, where the roles were mirrored, the target) accordingly.
if(init.type%in%c("_init")){
sub2 <- sub0
flipz.a <- apply(events0[,paste0("ACTION_",tactic.ix$ACTION[tactic.ix$INITIATOR%in%"SIDEA"])],1,function(x){sum(x,na.rm=T)})>0
flipz.ab <- flipz.a&sub0$INITIATOR_SIDEB==1
flipz.aba <- flipz.ab&flipz.a&sub0$TARGET_SIDEA==1
flipz.b <- apply(events0[,paste0("ACTION_",tactic.ix$ACTION[tactic.ix$INITIATOR%in%"SIDEB"])],1,function(x){sum(x,na.rm=T)})>0
flipz.ba <- flipz.b&sub0$INITIATOR_SIDEA==1
flipz.bab <- flipz.ba&flipz.b&sub0$TARGET_SIDEB==1
sub2$INITIATOR_SIDEA[flipz.ab] <- 1
sub2$INITIATOR_SIDEB[flipz.ab] <- 0
sub2$TARGET_SIDEA[flipz.aba] <- 0
sub2$TARGET_SIDEB[flipz.aba] <- 1
sub2$INITIATOR_SIDEA[flipz.ba] <- 0
sub2$INITIATOR_SIDEB[flipz.ba] <- 1
sub2$TARGET_SIDEA[flipz.bab] <- 1
sub2$TARGET_SIDEB[flipz.bab] <- 0
sub0 <- sub2
}
# Dyads
# Directed initiator-target dyad indicators (product of the two flags).
sub0$DYAD_A_A <- sub0$INITIATOR_SIDEA*sub0$TARGET_SIDEA
sub0$DYAD_A_B <- sub0$INITIATOR_SIDEA*sub0$TARGET_SIDEB
sub0$DYAD_A_C <- sub0$INITIATOR_SIDEA*sub0$TARGET_SIDEC
sub0$DYAD_A_D <- sub0$INITIATOR_SIDEA*sub0$TARGET_SIDED
sub0$DYAD_B_A <- sub0$INITIATOR_SIDEB*sub0$TARGET_SIDEA
sub0$DYAD_B_B <- sub0$INITIATOR_SIDEB*sub0$TARGET_SIDEB
sub0$DYAD_B_C <- sub0$INITIATOR_SIDEB*sub0$TARGET_SIDEC
sub0$DYAD_B_D <- sub0$INITIATOR_SIDEB*sub0$TARGET_SIDED
sub0$DYAD_C_A <- sub0$INITIATOR_SIDEC*sub0$TARGET_SIDEA
sub0$DYAD_C_B <- sub0$INITIATOR_SIDEC*sub0$TARGET_SIDEB
sub0$DYAD_C_C <- sub0$INITIATOR_SIDEC*sub0$TARGET_SIDEC
sub0$DYAD_C_D <- sub0$INITIATOR_SIDEC*sub0$TARGET_SIDED
sub0$DYAD_D_A <- sub0$INITIATOR_SIDED*sub0$TARGET_SIDEA
sub0$DYAD_D_B <- sub0$INITIATOR_SIDED*sub0$TARGET_SIDEB
sub0$DYAD_D_C <- sub0$INITIATOR_SIDED*sub0$TARGET_SIDEC
sub0$DYAD_D_D <- sub0$INITIATOR_SIDED*sub0$TARGET_SIDED
# Undirected
# Without initiator coding, symmetrize the dyads (X_Y mirrors Y_X).
if(!init.type%in%c("_init")){
sub0$DYAD_B_A <- sub0$DYAD_A_B
sub0$DYAD_C_A <- sub0$DYAD_A_C
sub0$DYAD_C_B <- sub0$DYAD_B_C
sub0$DYAD_D_A <- sub0$DYAD_A_D
sub0$DYAD_D_B <- sub0$DYAD_B_D
sub0$DYAD_D_C <- sub0$DYAD_C_D
}
# Actions (indiscriminate = violence vs. civilians)
sub0$ACTION_ANY <- events0$ACTION_ANY
sub0$ACTION_IND <- events0$ACTION_IND
sub0$ACTION_DIR <- events0$ACTION_DIR
sub0$ACTION_PRT <- events0$ACTION_PRT
# Actor-action
# Cross initiator flags with action flags.
sub0$SIDEA_ANY <- sub0$INITIATOR_SIDEA*sub0$ACTION_ANY
sub0$SIDEA_IND <- sub0$INITIATOR_SIDEA*sub0$ACTION_IND
sub0$SIDEA_DIR <- sub0$INITIATOR_SIDEA*sub0$ACTION_DIR
sub0$SIDEA_PRT <- sub0$INITIATOR_SIDEA*sub0$ACTION_PRT
sub0$SIDEB_ANY <- sub0$INITIATOR_SIDEB*sub0$ACTION_ANY
sub0$SIDEB_IND <- sub0$INITIATOR_SIDEB*sub0$ACTION_IND
sub0$SIDEB_DIR <- sub0$INITIATOR_SIDEB*sub0$ACTION_DIR
sub0$SIDEB_PRT <- sub0$INITIATOR_SIDEB*sub0$ACTION_PRT
sub0$SIDEC_ANY <- sub0$INITIATOR_SIDEC*sub0$ACTION_ANY
sub0$SIDEC_IND <- sub0$INITIATOR_SIDEC*sub0$ACTION_IND
sub0$SIDEC_DIR <- sub0$INITIATOR_SIDEC*sub0$ACTION_DIR
sub0$SIDEC_PRT <- sub0$INITIATOR_SIDEC*sub0$ACTION_PRT
sub0$SIDED_ANY <- sub0$INITIATOR_SIDED*sub0$ACTION_ANY
sub0$SIDED_IND <- sub0$INITIATOR_SIDED*sub0$ACTION_IND
sub0$SIDED_DIR <- sub0$INITIATOR_SIDED*sub0$ACTION_DIR
sub0$SIDED_PRT <- sub0$INITIATOR_SIDED*sub0$ACTION_PRT
events <- sub0
# Multi-day events
sub0$ID_TEMP <- events0$ID_TEMP
head(subdata)
end.dates <- gsub("-","",subdata$DATE_END)
# Debug leftovers from interactive development (t is shadowed below).
t <- 3
max(end.dates)
summary(as.numeric(end.dates)-as.numeric(sub.datez))
# Expand each event into one row per calendar day it spans.
md.list <- lapply(1:nrow(sub0),function(t){#print(t)
sub.t <- sub0[t,]
if(as.character(sub0[t,"DATE"])<end.dates[t]){
# Rebuild ISO dates from "YYYYMMDD", enumerate the daily spell, and
# replicate the event row once per day.
start.t <- paste(substr(sub0[t,"DATE"],1,4),substr(sub0[t,"DATE"],5,6),substr(sub0[t,"DATE"],7,8),sep="-")
end.t <- paste(substr(end.dates[t],1,4),substr(end.dates[t],5,6),substr(end.dates[t],7,8),sep="-")
spells <- seq(as.Date(start.t), as.Date(end.t), by="1 day")
spells <- gsub("-","",spells)
sub.t <- sub.t[rep(1,length(spells)),]
sub.t$DATE <- spells
row.names(sub.t) <- 1:nrow(sub.t)}
sub.t
})
events <- do.call(rbind,md.list)
summary(events)
# Conform events0
# Re-align events0 with the day-expanded events via ID_TEMP.
events0.temp <- data.frame(ID_TEMP=events$ID_TEMP)
events0 <- merge(events0.temp,events0,by="ID_TEMP",all.x=T,all.y=T)
mean(events$ID_TEMP==events0$ID_TEMP)
events$ID_TEMP <- NULL
# Save
save(events,file=paste0("Output/Output_GED0/Events/GED0_Events_",countrycode(disag[j],origin = "iso3c",destination = "iso3c"),".RData"))
# Keep only the event-type columns not already present in `events`.
events0 <- events0[,names(events0)[!names(events0)%in%names(events)]]
save(events0,file=paste0("Output/Output_GED0/Events/EventType/GED0_EventType_",countrycode(disag[j],origin = "iso3c",destination = "iso3c"),".RData"))
# Return value of the per-country function.
events
# # Close loop, Single-core
# })
# Close loop, Multi-core
},mc.preschedule = FALSE, mc.set.seed = TRUE,mc.silent = FALSE, mc.cores = detectCores())
|
# --- Load data & exploratory plots ---------------------------------------
# NOTE(review): hard-coded Windows path; only valid on the author's machine.
setwd("C:\\Users\\Ivan.Liuyanfeng\\Desktop\\Data_Mining_Work_Space\\Titanic-Machine-Learning-from-Disaster\\R")
# NOTE(review): argument is spelled `stringsAsFactor`; the real read.table
# argument is `stringsAsFactors` -- confirm it is actually honored here
# rather than silently ignored.
trainData <- read.csv('train.csv',header=T,stringsAsFactor=F)
testData <- read.csv('test.csv',header=T,stringsAsFactor=F)
head(trainData)
names(trainData)
# Marginal densities of a few numeric columns.
plot(density(trainData$Age,na.rm=T))
plot(density(trainData$Fare,na.rm=T))
plot(density(trainData$Pclass,na.rm=T))
# 2x2 table: rows = Survived (0,1), columns = Sex (female, male).
counts <- table(trainData$Survived,trainData$Sex)
barplot(counts,xlab='Gender',ylab='Number of People',main='survived and deceased between male and female')
# Column-major indexing: counts[2] = (Survived=1, female), so these are the
# female and male survival rates respectively.
counts[2] / (counts[1] + counts[2])
counts[4] / (counts[3] + counts[4])
# Same breakdown by passenger class (columns = Pclass 1,2,3).
Pclass_survival <- table(trainData$Survived, trainData$Pclass)
barplot(Pclass_survival, xlab = "Cabin Class", ylab = "Number of People",
main = "survived and deceased between male and female")
# Survival rate per class (1st, 2nd, 3rd).
Pclass_survival[2] / (Pclass_survival[1] + Pclass_survival[2])
Pclass_survival[4] / (Pclass_survival[3] + Pclass_survival[4])
Pclass_survival[6] / (Pclass_survival[5] + Pclass_survival[6])
# --- Feature engineering: training set -----------------------------------
# Drop PassengerId (col 1) and Ticket/Fare/Cabin/Embarked (cols 9-12),
# leaving Survived, Pclass, Name, Sex, Age, SibSp, Parch.
trainData <- trainData[-c(1, 9:12)]
# Recode Sex as "1" (female) / "0" (male); "^male" avoids re-hitting the
# "male" substring inside the already-replaced "female" entries.
trainData$Sex <- gsub("female", 1, trainData$Sex)
trainData$Sex <- gsub("^male", 0, trainData$Sex)
# Locate each title on the ORIGINAL names. fixed = TRUE makes the match
# literal, so "Mr." does not also match "Mrs." names.
master_vector <- grep("Master.", trainData$Name, fixed = TRUE)
miss_vector <- grep("Miss.", trainData$Name, fixed = TRUE)
mrs_vector <- grep("Mrs.", trainData$Name, fixed = TRUE)
mr_vector <- grep("Mr.", trainData$Name, fixed = TRUE)
dr_vector <- grep("Dr.", trainData$Name, fixed = TRUE)
# Vectorized replacement of the original element-wise loops; same order as
# before, so any name matching several titles resolves identically
# (the last assignment wins).
trainData$Name[master_vector] <- "Master"
trainData$Name[miss_vector] <- "Miss"
trainData$Name[mrs_vector] <- "Mrs"
trainData$Name[mr_vector] <- "Mr"
trainData$Name[dr_vector] <- "Dr"
# Mean observed age per title (rounded to 2 decimals), used below to
# impute missing ages.
master_age <- round(mean(trainData$Age[trainData$Name == "Master"], na.rm = TRUE), digits = 2)
miss_age <- round(mean(trainData$Age[trainData$Name == "Miss"], na.rm = TRUE), digits = 2)
mrs_age <- round(mean(trainData$Age[trainData$Name == "Mrs"], na.rm = TRUE), digits = 2)
mr_age <- round(mean(trainData$Age[trainData$Name == "Mr"], na.rm = TRUE), digits = 2)
dr_age <- round(mean(trainData$Age[trainData$Name == "Dr"], na.rm = TRUE), digits = 2)
# Impute missing ages with the per-title mean computed above.
# A named lookup vector replaces the original five-branch if/else chain.
title_ages <- c(Master = master_age, Miss = miss_age, Mrs = mrs_age,
  Mr = mr_age, Dr = dr_age)
for (i in which(is.na(trainData$Age))) {
  if (trainData$Name[i] %in% names(title_ages)) {
    trainData$Age[i] <- title_ages[[trainData$Name[i]]]
  } else {
    # Same diagnostic as before; the age stays NA for unknown titles.
    print("Uncaught Title")
  }
}
# BUG FIX: the original line read `trainData["Child"]` with no assignment --
# indexing a column that does not exist yet raises an error. The intent
# (as in the test-set code below) was to create the column.
# Child flag: 1 = age <= 12, 2 = older. ifelse() is vectorized and, unlike
# the original row loop, tolerates any leftover NA ages.
trainData["Child"] <- ifelse(trainData$Age <= 12, 1, 2)
# Family size = siblings/spouses + parents/children + the passenger.
trainData["Family"] <- trainData$SibSp + trainData$Parch + 1
# Mother flag: 1 = married woman travelling with parents/children,
# 2 = everyone else.
trainData["Mother"] <- ifelse(trainData$Name == "Mrs" & trainData$Parch > 0, 1, 2)
# --- Feature engineering: test set (mirrors the training set) ------------
# Keep PassengerId aside (one-column data frame) for the submission file.
PassengerId <- testData[1]
# Drop PassengerId and Ticket/Fare/Cabin/Embarked, leaving
# Pclass, Name, Sex, Age, SibSp, Parch.
testData <- testData[-c(1, 8:11)]
testData$Sex <- gsub("female", 1, testData$Sex)
testData$Sex <- gsub("^male", 0, testData$Sex)
# BUG FIX + consistency with the training-set code: the original greps here
# omitted fixed = TRUE, so the regex "Mr." (dot = any character) also matched
# "Mrs." names, and the later "Mr" pass overwrote every "Mrs" label (it also
# made "Dr." match names like "Drake"). Literal matching restores the
# intended titles.
test_master_vector <- grep("Master.", testData$Name, fixed = TRUE)
test_miss_vector <- grep("Miss.", testData$Name, fixed = TRUE)
test_mrs_vector <- grep("Mrs.", testData$Name, fixed = TRUE)
test_mr_vector <- grep("Mr.", testData$Name, fixed = TRUE)
test_dr_vector <- grep("Dr.", testData$Name, fixed = TRUE)
# Vectorized replacement, same order as the original loops.
testData$Name[test_master_vector] <- "Master"
testData$Name[test_miss_vector] <- "Miss"
testData$Name[test_mrs_vector] <- "Mrs"
testData$Name[test_mr_vector] <- "Mr"
testData$Name[test_dr_vector] <- "Dr"
# Mean observed age per title, used for imputation below.
test_master_age <- round(mean(testData$Age[testData$Name == "Master"], na.rm = TRUE), digits = 2)
test_miss_age <- round(mean(testData$Age[testData$Name == "Miss"], na.rm = TRUE), digits = 2)
test_mrs_age <- round(mean(testData$Age[testData$Name == "Mrs"], na.rm = TRUE), digits = 2)
test_mr_age <- round(mean(testData$Age[testData$Name == "Mr"], na.rm = TRUE), digits = 2)
test_dr_age <- round(mean(testData$Age[testData$Name == "Dr"], na.rm = TRUE), digits = 2)
# Impute missing test-set ages with the per-title means computed above.
test_title_ages <- c(Master = test_master_age, Miss = test_miss_age,
  Mrs = test_mrs_age, Mr = test_mr_age, Dr = test_dr_age)
for (i in which(is.na(testData$Age))) {
  title <- testData$Name[i]
  if (title %in% names(test_title_ages)) {
    testData$Age[i] <- test_title_ages[[title]]
  } else {
    # Same diagnostics as the original: report rows whose title has no
    # imputation value.
    print(paste("Uncaught title at: ", i, sep = ""))
    print(paste("The title unrecognized was: ", title, sep = ""))
  }
}
# Manual replacement kept from the original: row 89 carries a title none of
# the greps above recognise, so its age is imputed with the "Miss" mean.
testData[89, 4] <- test_miss_age
# BUG FIX: the original Child loop assigned 1 in BOTH branches, flagging
# every test passenger as a child; passengers older than 12 must get 2,
# matching the training-set encoding used to fit the model.
testData["Child"] <- ifelse(testData$Age <= 12, 1, 2)
# Family size = siblings/spouses + parents/children + the passenger.
testData["Family"] <- testData$SibSp + testData$Parch + 1
# Mother flag: 1 = married woman travelling with parents/children,
# 2 = everyone else.
testData["Mother"] <- ifelse(testData$Name == "Mrs" & testData$Parch > 0, 1, 2)
# --- Logistic regression model and Kaggle submission ---------------------
train.glm <- glm(Survived ~ Pclass + Sex + Age + Child + Sex*Pclass + Family + Mother,
  family = binomial, data = trainData)
summary(train.glm)
# Predicted survival probabilities for the test set.
p.hats <- predict.glm(train.glm, newdata = testData, type = "response")
# Threshold at 0.5 (vectorized; replaces the original element-wise loop).
survival <- as.numeric(p.hats > .5)
# Submission file: PassengerId + 0/1 prediction.
kaggle.sub <- cbind(PassengerId, survival)
colnames(kaggle.sub) <- c("PassengerId", "Survived")
write.csv(kaggle.sub, file = "kaggle.csv", row.names = FALSE)
| /R/model1.R | no_license | lucentcosmos/Titanic-Machine-Learning-from-Disaster | R | false | false | 6,263 | r | setwd("C:\\Users\\Ivan.Liuyanfeng\\Desktop\\Data_Mining_Work_Space\\Titanic-Machine-Learning-from-Disaster\\R")
trainData <- read.csv('train.csv',header=T,stringsAsFactor=F)
testData <- read.csv('test.csv',header=T,stringsAsFactor=F)
head(trainData)
names(trainData)
plot(density(trainData$Age,na.rm=T))
plot(density(trainData$Fare,na.rm=T))
plot(density(trainData$Pclass,na.rm=T))
counts <- table(trainData$Survived,trainData$Sex)
barplot(counts,xlab='Gender',ylab='Number of People',main='survived and deceased between male and female')
counts[2] / (counts[1] + counts[2])
counts[4] / (counts[3] + counts[4])
Pclass_survival <- table(trainData$Survived, trainData$Pclass)
barplot(Pclass_survival, xlab = "Cabin Class", ylab = "Number of People",
main = "survived and deceased between male and female")
Pclass_survival[2] / (Pclass_survival[1] + Pclass_survival[2])
Pclass_survival[4] / (Pclass_survival[3] + Pclass_survival[4])
Pclass_survival[6] / (Pclass_survival[5] + Pclass_survival[6])
trainData = trainData[-c(1,9:12)]
trainData$Sex = gsub("female", 1, trainData$Sex)
trainData$Sex = gsub("^male", 0, trainData$Sex)
master_vector = grep("Master.",trainData$Name, fixed=TRUE)
miss_vector = grep("Miss.", trainData$Name, fixed=TRUE)
mrs_vector = grep("Mrs.", trainData$Name, fixed=TRUE)
mr_vector = grep("Mr.", trainData$Name, fixed=TRUE)
dr_vector = grep("Dr.", trainData$Name, fixed=TRUE)
for(i in master_vector) {
trainData$Name[i] = "Master"
}
for(i in miss_vector) {
trainData$Name[i] = "Miss"
}
for(i in mrs_vector) {
trainData$Name[i] = "Mrs"
}
for(i in mr_vector) {
trainData$Name[i] = "Mr"
}
for(i in dr_vector) {
trainData$Name[i] = "Dr"
}
master_age = round(mean(trainData$Age[trainData$Name == "Master"], na.rm = TRUE), digits = 2)
miss_age = round(mean(trainData$Age[trainData$Name == "Miss"], na.rm = TRUE), digits =2)
mrs_age = round(mean(trainData$Age[trainData$Name == "Mrs"], na.rm = TRUE), digits = 2)
mr_age = round(mean(trainData$Age[trainData$Name == "Mr"], na.rm = TRUE), digits = 2)
dr_age = round(mean(trainData$Age[trainData$Name == "Dr"], na.rm = TRUE), digits = 2)
for (i in 1:nrow(trainData)) {
if (is.na(trainData[i,5])) {
if (trainData$Name[i] == "Master") {
trainData$Age[i] = master_age
} else if (trainData$Name[i] == "Miss") {
trainData$Age[i] = miss_age
} else if (trainData$Name[i] == "Mrs") {
trainData$Age[i] = mrs_age
} else if (trainData$Name[i] == "Mr") {
trainData$Age[i] = mr_age
} else if (trainData$Name[i] == "Dr") {
trainData$Age[i] = dr_age
} else {
print("Uncaught Title")
}
}
}
# BUG FIX: `trainData["Child"]` indexed a column that does not exist yet
# (an error); the intent -- as in the test-set code below -- was to create
# it. Child flag: 1 = age <= 12, 2 = older (vectorized; replaces the loop).
trainData["Child"] <- ifelse(trainData$Age <= 12, 1, 2)
trainData["Family"] = NA
for(i in 1:nrow(trainData)) {
x = trainData$SibSp[i]
y = trainData$Parch[i]
trainData$Family[i] = x + y + 1
}
trainData["Mother"] = NA
for(i in 1:nrow(trainData)) {
if(trainData$Name[i] == "Mrs" & trainData$Parch[i] > 0) {
trainData$Mother[i] = 1
} else {
trainData$Mother[i] = 2
}
}
# test data
PassengerId = testData[1]
testData = testData[-c(1, 8:11)]
testData$Sex = gsub("female", 1, testData$Sex)
testData$Sex = gsub("^male", 0, testData$Sex)
test_master_vector = grep("Master.",testData$Name)
test_miss_vector = grep("Miss.", testData$Name)
test_mrs_vector = grep("Mrs.", testData$Name)
test_mr_vector = grep("Mr.", testData$Name)
test_dr_vector = grep("Dr.", testData$Name)
for(i in test_master_vector) {
testData[i, 2] = "Master"
}
for(i in test_miss_vector) {
testData[i, 2] = "Miss"
}
for(i in test_mrs_vector) {
testData[i, 2] = "Mrs"
}
for(i in test_mr_vector) {
testData[i, 2] = "Mr"
}
for(i in test_dr_vector) {
testData[i, 2] = "Dr"
}
test_master_age = round(mean(testData$Age[testData$Name == "Master"], na.rm = TRUE), digits = 2)
test_miss_age = round(mean(testData$Age[testData$Name == "Miss"], na.rm = TRUE), digits =2)
test_mrs_age = round(mean(testData$Age[testData$Name == "Mrs"], na.rm = TRUE), digits = 2)
test_mr_age = round(mean(testData$Age[testData$Name == "Mr"], na.rm = TRUE), digits = 2)
test_dr_age = round(mean(testData$Age[testData$Name == "Dr"], na.rm = TRUE), digits = 2)
for (i in 1:nrow(testData)) {
if (is.na(testData[i,4])) {
if (testData[i, 2] == "Master") {
testData[i, 4] = test_master_age
} else if (testData[i, 2] == "Miss") {
testData[i, 4] = test_miss_age
} else if (testData[i, 2] == "Mrs") {
testData[i, 4] = test_mrs_age
} else if (testData[i, 2] == "Mr") {
testData[i, 4] = test_mr_age
} else if (testData[i, 2] == "Dr") {
testData[i, 4] = test_dr_age
} else {
print(paste("Uncaught title at: ", i, sep=""))
print(paste("The title unrecognized was: ", testData[i,2], sep=""))
}
}
}
#We do a manual replacement here, because we weren't able to programmatically figure out the title.
#We figured out it was 89 because the above print statement should have warned us.
testData[89, 4] = test_miss_age
# BUG FIX: the original loop wrote 1 in BOTH branches, flagging every test
# passenger as a child; passengers older than 12 must get 2, matching the
# training-set encoding (column 4 is Age).
testData["Child"] <- ifelse(testData[, 4] <= 12, 1, 2)
testData["Family"] = NA
for(i in 1:nrow(testData)) {
testData[i, 8] = testData[i, 5] + testData[i, 6] + 1
}
testData["Mother"] = NA
for(i in 1:nrow(testData)) {
if(testData[i, 2] == "Mrs" & testData[i, 6] > 0) {
testData[i, 9] = 1
} else {
testData[i, 9] = 2
}
}
# logitic regression model
train.glm <- glm(Survived ~ Pclass + Sex + Age + Child + Sex*Pclass + Family + Mother, family = binomial, data = trainData)
summary(train.glm)
p.hats <- predict.glm(train.glm, newdata = testData, type = "response")
survival <- vector()
for(i in 1:length(p.hats)) {
if(p.hats[i] > .5) {
survival[i] <- 1
} else {
survival[i] <- 0
}
}
kaggle.sub <- cbind(PassengerId,survival)
colnames(kaggle.sub) <- c("PassengerId", "Survived")
write.csv(kaggle.sub, file = "kaggle.csv", row.names = FALSE)
|
# A manipulate plot to diagnose what's going on
# Interactive diagnostic: sliders drive replicated paradox simulations
# (paradox_sim_reps) whose summary is drawn by paradox_diag_plot().
library(paradox)
library(manipulate)
library(doParallel)
# Parallel backend for the .parallel = TRUE call below.
registerDoParallel(cores = 2)
manipulate({
# Discard the first half of each run as burn-in.
burnin <- round(t_end / 2)
out <- paradox_sim_reps(m = m, q = q, cpar = cpar, n = n, p = p, alpha
= alpha, beta = beta, num_pop = seq(1, maxn),
sigma = seq(0, max_sigma, length.out = 6), reps = reps, t_end =
t_end, burnin = burnin, return_ts = FALSE, .parallel = TRUE)
paradox_diag_plot(out)
},
# Slider ranges: slider(min, max, initial).
m = slider(0.01, 0.2, 0.1),
q = slider(0.001, 0.2, 0.01),
cpar = slider(0.5, 4, 2),
n = slider(0.4, 1.5, 1),
p = slider(0.5, 1.5, 1),
alpha = slider(0.1, 1.5, 0.5),
beta = slider(1/500, 1/10, 1/150),
maxn = slider(2,40, 20),
reps = slider(5, 100, 20),
max_sigma = slider(0.1, 1.5, 0.5),
t_end = slider(100, 2000, 700)
)
# A manipulate plot of the time series:
# 4x4 grid of simulated biomass time series: rows vary process noise
# (sigma, from upper_sigma down to lower_sigma), columns vary the number
# of subpopulations (lower_num_pop to upper_num_pop).
# years_to_plot: how many years of each series to draw.
# y_min/y_max:   shared y-axis limits across panels.
# alpha:         per-population growth parameter passed to paradox_sim().
# ...:           forwarded to paradox_sim().
plot_subpops <- function(lower_sigma = 0.02, upper_sigma = 0.3, lower_num_pop = 2, upper_num_pop = 20, years_to_plot = 60, y_min = 0, y_max = 100, alpha = 0.5, ...){
  sigmas <- seq(upper_sigma, lower_sigma, length.out = 4)
  num_pops <- round(seq(lower_num_pop, upper_num_pop, length.out = 4))
  par(mfrow = c(4,4), mar = c(0,0,0,0), oma = c(5,5, 1, 1))
  ignore <- sapply(sigmas, function(s) {
    sapply(num_pops, function(np) {
      out <- paradox_sim(alpha = rep(alpha, np), sigma = s, num_pop = np, return_ts = TRUE, ...)$biomass
      # With a single population, skip the transpose of the biomass matrix.
      if(np > 1) {
        matplot(t(out[,1:years_to_plot]), type = "l", lty = 1, col = RColorBrewer::brewer.pal(6, "Dark2"), axes = FALSE, ylim = c(y_min, y_max))
      } else {
        matplot(out[,1:years_to_plot], type = "l", lty = 1, col = RColorBrewer::brewer.pal(6, "Dark2"), axes = FALSE, ylim = c(y_min, y_max))
      }
      box(col = "lightgrey")
      # Label only the outer row/column of the panel grid.
      if(s == min(sigmas)) mtext(paste("N =", np), side = 1, line = 1)
      if(np == min(num_pops)) mtext(paste("sigma =", s), side = 2, line = 1)
    })})
  # BUG FIX: axis-label typo ("Numer" -> "Number").
  mtext("Number of populations", side = 1, outer = TRUE , line = 3)
  mtext("Process noise", side = 2, outer = TRUE , line = 3)
}
manipulate({
plot_subpops(alpha = alpha, m = m, q = q, cpar = cpar, n = n,p = p, beta = beta, upper_sigma = upper_sigma, y_max = y_max)},
m = slider(0.01, 0.2, 0.1),
q = slider(0.001, 0.2, 0.01),
cpar = slider(0.5, 4, 2),
n = slider(0.4, 1.5, 1),
p = slider(0.5, 1.5, 1),
alpha = slider(0.1, 1.5, 0.5),
beta = slider(1/500, 1/10, 1/150),
y_max = slider(10, 600, 100),
upper_sigma = slider(0.2, 2, 0.5)
) | /demo/diagnostic-manipulate.r | no_license | jdyeakel/paradox | R | false | false | 2,430 | r | # A manipulate plot to diagnose what's going on
library(paradox)
library(manipulate)
library(doParallel)
registerDoParallel(cores = 2)
manipulate({
burnin <- round(t_end / 2)
out <- paradox_sim_reps(m = m, q = q, cpar = cpar, n = n, p = p, alpha
= alpha, beta = beta, num_pop = seq(1, maxn),
sigma = seq(0, max_sigma, length.out = 6), reps = reps, t_end =
t_end, burnin = burnin, return_ts = FALSE, .parallel = TRUE)
paradox_diag_plot(out)
},
m = slider(0.01, 0.2, 0.1),
q = slider(0.001, 0.2, 0.01),
cpar = slider(0.5, 4, 2),
n = slider(0.4, 1.5, 1),
p = slider(0.5, 1.5, 1),
alpha = slider(0.1, 1.5, 0.5),
beta = slider(1/500, 1/10, 1/150),
maxn = slider(2,40, 20),
reps = slider(5, 100, 20),
max_sigma = slider(0.1, 1.5, 0.5),
t_end = slider(100, 2000, 700)
)
# A manipulate plot of the time series:
# 4x4 grid of simulated biomass time series: rows vary process noise
# (sigma), columns vary the number of subpopulations. Extra arguments in
# ... are forwarded to paradox_sim().
plot_subpops <- function(lower_sigma = 0.02, upper_sigma = 0.3, lower_num_pop = 2, upper_num_pop = 20, years_to_plot = 60, y_min = 0, y_max = 100, alpha = 0.5, ...){
# Four noise levels (high to low) and four rounded population counts.
sigmas <- seq(upper_sigma, lower_sigma, length.out = 4)
num_pops <- round(seq(lower_num_pop, upper_num_pop, length.out = 4))
par(mfrow = c(4,4), mar = c(0,0,0,0), oma = c(5,5, 1, 1))
ignore <- sapply(sigmas, function(s) {
sapply(num_pops, function(np) {
out <- paradox_sim(alpha = rep(alpha, np), sigma = s, num_pop = np, return_ts = TRUE, ...)$biomass
# With a single population, skip the transpose of the biomass matrix.
if(np > 1) {
matplot(t(out[,1:years_to_plot]), type = "l", lty = 1, col = RColorBrewer::brewer.pal(6, "Dark2"), axes = FALSE, ylim = c(y_min, y_max))
} else {
matplot(out[,1:years_to_plot], type = "l", lty = 1, col = RColorBrewer::brewer.pal(6, "Dark2"), axes = FALSE, ylim = c(y_min, y_max))
}
box(col = "lightgrey")
# Label only the outer row/column of the panel grid.
if(s == min(sigmas)) mtext(paste("N =", np), side = 1, line = 1)
if(np == min(num_pops)) mtext(paste("sigma =", s), side = 2, line = 1)
})})
# NOTE(review): "Numer" is a typo for "Number" in this axis label.
mtext("Numer of populations", side = 1, outer = TRUE , line = 3)
mtext("Process noise", side = 2, outer = TRUE , line = 3)
}
manipulate({
plot_subpops(alpha = alpha, m = m, q = q, cpar = cpar, n = n,p = p, beta = beta, upper_sigma = upper_sigma, y_max = y_max)},
m = slider(0.01, 0.2, 0.1),
q = slider(0.001, 0.2, 0.01),
cpar = slider(0.5, 4, 2),
n = slider(0.4, 1.5, 1),
p = slider(0.5, 1.5, 1),
alpha = slider(0.1, 1.5, 0.5),
beta = slider(1/500, 1/10, 1/150),
y_max = slider(10, 600, 100),
upper_sigma = slider(0.2, 2, 0.5)
) |
# Auto-extracted help-page example for iptmnetr::get_ptm_enzymes_from_file.
# The call itself is wrapped in "Not run" because it needs a local
# "kinases.txt" input file.
library(iptmnetr)
### Name: get_ptm_enzymes_from_file
### Title: Get PTM Enzymes using a file
### Aliases: get_ptm_enzymes_from_file
### ** Examples
## Not run: enzymes = get_ptm_enzymes_from_file("kinases.txt")
| /data/genthat_extracted_code/iptmnetr/examples/get_ptm_enzymes_from_file.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 219 | r | library(iptmnetr)
### Name: get_ptm_enzymes_from_file
### Title: Get PTM Enzymes using a file
### Aliases: get_ptm_enzymes_from_file
### ** Examples
## Not run: enzymes = get_ptm_enzymes_from_file("kinases.txt")
|
Boutrp <-
function (x, cmat) 
{
    ## Bootstrap multiple-contrast wrapper: feeds the bootstrap replicate
    ## matrix in x$t (one column per group) and the contrast matrix `cmat`
    ## into CCdrp().
    ## x:    presumably a boot-style object -- components read here are
    ##       t (replicate matrix), t0 (named group statistics) and
    ##       strata (group factor).
    ## cmat: contrast matrix, passed through unchanged.
    # Per-group sample sizes, labelled with the group names from x$t0.
    # (The original also computed an unused `ngroup <- ncol(x$t)`.)
    ni <- unlist(lapply(split(x$strata, f = x$strata), length))
    names(ni) <- names(x$t0)
    if (any(ni < 5)) {
        # Typo fixed in the message ("les" -> "less").
        warning("For sample sizes less than 5 this function hardly makes sense!")
    }
    out <- CCdrp(x = x$t, cmat = cmat)
    return(out)
}
| /R/Boutrp.R | no_license | shearer/simboot | R | false | false | 339 | r | Boutrp <-
function (x, cmat) 
{
    # Bootstrap multiple-contrast wrapper: feeds the replicate matrix x$t
    # and the contrast matrix `cmat` into CCdrp().
    # Components of x read here: t (replicates, one column per group),
    # t0 (named group statistics), strata (group factor).
    ngroup <- ncol(x$t)
    f <- x$strata
    # Per-group sample sizes, labelled with the group names from x$t0.
    ni <- unlist(lapply(split(f, f = f), length))
    gnames <- names(x$t0)
    names(ni) <- gnames
    if (any(ni < 5)) {
        # NOTE(review): "les" is a typo for "less" in this warning text.
        warning("For sample sizes les than 5 this function hardly makes sense!")
    }
    chains <- x$t
    out <- CCdrp(x = chains, cmat = cmat)
    return(out)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dat_keech.R
\docType{data}
\name{dat_keech}
\alias{dat_keech}
\title{Studies on the impact of intranasal oxytocin on emotion recognition in neurodevelopmental disorders}
\format{
A data frame with 13 rows and 4 columns:
\describe{
\item{study}{The name of the first author of the study and the year it was published}
\item{yi}{Effect size measure (Hedges' g)}
\item{lower}{The lower bound of a 95\% confidence interval}
\item{upper}{The upper bound of a 95\% confidence interval}
...
}
}
\usage{
dat_keech
}
\description{
This is data from a meta-analysis on 12 studies investigating the impact of intranasal oxytocin
administration on emotion recognition in neurodevelopmental disorders. Positive effect size values
are indicative of a positive effect of intranasal oxytocin on emotion recognition. Effect size
measure and confidence intervals were extracted from Figure 2 of the article (see reference below).
}
\examples{
power_keech <-
mapower_ul(
dat = dat_keech,
observed_es = 0.08,
name = "Keech et al 2018"
)
}
\references{
{
Keech, B., Crowe, S., & Hocking, D. R. (2018). Intranasal oxytocin,
social cognition and neurodevelopmental disorders: a meta-analysis
\emph{Psychoneuroendocrinology}, \bold{87}, 9--19.
}
}
\keyword{datasets}
| /man/dat_keech.Rd | permissive | sayanmitra/metameta | R | false | true | 1,356 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dat_keech.R
\docType{data}
\name{dat_keech}
\alias{dat_keech}
\title{Studies on the impact of intranasal oxytocin on emotion recognition in neurodevelopmental disorders}
\format{
A data frame with 13 rows and 4 columns:
\describe{
\item{study}{The name of the first author of the study and the year it was published}
\item{yi}{Effect size measure (Hedges' g)}
\item{lower}{The lower bound of a 95\% confidence interval}
\item{upper}{The upper bound of a 95\% confidence interval}
...
}
}
\usage{
dat_keech
}
\description{
This is data from a meta-analysis on 12 studies investigating the impact of intranasal oxytocin
administration on emotion recognition in neurodevelopmental disorders. Positive effect size values
are indicative of a positive effect of intranasal oxytocin on emotion recognition. Effect size
measure and confidence intervals were extracted from Figure 2 of the article (see reference below).
}
\examples{
power_keech <-
mapower_ul(
dat = dat_keech,
observed_es = 0.08,
name = "Keech et al 2018"
)
}
\references{
{
Keech, B., Crowe, S., & Hocking, D. R. (2018). Intranasal oxytocin,
social cognition and neurodevelopmental disorders: a meta-analysis
\emph{Psychoneuroendocrinology}, \bold{87}, 9--19.
}
}
\keyword{datasets}
|
# Convert the in-house transposable-element annotation table to GFF3.
# Input columns used: Scaffold, Start_Position, End_Position, Element_ID,
# Order, Super_Family, Family.
dat <- read.table("mazhao_CACAO_Transposable_Element_Annotation_New.gff",
                  sep = "\t", header = TRUE, fill = TRUE)
n_rec <- nrow(dat)
# Column-9 attributes: stable feature ID plus a Name built from the
# Order/Super_Family/Family classification.
te_id <- sprintf("TcTE_%d", dat$Element_ID)
te_name <- sprintf("%s_%s_%s", dat$Order, dat$Super_Family, dat$Family)
gff3 <- data.frame(
  seqid      = dat$Scaffold,
  source     = rep("mazhao", n_rec),
  type       = rep("transposable_element", n_rec),
  start      = dat$Start_Position,
  end        = dat$End_Position,
  score      = rep(0.05, n_rec),
  strand     = rep("?", n_rec),
  phase      = rep(".", n_rec),
  attributes = sprintf("ID=%s;Name=%s", te_id, te_name)
)
# Tab-separated, no header/row names/quoting -- plain GFF3 body.
write.table(gff3,
            file = "mazhao_CACAO_Transposable_Element_Annotation_JCS.gff3",
            sep = "\t", col.names = FALSE, row.names = FALSE, quote = FALSE)
| /scripts/gff2gff3.R | no_license | ConradStack/BSgenome.Tcacao | R | false | false | 671 | r |
# Convert the in-house transposable-element annotation table to GFF3.
dat = read.table("mazhao_CACAO_Transposable_Element_Annotation_New.gff",sep="\t",header=T, fill=TRUE)
N = nrow(dat)
# Fixed GFF3 fields: source tag, feature type, placeholder score, unknown
# strand ("?") and no phase ("."); attributes are filled in below.
gff3 = data.frame(
	seqid = dat$Scaffold,
	source = rep("mazhao",N),
	type = rep("transposable_element",N),
	start = dat$Start_Position,
	end = dat$End_Position,
	score =rep(0.05,N),
	strand =rep("?",N),
	phase =rep(".",N),
	attributes =character(N)
	)
# Column-9 attributes: stable ID plus a Name from the TE classification.
ids = with(dat,sprintf("TcTE_%d",Element_ID))
gnames= with(dat,sprintf("%s_%s_%s",Order,Super_Family,Family))
gff3$attributes <- sprintf("ID=%s;Name=%s",ids,gnames)
# Tab-separated, no header/row names/quoting -- plain GFF3 body.
write.table(gff3,file="mazhao_CACAO_Transposable_Element_Annotation_JCS.gff3",sep="\t",col.names=FALSE,row.names=FALSE,quote=FALSE)
|
library("igraph")
library("ggplot2")
library("plyr")

# Load the tab-separated edge list and build an undirected graph.
links <- read.csv("./au_graph.txt", header = FALSE, as.is = TRUE, sep = "\t")
net <- graph_from_data_frame(d = links, directed = FALSE)
# net <- simplify(net, remove.multiple = TRUE, remove.loops = TRUE)
net <- as.undirected(net, mode = "collapse")
largest_cliques(net)

# Count the maximal cliques of each size from 3 through 12.
sizes <- 3:12
for (k in sizes) {
  cliques_k <- max_cliques(net, min = k, max = k)
  cat("[Maximal] clique size:", k, "-", "Count:", length(cliques_k), "\n")
}

### Output
# [Maximal] clique size: 3 - Count: 1022
# [Maximal] clique size: 4 - Count: 698
# [Maximal] clique size: 5 - Count: 455
# [Maximal] clique size: 6 - Count: 336
# [Maximal] clique size: 7 - Count: 318
# [Maximal] clique size: 8 - Count: 208
# [Maximal] clique size: 9 - Count: 132
# [Maximal] clique size: 10 - Count: 73
# [Maximal] clique size: 11 - Count: 36
# [Maximal] clique size: 12 - Count: 7

# Plot the observed maximal-clique size distribution (counts above were
# hard-coded from the run output).
x <- 3:12
y <- c(1022, 698, 455, 336, 318, 208, 132, 73, 36, 7)
df <- data.frame(x, y)
png(filename = "maximal_clique_distribution.png", width = 2048, height = 2048)
ggplot(df, aes(x = x, y = y)) +
  geom_point(size = 20, colour = "red", shape = 20) +
  scale_x_continuous("Clique Size", breaks = x) +
  scale_y_continuous("Frequency", breaks = y) +
  ggtitle("Maximal Cliques Distribution") +
  theme(text = element_text(size = 50))
dev.off()
| /codes/7_cliques.r | no_license | sa-akhavani/bitcoin-transaction-analysis | R | false | false | 1,432 | r | library("igraph")
library("ggplot2")
library("plyr")
links <- read.csv("./au_graph.txt", header=F, as.is=T, sep="\t")
# Simplify Graph
net <- graph_from_data_frame(d=links, directed=F)
# net <- simplify(net, remove.multiple = T, remove.loops = T)
net <- as.undirected(net, mode= "collapse")
largest_cliques(net)
# Count maximal cliques
sizes <- c(3, 4, 5, 6, 7, 8, 9, 10, 11, 12)
for (clique_size in sizes) {
cq <- max_cliques(net, min=clique_size, max=clique_size)
cat("[Maximal] clique size:", clique_size, "-", "Count:", length(cq), "\n")
}
### Output
# [Maximal] clique size: 3 - Count: 1022
# [Maximal] clique size: 4 - Count: 698
# [Maximal] clique size: 5 - Count: 455
# [Maximal] clique size: 6 - Count: 336
# [Maximal] clique size: 7 - Count: 318
# [Maximal] clique size: 8 - Count: 208
# [Maximal] clique size: 9 - Count: 132
# [Maximal] clique size: 10 - Count: 73
# [Maximal] clique size: 11 - Count: 36
# [Maximal] clique size: 12 - Count: 7
x <- c(3, 4, 5, 6, 7, 8, 9, 10, 11, 12)
y <- c(1022, 698, 455, 336, 318, 208, 132, 73, 36, 7)
df <- data.frame(x, y)
png(filename="maximal_clique_distribution.png", width=2048, height=2048)
ggplot(df, aes(x = x, y = y)) +
geom_point(size=20, colour="red", shape=20) +
scale_x_continuous("Clique Size", breaks=x) +
scale_y_continuous("Frequency", breaks=y) +
ggtitle("Maximal Cliques Distribution") +
theme(text = element_text(size=50))
dev.off()
|
# wrapper script to run model diagnostics on all HDDM analysis pipelines --------
########## still a working prototype!!
####################
## pipeline parameters to test
# ics: 0 = run locally (laptop paths), non-zero = run on the ICS cluster.
ics <- 0
# nsamples <- paste0("samp",c(1000, 2000, 5000, 10000, 20000, 40000, 80000))
nsamples <- "samp2000"
# tasks <- c("flanker", "recent_probes", "go_nogo")
tasks <- c("flanker", "recent_probes")
full_sample <- c("full_sample")#, "clean_sample")
# data_type and create_plots are declared here but not referenced below.
data_type <- c("model_objects", "diagnostics")
create_plots <- TRUE
pacman::p_load(tidyverse)
# initialize large parameterization list ----------------------------------
##setup directory strings and source HDDM functions
# NOTE(review): `parameterizations` and `hddm_posterior_diagnostics`, used in
# the loop below, are expected to come from the sourced Code/Functions/ files
# -- confirm they are defined there.
basedir <- ifelse(ics == 0,"~/github_repos/PD_Inhibition_DDM", "/gpfs/group/mnh5174/default/Nate/PD_Inhibition_DDM"); setwd(basedir)
hddm_outputdir <- ifelse(ics == 0,"~/ics/Nate/HDDM_outputs_PD_Inhibition", "/gpfs/group/mnh5174/default/Nate/HDDM_outputs_PD_Inhibition")
R.utils::sourceDirectory(file.path(basedir, "Code/Functions/"))
# uber loop that performs posterior checks --------------------------------
suff_stats_all_pipelines <- list()
for(nsamp in nsamples){
for(task in tasks){
for(subs in full_sample){
# Per-pipeline input/output locations.
diagnosdir <- file.path(hddm_outputdir,nsamp, task, subs, "diagnostics")
figuredir <- file.path(basedir,"Figures",nsamp, task, subs)
outdir <- file.path(basedir, "Outputs", "posterior_summaries", nsamp, task, subs)
DIC_path <- file.path(diagnosdir, "dics_all.csv")
# NOTE(review): each iteration assigns to parameterizations[[task]] itself,
# so every model overwrites the previous model's "traces"/"gelman-rubin"
# entries; presumably parameterizations[[task]][[mod]] was intended -- confirm.
for(mod in names(parameterizations[[task]])){
parameterizations[[task]][["traces"]] <- paste0(diagnosdir, "/",mod, "_traces.csv")
parameterizations[[task]][["gelman-rubin"]] <- paste0(diagnosdir, "/gr_",mod,".csv")
parameterizations[[task]][["outdir"]] <- outdir
parameterizations[[task]][["figuredir"]] <- figuredir
}
suff_stats_all_pipelines[[nsamp]][[task]][[subs]] <- hddm_posterior_diagnostics(parameterizations[[task]], DICs = DIC_path, v_contrasts = TRUE,m_digest = "win")
}
}
}
| /Code/DDM/hddm_diagnostics_wrapper.R | no_license | UNCDEPENdLab/PD_Inhibition_DDM | R | false | false | 2,040 | r |
# wrapper script to run model diagnostics on all HDDM analysis pipelines --------
########## still a working prototype!!
####################
## pipeline parameters to test
ics <- 0
# nsamples <- paste0("samp",c(1000, 2000, 5000, 10000, 20000, 40000, 80000))
nsamples <- "samp2000"
# tasks <- c("flanker", "recent_probes", "go_nogo")
tasks <- c("flanker", "recent_probes")
full_sample <- c("full_sample")#, "clean_sample")
data_type <- c("model_objects", "diagnostics")
create_plots <- TRUE
pacman::p_load(tidyverse)
# initialize large parameterization list ----------------------------------
##setup directory strings and source HDDM functions
basedir <- ifelse(ics == 0,"~/github_repos/PD_Inhibition_DDM", "/gpfs/group/mnh5174/default/Nate/PD_Inhibition_DDM"); setwd(basedir)
hddm_outputdir <- ifelse(ics == 0,"~/ics/Nate/HDDM_outputs_PD_Inhibition", "/gpfs/group/mnh5174/default/Nate/HDDM_outputs_PD_Inhibition")
R.utils::sourceDirectory(file.path(basedir, "Code/Functions/"))
# uber loop that performs posterior checks --------------------------------
suff_stats_all_pipelines <- list()
for(nsamp in nsamples){
for(task in tasks){
for(subs in full_sample){
diagnosdir <- file.path(hddm_outputdir,nsamp, task, subs, "diagnostics")
figuredir <- file.path(basedir,"Figures",nsamp, task, subs)
outdir <- file.path(basedir, "Outputs", "posterior_summaries", nsamp, task, subs)
DIC_path <- file.path(diagnosdir, "dics_all.csv")
for(mod in names(parameterizations[[task]])){
parameterizations[[task]][["traces"]] <- paste0(diagnosdir, "/",mod, "_traces.csv")
parameterizations[[task]][["gelman-rubin"]] <- paste0(diagnosdir, "/gr_",mod,".csv")
parameterizations[[task]][["outdir"]] <- outdir
parameterizations[[task]][["figuredir"]] <- figuredir
}
suff_stats_all_pipelines[[nsamp]][[task]][[subs]] <- hddm_posterior_diagnostics(parameterizations[[task]], DICs = DIC_path, v_contrasts = TRUE,m_digest = "win")
}
}
}
|
# Plot 1: histogram of Global Active Power on 2007-02-01 and 2007-02-02
# (UCI "Individual household electric power consumption" dataset).
# Writes plot1.png (480x480).

# "?" marks missing values in this dataset; declaring it via na.strings
# keeps the measurement columns numeric (no factor -> character -> numeric
# round-trip needed later).
data <- read.table("household_power_consumption.txt", header = TRUE,
                   sep = ";", dec = ".", na.strings = "?")
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")

# Keep only the two days of interest (vectorized %in% instead of two
# which() scans). The Date/Time merge done by earlier versions is not
# needed for this histogram.
fil <- data[data$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]

# Open the PNG device before plotting: dev.copy() from a screen device
# fails in headless sessions and can distort the rendered output.
png(file = "plot1.png", width = 480, height = 480)
hist(fil$Global_active_power, col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")  # fixed typo: "Aower" -> "Power"
dev.off()
| /plot1.R | no_license | Elizasg/ExData_Plotting1 | R | false | false | 1,066 | r | data <- read.table("household_power_consumption.txt", header=T, sep=";", dec=".")
data[, "Date"] <- as.Date(data[, "Date"],format = "%d/%m/%Y")
mydate1 <- factor("01/02/2007")
as.Date(mydate1, format = "%d/%m/%Y")
mydate2 <- factor("02/02/2007")
as.Date(mydate2, format = "%d/%m/%Y")
dates1 = which(data[, "Date"]==as.Date(mydate1, format = "%d/%m/%Y"))
dates2 = which(data[, "Date"]==as.Date(mydate2, format = "%d/%m/%Y"))
fil <- data[c(dates1,dates2),]
DT <- paste(as.character(fil$Date), as.character(fil$Time))
Date_Time <- strptime(DT, "%Y-%m-%d %H:%M:%S")
fil[,"Date"] <- as.data.frame(Date_Time)
fil <- fil[,-which(colnames(fil)=="Time")]
colnames(fil)[which(colnames(fil)=="Date")] <- "Date_Time"
fil[,"Global_active_power"] <- as.numeric(as.character(fil$Global_active_power))
hist(fil[,"Global_active_power"], col="red", main = "Global Active Power", xlab="Global Active Aower (kilowatts)")
dev.copy(png, file = "plot1.png", width = 480, height = 480, units = "px") ## Copy my plot to a PNG file
dev.off() ## Don't forget to close the PNG device!
|
#' @title differential binding analysis
#' @description use DBF to test differential peaks
#' @param counts output of \link{countTable}
#' @param ... parameters could be passed to \link[DESeq2]{DESeqDataSet}
#' @return an list of \link[GenomicRanges]{GRanges}
#' @import DESeq2
#' @import IRanges
#' @import BiocGenerics
#' @import SummarizedExperiment
#' @importFrom stats p.adjust pgamma
#' @importFrom ecodist MRM
#' @export
#' @author Jianhong Ou
#' @examples
#' path <- system.file("extdata", package = "diffPeaks", mustWork = TRUE)
#' bamfiles <- dir(path, "bam$")
#' peaks <- dir(path, "bed$")
#' p <- mergePeaks(file.path(path, peaks))
#' colData <- DataFrame(samples=bamfiles, condition=sub(".rep..bam", "", bamfiles))
#' cnt <- countTable(p, file.path(path, bamfiles), colData)
#' DBFpeaks(cnt, design= ~condition)
#'
DBFpeaks <- function(counts, ...){
# counts: output of countTable(); must contain $tile.feature (tiled
# SummarizedExperiment, tiles labeled by parent feature in mcols()$X)
# and $feature (the untiled peak regions).
isCountTable(counts)
## check the length of each peak,
## case 2: use the DESeq2 results, combine two results
## case 3: use the DESeq2 results, combine three results
## case 4~: use DBF.test
colData <- colData(counts$tile.feature)
gr <- rowRanges(counts$tile.feature)
comparison <- ""
# Standard DESeq2 fit on the tiled counts, then rlog-transform the counts
# for the distance-based (DBF) test below.
dds <- DESeqDataSet(counts$tile.feature, ...)
dds <- DESeq(dds, betaPrior = FALSE, fitType = "local")
rld <- rlog(dds, blind = FALSE)
tile.cnt <- assay(rld)
tile.cnt <- as.data.frame(tile.cnt)
# Only a single two-level contrast (Intercept + one coefficient) is
# supported; extract shrunken LFCs and parse the contrast description,
# e.g. "condition_B_vs_A" -> c("condition", "B", "A").
if(length(resultsNames(dds))==2 && resultsNames(dds)[1]=="Intercept"){
res <- results(dds)
resLFC <- lfcShrink(dds, coef = 2, res = res)
gr1 <- gr
mcols(gr1) <- cbind(mcols(gr), resLFC)
comparison <- strsplit(resultsNames(dds)[2], "_")[[1]][-3]
}else{
stop("Only simple comparison is supported.")
}
stopifnot(length(comparison)==3)
groups <- colData[, comparison[1]]
gpA <- comparison[2]
gpB <- comparison[3]
if(sum(groups %in% gpA)!=sum(groups %in% gpB)){
stop("unbalanced groups are not supported.")
}
# Split the rlog counts by parent peak and run the DBF curve-comparison
# test once per peak.
tile.cnt.s <- split(tile.cnt, as.character(gr$X))
tile.cnt.dbf <- lapply(tile.cnt.s, function(.ele){
# Stack group-A tile rows on top of group-B tile rows.
data <- rbind(as.matrix(unname(.ele[, which(groups %in% gpA)])),
as.matrix(unname(.ele[, which(groups %in% gpB)])))
rownames(data) <- NULL
# Largest (by magnitude) per-tile difference of row sums between the two
# groups; used as a signed fold-change-like summary for this peak.
maxDist <- rowSums(data)
maxDist <- maxDist[seq.int(nrow(.ele))] -
maxDist[nrow(.ele) + (seq.int(nrow(.ele)))]
maxDist <- maxDist[which.max(abs(maxDist))]
baseMean <- 2^mean(as.matrix(.ele))
group.labels <- rep(c(gpA, gpB), each=nrow(.ele))
# DBF test on Mahalanobis distances; NAs are returned when the test fails
# (e.g. degenerate distance matrix).
stats <- tryCatch(DBF.test(as.matrix(ecodist::distance(data, "mahalanobis")),
group.labels, nrow(data)),
error=function(e){return(c(NA, NA))})
c(baseMean=baseMean, log2FoldChange=maxDist, stats)
})
tile.cnt.n <- as.numeric(names(tile.cnt.dbf))
tile.cnt.dbf <- do.call(rbind, tile.cnt.dbf)
dbf.res <- data.frame(X=tile.cnt.n, tile.cnt.dbf)
# Re-align DBF rows with the order of the original (untiled) features.
dbf.res <- dbf.res[match(seq_along(rowRanges(counts$feature)), dbf.res$X), ]
dbf.res$padj <- p.adjust(dbf.res$dbf.p.value, method="BH")
## case 2
## case 3
# Keep, per parent feature, the single most significant tile from DESeq2
# and report it over the original feature coordinates.
gr1 <- gr1[order(gr1$pvalue, decreasing = FALSE)]
gr1 <- gr1[!duplicated(gr1$X)]
gr1 <- gr1[order(gr1$X, decreasing = FALSE)]
gr1$range <- paste(start(gr1), end(gr1), sep="-")
ranges(gr1) <- ranges(counts$feature)
gr1$type <- "subRange"
## case 4~
# Where the DBF test produced a p-value, overwrite the DESeq2 statistics
# with the curve-comparison results.
nNA <- !is.na(dbf.res$dbf.p.value)
gr1[nNA]$type <- "curveComparison"
gr1[nNA]$baseMean <- dbf.res[nNA, "baseMean"]
gr1[nNA]$log2FoldChange <- dbf.res[nNA, "log2FoldChange"]
gr1[nNA]$stat <- dbf.res[nNA, "dbf.statistic"]
gr1[nNA]$pvalue <- dbf.res[nNA, "dbf.p.value"]
gr1[nNA]$padj <- dbf.res[nNA, "padj"] ## adjust p value too high.
# For DBF-tested peaks, replace the reported range with the full span of
# all tiles belonging to each parent feature.
gr2 <- split(gr, as.character(gr$X))
gr2 <- unlist(GRangesList(lapply(gr2, range)))
gr2 <- gr2[order(as.numeric(names(gr2)))]
stopifnot(identical(as.integer(names(gr2)), seq_along(gr1)))
gr1[nNA]$range <- paste(start(gr2), end(gr2), sep="-")[nNA]
gr1$X <- NULL
gr1
} | /R/DBFpeaks.R | no_license | jianhong/diffPeaks | R | false | false | 3,926 | r | #' @title differential binding analysis
#' @description use DBF to test differential peaks
#' @param counts output of \link{countTable}
#' @param ... parameters could be passed to \link[DESeq2]{DESeqDataSet}
#' @return an list of \link[GenomicRanges]{GRanges}
#' @import DESeq2
#' @import IRanges
#' @import BiocGenerics
#' @import SummarizedExperiment
#' @importFrom stats p.adjust pgamma
#' @importFrom ecodist MRM
#' @export
#' @author Jianhong Ou
#' @examples
#' path <- system.file("extdata", package = "diffPeaks", mustWork = TRUE)
#' bamfiles <- dir(path, "bam$")
#' peaks <- dir(path, "bed$")
#' p <- mergePeaks(file.path(path, peaks))
#' colData <- DataFrame(samples=bamfiles, condition=sub(".rep..bam", "", bamfiles))
#' cnt <- countTable(p, file.path(path, bamfiles), colData)
#' DBFpeaks(cnt, design= ~condition)
#'
DBFpeaks <- function(counts, ...){
isCountTable(counts)
## check the length of each peak,
## case 2: use the DESeq2 results, combine two results
## case 3: use the DESeq2 results, combine three results
## case 4~: use DBF.test
colData <- colData(counts$tile.feature)
gr <- rowRanges(counts$tile.feature)
comparison <- ""
dds <- DESeqDataSet(counts$tile.feature, ...)
dds <- DESeq(dds, betaPrior = FALSE, fitType = "local")
rld <- rlog(dds, blind = FALSE)
tile.cnt <- assay(rld)
tile.cnt <- as.data.frame(tile.cnt)
if(length(resultsNames(dds))==2 && resultsNames(dds)[1]=="Intercept"){
res <- results(dds)
resLFC <- lfcShrink(dds, coef = 2, res = res)
gr1 <- gr
mcols(gr1) <- cbind(mcols(gr), resLFC)
comparison <- strsplit(resultsNames(dds)[2], "_")[[1]][-3]
}else{
stop("Only simple comparison is supported.")
}
stopifnot(length(comparison)==3)
groups <- colData[, comparison[1]]
gpA <- comparison[2]
gpB <- comparison[3]
if(sum(groups %in% gpA)!=sum(groups %in% gpB)){
stop("unbalanced groups are not supported.")
}
tile.cnt.s <- split(tile.cnt, as.character(gr$X))
tile.cnt.dbf <- lapply(tile.cnt.s, function(.ele){
data <- rbind(as.matrix(unname(.ele[, which(groups %in% gpA)])),
as.matrix(unname(.ele[, which(groups %in% gpB)])))
rownames(data) <- NULL
maxDist <- rowSums(data)
maxDist <- maxDist[seq.int(nrow(.ele))] -
maxDist[nrow(.ele) + (seq.int(nrow(.ele)))]
maxDist <- maxDist[which.max(abs(maxDist))]
baseMean <- 2^mean(as.matrix(.ele))
group.labels <- rep(c(gpA, gpB), each=nrow(.ele))
stats <- tryCatch(DBF.test(as.matrix(ecodist::distance(data, "mahalanobis")),
group.labels, nrow(data)),
error=function(e){return(c(NA, NA))})
c(baseMean=baseMean, log2FoldChange=maxDist, stats)
})
tile.cnt.n <- as.numeric(names(tile.cnt.dbf))
tile.cnt.dbf <- do.call(rbind, tile.cnt.dbf)
dbf.res <- data.frame(X=tile.cnt.n, tile.cnt.dbf)
dbf.res <- dbf.res[match(seq_along(rowRanges(counts$feature)), dbf.res$X), ]
dbf.res$padj <- p.adjust(dbf.res$dbf.p.value, method="BH")
## case 2
## case 3
gr1 <- gr1[order(gr1$pvalue, decreasing = FALSE)]
gr1 <- gr1[!duplicated(gr1$X)]
gr1 <- gr1[order(gr1$X, decreasing = FALSE)]
gr1$range <- paste(start(gr1), end(gr1), sep="-")
ranges(gr1) <- ranges(counts$feature)
gr1$type <- "subRange"
## case 4~
nNA <- !is.na(dbf.res$dbf.p.value)
gr1[nNA]$type <- "curveComparison"
gr1[nNA]$baseMean <- dbf.res[nNA, "baseMean"]
gr1[nNA]$log2FoldChange <- dbf.res[nNA, "log2FoldChange"]
gr1[nNA]$stat <- dbf.res[nNA, "dbf.statistic"]
gr1[nNA]$pvalue <- dbf.res[nNA, "dbf.p.value"]
gr1[nNA]$padj <- dbf.res[nNA, "padj"] ## adjust p value too high.
gr2 <- split(gr, as.character(gr$X))
gr2 <- unlist(GRangesList(lapply(gr2, range)))
gr2 <- gr2[order(as.numeric(names(gr2)))]
stopifnot(identical(as.integer(names(gr2)), seq_along(gr1)))
gr1[nNA]$range <- paste(start(gr2), end(gr2), sep="-")[nNA]
gr1$X <- NULL
gr1
} |
# Extracted example usage of the metatest package (from its Rd file).
library(metatest)
### Name: metatest
### Title: metatest fits and tests a metaregression model
### Aliases: metatest summary.metatest print.metatest
### Keywords: htest models regression
### ** Examples
# Load the example dataset shipped with the metatest package.
data(metadata)
# Fit a meta-regression of y on moderator x; yvar is presumably the
# per-study sampling variance of y -- see ?metatest to confirm.
res <- metatest(y~x,yvar,data=metadata)
res
| /data/genthat_extracted_code/metatest/examples/metatest.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 269 | r | library(metatest)
### Name: metatest
### Title: metatest fits and tests a metaregression model
### Aliases: metatest summary.metatest print.metatest
### Keywords: htest models regression
### ** Examples
data(metadata)
res <- metatest(y~x,yvar,data=metadata)
res
|
#' Create normalizations of overlapping read counts.
#'
#' FPKM is short for "Fragments Per Kilobase of transcript per Million
#' fragments in library". When calculating RiboSeq data FPKM over ORFs,
#' use ORFs as `grl`.
#' When calculating RNASeq data FPKM, use full transcripts as
#' `grl`. It is equal to RPKM given that you do not have paired end reads.
#'
#' Note also that you must consider if you will use the whole read
#' library or just the reads overlapping `grl`.
#' To only overlap do:
#' reads <- reads[countOverlaps(reads, grl) > 0]
#' @references doi: 10.1038/nbt.1621
#' @param grl a \code{\link{GRangesList}} object
#' can be either transcripts, 5' utrs, cds', 3' utrs or
#' ORFs as a special case (uORFs, potential new cds' etc). If
#' regions are not spliced you can send a \code{\link{GRanges}} object.
#' @param reads a GAlignment, GRanges or GRangesList object,
#' usually of RiboSeq, RnaSeq, CageSeq, etc.
#' @param pseudoCount an integer, by default is 0, set it to 1 if you want to
#' avoid NA and inf values.
#' @return a numeric vector with the fpkm values
#' @export
#' @family features
#' @examples
#' ORF <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 10, 20),
#' end = c(5, 15, 25)),
#' strand = "+")
#' grl <- GRangesList(tx1_1 = ORF)
#' RFP <- GRanges("1", IRanges(25, 25),"+")
#' fpkm(grl, RFP)
#'
fpkm <- function(grl, reads, pseudoCount = 0) {
  # Validate the region container, then compute per-group feature lengths:
  # summed exon widths for a GRangesList, plain widths for a GRanges.
  if (!is.gr_or_grl(grl)) {
    stop("grl must be GRangesList or GRanges")
  }
  grl_len <- if (is.grl(grl)) widthPerGroup(grl, FALSE) else width(grl)
  # Reads overlapping each feature, normalized by feature length and
  # total library size; pseudoCount guards against 0/NA downstream.
  hits <- countOverlaps(grl, reads)
  fpkm_calc(hits, grl_len, length(reads)) + pseudoCount
}
#' Calucalte entropy value of overlapping input reads per GRanges.
#'
#' Calculates entropy of the `reads` coverage over each `grl` group.
#' The entropy value per group is a real number in the interval (0:1),
#' where 0 indicates no variance in reads over group.
#' For example c(0,0,0,0) has 0 entropy, since no reads overlap.
#' @param grl a \code{\link{GRangesList}} that the reads will
#' be overlapped with
#' @param reads a GAlignment, GRanges or GRangesList object,
#' usually of RiboSeq, RnaSeq, CageSeq, etc.
#' @return A numeric vector containing one entropy value per element in
#' `grl`
#' @family features
#' @export
#' @examples
#' # a toy example with ribo-seq p-shifted reads
#' ORF <- GRanges("1", ranges = IRanges(start = c(1, 12, 22),
#' end = c(10, 20, 32)),
#' strand = "+",
#' names = rep("tx1_1", 3))
#' names(ORF) <- rep("tx1", 3)
#' grl <- GRangesList(tx1_1 = ORF)
#' reads <- GRanges("1", IRanges(c(25, 35), c(25, 35)), "+")
#' # grl must have same names as cds + _1 etc, so that they can be matched.
#' entropy(grl, reads)
#' # or on cds
#' cdsORF <- GRanges("1", IRanges(35, 44), "+", names = "tx1")
#' names(cdsORF) <- "tx1"
#' cds <- GRangesList(tx1 = cdsORF)
#' entropy(cds, reads)
#'
entropy <- function(grl, reads) {
# Optimize: only compute entropy for groups that actually have read hits;
# groups without hits keep the default value 0.
validIndices <- hasHits(grl, reads)
if (!any(validIndices)) { # no variance in countList, 0 entropy
return(rep(0, length(validIndices)))
}
grl <- grl[validIndices]
# Duplicate groups share one computation; reOrdering maps results back.
reOrdering <- uniqueOrder(grl)
# entropy function, interval 0:1 real number
# Xi is the ratio of hits per position per group
Xi <- codonSumsPerGroup(grl, reads)
validXi <- Xi$codonSums > 0 # avoid log2(0)
Xi[, `:=` (Hx = rep(0, nrow(Xi)))]
Xi[validXi, Hx := codonSums * log2(codonSums)] # Hx: The codon sum part
Xi <- Xi[, .(Hx = sum(Hx)), by = genes]
codons <- numCodons(grl)
# MHx is the maximum possible entropy for a group with `codons` positions;
# dividing by it rescales Hx into the (0:1) interval.
MHx <- 1/codons
Xi[, MHx := MHx * log2(MHx) * codons] # MHx: The length part
Xi[, entropy := Hx / MHx] # entropy is read sums over lengths
entropy <- rep(0.0, length(validIndices))
# non 0 entropy values set to HX / MHX
# (inside the data.table call, `entropy` refers to the column just created,
# not the local vector of the same name)
Xi[is.na(entropy), entropy := 0.]
tempEntro <- Xi$entropy[reOrdering] # order back from unique
entropy[validIndices] <- tempEntro # order back from hits
return(entropy)
}
#' Fragment Length Organization Similarity Score
#'
#' This feature is usually calcualted only for RiboSeq reads. For reads of
#' width between `start` and `end`,
#' sum the fraction of RiboSeq reads (per widths)
#' that overlap ORFs and normalize by CDS.
#'
#' Pseudo explanation of the function:
#' \preformatted{
#' SUM[start to stop]((grl[start:end][name]/grl) / (cds[start:end][name]/cds))
#' }
#' Please read more in the article.
#' @references doi: 10.1016/j.celrep.2014.07.045
#' @param grl a \code{\link{GRangesList}} object with ORFs
#' @param RFP ribosomal footprints, given as Galignment or GRanges object,
#' must be already shifted and resized to the p-site
#' @param cds a \code{\link{GRangesList}} of coding sequences,
#' cds has to have names as grl so that they can be matched
#' @param start usually 26, the start of the floss interval
#' @param end usually 34, the end of the floss interval
#' @return a vector of FLOSS of length same as grl
#' @family features
#' @export
#' @examples
#' ORF <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 12, 22),
#' end = c(10, 20, 32)),
#' strand = "+")
#' grl <- GRangesList(tx1_1 = ORF)
#' # RFP is 1 width position based GRanges
#' RFP <- GRanges("1", IRanges(c(1, 25, 35, 38), width = 1), "+")
#' score(RFP) <- c(28, 28, 28, 29) # original width in score col
#' cds <- GRangesList(tx1 = GRanges("1", IRanges(35, 44), "+"))
#' # grl must have same names as cds + _1 etc, so that they can be matched.
#' floss(grl, RFP, cds)
#' # or change ribosome start/stop, more strict
#' floss(grl, RFP, cds, 28, 28)
#'
floss <- function(grl, RFP, cds, start = 26, end = 34){
if (start > end) stop("start is bigger than end")
if (is.grl(class(RFP))) {
stop("RFP must be either GAlignment or GRanges type")
}
# for orfs
# Collect footprints overlapping each ORF and keep only reads whose
# (original) width lies within [start, end].
overlaps <- findOverlaps(grl, RFP)
rfpWidth <- readWidths(RFP[to(overlaps)])
rfpPassFilter <- (rfpWidth >= start) & (rfpWidth <= end)
rfpValidMatch <- rfpWidth[rfpPassFilter]
ORFGrouping <- from(overlaps)[rfpPassFilter]
if (sum(as.numeric(ORFGrouping)) == 0) {
return(as.numeric(rep(0, length(grl))))
}
whichNoHit <- NULL # which ribo-seq did not hit grl
if (length(unique(ORFGrouping)) != length(grl)) {
whichNoHit <- S4Vectors::setdiff.Vector(
seq_along(grl), unique(ORFGrouping))
}
# Per-ORF fraction of reads at each read-length category.
orfFractions <- split(rfpValidMatch, ORFGrouping)
listing<- IRanges::RleList(orfFractions)
tableFracs <- table(listing)
colnames(tableFracs) <- NULL
orfFractions <- lapply(seq_along(listing), function(x) {
tableFracs[x,] / sum(tableFracs[x,])
})
# for cds
# Global read-length distribution over all CDS regions (the reference).
overlapsCds <- findOverlaps(cds, RFP)
rfpWidth <- readWidths(RFP[to(overlapsCds)])
rfpPassFilterCDS <- ((rfpWidth >= start) & (rfpWidth <= end))
rfpValidMatchCDS <- rfpWidth[rfpPassFilterCDS]
cdsFractions <- split(rfpValidMatchCDS, rfpValidMatchCDS)
totalLength <- length(rfpValidMatchCDS)
cdsFractions <- vapply(cdsFractions, FUN.VALUE = c(1.0), FUN = function(x) {
length(x) / totalLength
})
cdsFractions <- as.double(cdsFractions)
# floss score ->
# Half the total absolute difference between the ORF's length distribution
# and the CDS reference distribution (total variation distance).
# NOTE(review): this subtraction assumes the ORF fraction vector and the
# CDS fraction vector cover the same read-length categories in the same
# order -- confirm for datasets where some widths are absent from the CDS.
score <- vapply(seq_along(orfFractions), FUN.VALUE = c(1.0),
FUN = function(x) {
sum(abs(orfFractions[[x]] - cdsFractions)) * 0.5
})
# ORFs with no qualifying reads get a score of 0.
if (!is.null(whichNoHit)) {
tempScores <- as.numeric(rep(NA, length(grl)))
tempScores[unique(ORFGrouping)] <- score
tempScores[whichNoHit] <- 0.
score <- tempScores
}
if (length(score) != length(grl) || anyNA(score)) {
stop("could not find floss-score for all objects, most",
"likely objects are wrongly annotated.")
}
return(score)
}
#' Translational efficiency
#'
#' Uses RnaSeq and RiboSeq to get translational efficiency of every element in
#' `grl`. Translational efficiency is defined as:
#' \preformatted{
#' (density of RPF within ORF) / (RNA expression of ORFs transcript)
#' }
#' @references doi: 10.1126/science.1168978
#' @param grl a \code{\link{GRangesList}} object
#' can be either transcripts, 5' utrs, cds', 3' utrs or
#' ORFs as a special case (uORFs, potential new cds' etc).
#' @param RNA RnaSeq reads as GAlignment, GRanges
#' or GRangesList object
#' @param RFP RiboSeq reads as GAlignment, GRanges
#' or GRangesList object
#' @param tx a GRangesList of the transcripts. If you used cage data, then
#' the tss for the the leaders have changed, therefor the tx lengths have
#' changed. To account for that call:
#' `
#' translationalEff(grl, RNA, RFP, tx = extendLeaders(tx, cageFiveUTRs))
#' ` where cageFiveUTRs are the reannotated by CageSeq data leaders.
#' @param with.fpkm logical F, if true return the fpkm values together with
#' translational efficiency
#' @param pseudoCount an integer, 0, set it to 1 if you want to avoid NA and
#' inf values. It also helps against bias from low depth libraries.
#' @return a numeric vector of fpkm ratios, if with.fpkm is TRUE, return a
#' data.table with te and fpkm values
#' @export
#' @importFrom data.table data.table
#' @family features
#' @examples
#' ORF <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 10, 20), end = c(5, 15, 25)),
#' strand = "+")
#' grl <- GRangesList(tx1_1 = ORF)
#' RFP <- GRanges("1", IRanges(25, 25), "+")
#' RNA <- GRanges("1", IRanges(1, 50), "+")
#' tx <- GRangesList(tx1 = GRanges("1", IRanges(1, 50), "+"))
#' # grl must have same names as cds + _1 etc, so that they can be matched.
#' te <- translationalEff(grl, RNA, RFP, tx, with.fpkm = TRUE, pseudoCount = 1)
#' te$fpkmRFP
#' te$te
#'
translationalEff <- function(grl, RNA, RFP, tx, with.fpkm = FALSE,
                             pseudoCount = 0) {
  # Restrict the transcript set to the transcripts the regions in `grl`
  # belong to, matched by transcript name.
  tx <- tx[txNames(grl)]
  # RNA-seq expression normalized over full transcript lengths; Ribo-seq
  # density normalized over the region lengths.
  rnaExpression <- fpkm(tx, RNA, pseudoCount)
  riboDensity <- fpkm(grl, RFP, pseudoCount)
  te <- riboDensity / rnaExpression
  if (!with.fpkm) {
    return(te)
  }
  # Optionally hand back the underlying FPKM values alongside TE.
  data.table(fpkmRFP = riboDensity, fpkmRNA = rnaExpression, te = te)
}
#' Disengagement score (DS)
#'
#' Disengagement score is defined as
#' \preformatted{(RPFs over ORF)/(RPFs downstream to transcript end)}
#' A pseudo-count of one is added to both the ORF and downstream sums.
#' @references doi: 10.1242/dev.098344
#' @param grl a \code{\link{GRangesList}} object
#' with usually either leaders, cds', 3' utrs or ORFs.
#' @param RFP RiboSeq reads as GAlignment, GRanges
#' or GRangesList object
#' @param GtfOrTx If it is \code{\link{TxDb}} object
#' transcripts will be extracted using
#' \code{exonsBy(Gtf, by = "tx", use.names = TRUE)}.
#' Else it must be \code{\link{GRangesList}}
#' @param RFP.sorted logical (F), an optimizer, have you ran this line:
#' \code{RFP <- sort(RFP[countOverlaps(RFP, tx, type = "within") > 0])}
#' Normally not touched, for internal optimization purposes.
#' @return a named vector of numeric values of scores
#' @export
#' @family features
#' @examples
#' ORF <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 10, 20), end = c(5, 15, 25)),
#' strand = "+")
#' grl <- GRangesList(tx1_1 = ORF)
#' tx <- GRangesList(tx1 = GRanges("1", IRanges(1, 50), "+"))
#' RFP <- GRanges("1", IRanges(c(1,10,20,30,40), width = 3), "+")
#' disengagementScore(grl, RFP, tx)
#'
disengagementScore <- function(grl, RFP, GtfOrTx, RFP.sorted = FALSE) {
  # Load transcripts from TxDb, or use GtfOrTx directly if already a GRangesList
  tx <- loadRegion(GtfOrTx)
  # exclude non hits and set them to 0
  # validIndices: one logical per ORF, TRUE if its transcript has any RFP hit
  validIndices <- hasHits(tx, RFP)
  validIndices <- validIndices[data.table::chmatch(txNames(grl), names(tx))]
  if (!any(validIndices)) { # if no hits
    # No transcript has reads: downstream sum is the pseudo-count 1,
    # so the score reduces to (ORF hits + 1) / 1
    score <- countOverlaps(grl, RFP) + 1
    names(score) <- NULL
    return(score)
  }
  # Denominator defaults to the pseudo-count 1 for ORFs without hits
  overlapDownstream <- rep(1, length(grl))
  grlStops <- stopSites(grl[validIndices], asGR = FALSE, is.sorted = TRUE)
  # Transcript region downstream of each ORF's stop site
  downstreamTx <- downstreamOfPerGroup(tx[txNames(grl)][validIndices],
                                       grlStops)
  # check for big lists
  if (length(downstreamTx) > 5e5) {
    # Optimization: restrict reads to those within transcripts, then count
    # overlaps only on unique downstream groups and expand back via ordering
    if(!RFP.sorted){
      RFP <- sort(RFP[countOverlaps(RFP, tx, type = "within") > 0])
    }
    ordering <- uniqueOrder(downstreamTx)
    downstreamTx <- uniqueGroups(downstreamTx)
    overlapDownstream[validIndices] <- countOverlaps(downstreamTx,
                                                     RFP)[ordering] + 1
  } else {
    overlapDownstream[validIndices] <- countOverlaps(downstreamTx, RFP) + 1
  }
  # Numerator: reads over the ORF itself, with pseudo-count 1
  overlapGrl <- countOverlaps(grl, RFP) + 1
  score <- overlapGrl / overlapDownstream
  names(score) <- NULL
  return(score)
}
#' Inside/Outside score (IO)
#'
#' Inside/Outside score is defined as
#' \preformatted{(reads over ORF)/(reads outside ORF and within transcript)}
#' A pseudo-count of one is added to both the ORF and outside sums.
#' @references doi: 10.1242/dev.098345
#' @param grl a \code{\link{GRangesList}} object
#' with usually either leaders, cds', 3' utrs or ORFs
#' @param RFP ribo seq reads as GAlignment, GRanges or GRangesList object
#' @param GtfOrTx if Gtf: a TxDb object of a gtf file that transcripts will be
#' extracted with `exonsBy(Gtf, by = "tx", use.names = TRUE)`, if
#' a GrangesList will use as is
#' @param ds numeric vector (NULL), disengagement score. If you have already
#' calculated \code{\link{disengagementScore}}, input here to save time.
#' @param RFP.sorted logical (F), have you ran this line:
#' \code{RFP <- sort(RFP[countOverlaps(RFP, tx, type = "within") > 0])}
#' Normally not touched, for internal optimization purposes.
#' @return a named vector of numeric values of scores
#' @importFrom data.table rbindlist
#' @family features
#' @export
#' @examples
#' # Check inside outside score of a ORF within a transcript
#' ORF <- GRanges("1",
#' ranges = IRanges(start = c(20, 30, 40),
#' end = c(25, 35, 45)),
#' strand = "+")
#'
#' grl <- GRangesList(tx1_1 = ORF)
#'
#' tx1 <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 10, 20, 30, 40, 50),
#' end = c(5, 15, 25, 35, 45, 200)),
#' strand = "+")
#' tx <- GRangesList(tx1 = tx1)
#' RFP <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 4, 30, 60, 80, 90),
#' end = c(30, 33, 63, 90, 110, 120)),
#' strand = "+")
#'
#' insideOutsideORF(grl, RFP, tx)
#'
insideOutsideORF <- function(grl, RFP, GtfOrTx, ds = NULL,
                             RFP.sorted = FALSE) {
  tx <- loadRegion(GtfOrTx)
  # Pre-filter/sort reads against transcripts unless caller already did
  if (!RFP.sorted) RFP <- optimizeReads(tx, RFP)
  # Numerator: reads over each ORF, with pseudo-count 1 (full grl length)
  overlapGrl <- countOverlaps(grl, RFP) + 1
  # find tx with hits
  validIndices <- hasHits(tx, RFP)
  validIndices <- validIndices[data.table::chmatch(txNames(grl), names(tx))]
  if (!any(validIndices)) { # if no hits
    # Denominator is the pseudo-count 1 everywhere, so score == numerator
    names(overlapGrl) <- NULL
    return(overlapGrl)
  }
  # Subset tx and grl to ORFs whose transcript has reads
  tx <- tx[txNames(grl)][validIndices]
  grl <- grl[validIndices]
  grlStarts <- startSites(grl, asGR = FALSE, is.sorted = TRUE)
  # Transcript region upstream of each ORF start
  upstreamTx <- upstreamOfPerGroup(tx, grlStarts, allowOutside = FALSE)
  overlapTxOutside <- rep(1, length(validIndices))
  if (!is.null(ds)) { # save time here if ds is defined
    # Recover downstream counts from the disengagement score:
    # ds = overlapGrl / downstream  =>  downstream = overlapGrl / ds
    downstreamCounts <- 1 / (ds / overlapGrl)
    upstreamCounts <- rep(1, length(validIndices))
    upstreamCounts[validIndices] <- countOverlaps(upstreamTx, RFP)
    overlapTxOutside <- downstreamCounts + upstreamCounts
  } else { # else make ds again
    grlStops <- stopSites(grl, asGR = FALSE, is.sorted = TRUE)
    downstreamTx <- downstreamOfPerGroup(tx, grlStops)
    # Merge upstream and downstream exons into one "outside" region per tx
    dtmerge <- data.table::rbindlist(l = list(as.data.table(upstreamTx),
                                              as.data.table(downstreamTx)))
    group <- NULL # for avoiding warning
    txOutside <- makeGRangesListFromDataFrame(
      dtmerge[order(group)], split.field = "group")
    overlapTxOutside[validIndices] <- countOverlaps(txOutside, RFP) + 1
  }
  scores <- overlapGrl / overlapTxOutside
  names(scores) = NULL
  return(scores)
}
#' Ribosome Release Score (RRS)
#'
#' Ribosome Release Score is defined as
#' \preformatted{(RPFs over ORF)/(RPFs over 3' utrs)} and
#' additionaly normalized by lengths.
#' If RNA is added as argument, it will normalize by RNA counts
#' to justify location of 3' utrs.
#' It can be understood as a ribosome stalling feature.
#' A pseudo-count of one was added to both the ORF and downstream sums.
#' @references doi: 10.1016/j.cell.2013.06.009
#' @param grl a \code{\link{GRangesList}} object
#' with usually either leaders,
#' cds', 3' utrs or ORFs.
#' @param RFP RiboSeq reads as GAlignment, GRanges
#' or GRangesList object
#' @param GtfOrThreeUtrs if Gtf: a TxDb object of a gtf file transcripts is
#' called from: `threeUTRsByTranscript(Gtf, use.names = TRUE)`,
#' if object is GRangesList, it is presumed to be the 3' utrs
#' @param RNA RnaSeq reads as GAlignment, GRanges
#' or GRangesList object
#' @return a named vector of numeric values of scores, NA means that
#' no 3' utr was found for that transcript.
#' @export
#' @family features
#' @examples
#' ORF <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 10, 20), end = c(5, 15, 25)),
#' strand = "+")
#' grl <- GRangesList(tx1_1 = ORF)
#' threeUTRs <- GRangesList(tx1 = GRanges("1", IRanges(40, 50), "+"))
#' RFP <- GRanges("1", IRanges(25, 25), "+")
#' RNA <- GRanges("1", IRanges(1, 50), "+")
#' ribosomeReleaseScore(grl, RFP, threeUTRs, RNA)
#'
ribosomeReleaseScore <- function(grl, RFP, GtfOrThreeUtrs, RNA = NULL){
  # Load 3' UTRs from TxDb, or use GtfOrThreeUtrs directly if a GRangesList
  threeUTRs <- loadRegion(GtfOrThreeUtrs, part = "trailer")
  # check that naming is correct, else change it.
  orfNames <- txNames(grl, FALSE)
  # Keep only ORF / 3' UTR pairs that can be matched by transcript name
  validNamesThree <- names(threeUTRs) %in% orfNames
  validNamesGRL <- orfNames %in% names(threeUTRs)
  # NA marks ORFs with no matching 3' UTR; vector keeps original grl length
  rrs <- rep(NA,length(grl))
  if (sum(validNamesGRL) != length(grl)) {
    threeUTRs <- threeUTRs[validNamesThree]
    grl <- grl[validNamesGRL]
  }
  overlapGrl <- countOverlaps(grl, RFP) + 1
  # Reorder 3' UTRs to match the ORF order
  threeUTRs <- threeUTRs[orfNames[validNamesGRL]]
  overlapThreeUtrs <- countOverlaps(threeUTRs, RFP) + 1
  # Length-normalized ratio: ORF read density over 3' UTR read density
  rrs[validNamesGRL] <- (overlapGrl / widthPerGroup(grl)) /
    (overlapThreeUtrs / widthPerGroup(threeUTRs))
  if (!is.null(RNA)) { # normalize by rna ratio
    rnaRatio <- (countOverlaps(grl, RNA) + 1) /
      (countOverlaps(threeUTRs, RNA) + 1)
    rrs[validNamesGRL] <- rrs[validNamesGRL] / rnaRatio
  }
  names(rrs) <- NULL
  return(rrs)
}
#' Ribosome Stalling Score (RSS)
#'
#' Is defined as \preformatted{(RPFs over ORF stop sites)/(RPFs over ORFs)}
#' and normalized by lengths
#' A pseudo-count of one was added to both the ORF and downstream sums.
#' @references doi: 10.1016/j.cels.2017.08.004
#' @param grl a \code{\link{GRangesList}} object
#' with usually either leaders,
#' cds', 3' utrs or ORFs.
#' @param RFP RiboSeq reads as GAlignment, GRanges
#' or GRangesList object
#' @return a named vector of numeric values of RSS scores
#' @export
#' @family features
#' @examples
#' ORF <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 10, 20), end = c(5, 15, 25)),
#' strand = "+")
#' grl <- GRangesList(tx1_1 = ORF)
#' RFP <- GRanges("1", IRanges(25, 25), "+")
#' ribosomeStallingScore(grl, RFP)
#'
ribosomeStallingScore <- function(grl, RFP){
  # Reads over the whole ORFs and over their stop codons (width-3 windows)
  hitsORF <- countOverlaps(grl, RFP)
  hitsStop <- countOverlaps(stopCodons(grl, is.sorted = TRUE), RFP)
  # Length-normalized density ratio, pseudo-count of 1 on both sides
  orfLengths <- widthPerGroup(grl, FALSE)
  score <- ((hitsStop + 1) / 3) / ((hitsORF + 1) / orfLengths)
  unname(score)
}
#' Start region coverage
#'
#' Get the number of reads in the start region of each ORF. If you want the
#' start codon coverage only, set upstream = 0. Standard is 2 upstream
#' and 2 downstream, a width 5 window centered at start site. since
#' p-shifting is not 100% accurate, this window is usually the reads from the
#' start site.
#'
#' If tx is null, then upstream will be force to 0 and downstream to
#' a maximum of grl width. Since there is no reference for splicing.
#' @param RFP ribo seq reads as GAlignment, GRanges or GRangesList object
#' @inheritParams startRegion
#' @family features
#' @return a numeric vector of counts
startRegionCoverage <- function(grl, RFP, tx = NULL, is.sorted = TRUE,
                                upstream = 2L, downstream = 2L) {
  # Window centered on each start site, then count overlapping reads
  window <- startRegion(grl, tx, is.sorted, upstream, downstream)
  countOverlaps(window, RFP)
}
#' Get initiation score for a GRangesList of ORFs
#'
#' initiationScore tries to check how much each TIS region resembles, the
#' average of the CDS TIS regions.
#'
#' Since this features uses a distance matrix for scoring, values are
#' distributed like this:
#' As result there is one value per ORF:
#' 0.000: means that ORF had no reads
#' -1.000: means that ORF is identical to average of CDS
#' 1.000: means that orf is maximum different than average of CDS
#' @references doi: 10.1186/s12915-017-0416-0
#' @param grl a \code{\link{GRangesList}} object with ORFs
#' @param cds a \code{\link{GRangesList}} object with coding sequences
#' @param tx a GrangesList of transcripts covering grl.
#' @param reads ribosomal footprints, given as Galignment object or
#' Granges
#' @param pShifted a logical (TRUE), are riboseq reads p-shifted?
#' @family features
#' @return an integer vector, 1 score per ORF, with names of grl
#' @export
#' @importFrom BiocGenerics Reduce
#' @examples
#' # Good hiting ORF
#' ORF <- GRanges(seqnames = "1",
#' ranges = IRanges(21, 40),
#' strand = "+")
#' names(ORF) <- c("tx1")
#' grl <- GRangesList(tx1 = ORF)
#' # 1 width p-shifted reads
#' reads <- GRanges("1", IRanges(c(21, 23, 50, 50, 50, 53, 53, 56, 59),
#' width = 1), "+")
#' score(reads) <- 28 # original width
#' cds <- GRanges(seqnames = "1",
#' ranges = IRanges(50, 80),
#' strand = "+")
#' cds <- GRangesList(tx1 = cds)
#' tx <- GRanges(seqnames = "1",
#' ranges = IRanges(1, 85),
#' strand = "+")
#' tx <- GRangesList(tx1 = tx)
#'
#' initiationScore(grl, cds, tx, reads, pShifted = TRUE)
#'
initiationScore <- function(grl, cds, tx, reads, pShifted = TRUE) {
  if (length(grl) == 0) stop("grl must have length > 0")
  # meta coverage of cds: the reference TIS-region profile per read length
  cdsMeta <- windowPerReadLength(cds, tx, reads, pShifted = pShifted)
  # coverage per ORF, as fraction of reads per window position
  prop <- windowPerReadLength(grl, tx, reads, pShifted = pShifted,
                              scoring = "fracPos")
  # dif: absolute distance of each ORF profile from the CDS meta profile
  prop[, `:=` (dif = abs(score - cdsMeta$score))]
  # len: number of distinct read lengths (fractions) in the windows
  len <- length(unique(prop$fraction))
  # Sum distances per read length per ORF, average over read lengths,
  # then shift by -1 so an ORF identical to the CDS average scores -1
  ans <- prop[, .(difPer = sum(dif)), by = list(fraction, genes)]
  ans <- ans[, .(score = sum(difPer)/len - 1), by = list(genes)]$score
  # ORFs without reads give NA/NaN; defined as score 0
  ans[is.na(ans) | is.nan(ans)] <- 0
  names(ans) <- names(grl)
  return(ans)
}
#' Get ORFscore for a GRangesList of ORFs
#'
#' ORFscore tries to check whether the first frame of the 3 possible frames in
#' an ORF has more reads than second and third frame.
#'
#' Pseudocode:
#' assume rff - is reads fraction in specific frame
#' \preformatted{ORFScore = log(rrf1 + rrf2 + rrf3)}
#' For all ORFs where rrf2 or rrf3 is bigger than rff1,
#' negate the resulting value.
#' \preformatted{ORFScore[rrf1Smaller] <- ORFScore[rrf1Smaller] * -1}
#'
#' As result there is one value per ORF:
#' Positive values say that the first frame have the most reads,
#' negative values say that the first frame does not have the most reads.
#' NOTE: If reads are not of width 1, then a read from 1-4 on range of 1-4,
#' will get scores frame1 = 2, frame2 = 1, frame3 = 1. What could be logical
#' is that only the 5' end is important, so that only frame1 = 1,
#' to get this, you first resize reads to 5'end only.
#'
#' NOTE: p shifting is not exact, so some functional ORFs will get a
#' bad ORF score.
#' @references doi: 10.1002/embj.201488411
#' @param grl a \code{\link{GRangesList}} object with ORFs
#' @param RFP ribosomal footprints, given as Galignment object,
#' Granges or GRangesList
#' @param is.sorted logical (F), is grl sorted.
#' @importFrom data.table .SD
#' @importFrom data.table .N
#' @family features
#' @export
#' @return a data.table with 4 columns, the orfscore (ORFScores) and score of
#' each of the 3 tiles (frame_zero_RP, frame_one_RP, frame_two_RP)
#' @examples
#' ORF <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 10, 20), end = c(5, 15, 25)),
#' strand = "+")
#' names(ORF) <- c("tx1", "tx1", "tx1")
#' grl <- GRangesList(tx1_1 = ORF)
#' RFP <- GRanges("1", IRanges(25, 25), "+") # 1 width position based
#' score(RFP) <- 28 # original width
#' orfScore(grl, RFP) # negative because more hits on frames 1,2 than 0.
#'
#' # example with positive result, more hits on frame 0 (in frame of ORF)
#' RFP <- GRanges("1", IRanges(c(1, 1, 1, 25), width = 1), "+")
#' score(RFP) <- c(28, 29, 31, 28) # original width
#' orfScore(grl, RFP)
#'
orfScore <- function(grl, RFP, is.sorted = FALSE) {
  # ORFs shorter than one codon cannot be split into 3 frames
  if (any(widthPerGroup(grl, FALSE) < 3)) stop("width < 3 ORFs not allowed")
  # Per-position coverage annotated with reading frame (0, 1, 2)
  counts <- coveragePerTiling(grl, RFP, is.sorted, as.data.table = TRUE,
                              withFrames = TRUE)
  total <- coverageScorings(counts, scoring = "frameSum")
  # Read sums per frame, one value per ORF
  countsTile1 <- total[frame == 0, ]$score
  countsTile2 <- total[frame == 1, ]$score
  countsTile3 <- total[frame == 2, ]$score
  RP <- countsTile1 + countsTile2 + countsTile3
  Ftotal <- RP / 3 # expected count per frame if reads were uniform
  # Chi-squared-style deviation of each frame from the uniform expectation
  frame1 <- (countsTile1 - Ftotal)^2 / Ftotal
  frame2 <- (countsTile2 - Ftotal)^2 / Ftotal
  frame3 <- (countsTile3 - Ftotal)^2 / Ftotal
  dfORFs <- data.table(frame_zero_RP = countsTile1)
  dfORFs[, frame_one_RP := countsTile2]
  dfORFs[, frame_two_RP := countsTile3]
  ORFscore <- log2(frame1 + frame2 + frame3 + 1)
  # Negate the score when frame 0 does not dominate
  revORFscore <- which(frame1 < frame2 | frame1 < frame3)
  ORFscore[revORFscore] <- -1 * ORFscore[revORFscore]
  # ORFs with zero reads give NaN (division by Ftotal == 0) -> score 0
  ORFscore[is.na(ORFscore)] <- 0
  dfORFs$ORFScores <- ORFscore
  # "[]" ensures the data.table prints normally after := modifications
  return(dfORFs[])
}
| /R/riboseq_features.R | permissive | lukun06/ORFik | R | false | false | 26,067 | r | #' Create normalizations of overlapping read counts.
#'
#' FPKM is short for "Fragments Per Kilobase of transcript per Million
#' fragments in library". When calculating RiboSeq data FPKM over ORFs,
#' use ORFs as `grl`.
#' When calculating RNASeq data FPKM, use full transcripts as
#' `grl`. It is equal to RPKM given that you do not have paired end reads.
#'
#' Note also that you must consider if you will use the whole read
#' library or just the reads overlapping `grl`.
#' To only overlap do:
#' reads <- reads[countOverlaps(reads, grl) > 0]
#' @references doi: 10.1038/nbt.1621
#' @param grl a \code{\link{GRangesList}} object
#' can be either transcripts, 5' utrs, cds', 3' utrs or
#' ORFs as a special case (uORFs, potential new cds' etc). If
#' regions are not spliced you can send a \code{\link{GRanges}} object.
#' @param reads a GAlignment, GRanges or GRangesList object,
#' usually of RiboSeq, RnaSeq, CageSeq, etc.
#' @param pseudoCount an integer, by default is 0, set it to 1 if you want to
#' avoid NA and inf values.
#' @return a numeric vector with the fpkm values
#' @export
#' @family features
#' @examples
#' ORF <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 10, 20),
#' end = c(5, 15, 25)),
#' strand = "+")
#' grl <- GRangesList(tx1_1 = ORF)
#' RFP <- GRanges("1", IRanges(25, 25),"+")
#' fpkm(grl, RFP)
#'
fpkm <- function(grl, reads, pseudoCount = 0) {
  # Only GRanges / GRangesList inputs are supported
  if (!is.gr_or_grl(grl)) stop("grl must be GRangesList or GRanges")
  # Feature lengths: summed exon widths for a GRangesList, plain widths
  # for a GRanges
  grl_len <- if (is.grl(grl)) widthPerGroup(grl, FALSE) else width(grl)
  overlaps <- countOverlaps(grl, reads)
  # Library size is the total number of reads supplied
  fpkm_calc(overlaps, grl_len, length(reads)) + pseudoCount
}
#' Calucalte entropy value of overlapping input reads per GRanges.
#'
#' Calculates entropy of the `reads` coverage over each `grl` group.
#' The entropy value per group is a real number in the interval (0:1),
#' where 0 indicates no variance in reads over group.
#' For example c(0,0,0,0) has 0 entropy, since no reads overlap.
#' @param grl a \code{\link{GRangesList}} that the reads will
#' be overlapped with
#' @param reads a GAlignment, GRanges or GRangesList object,
#' usually of RiboSeq, RnaSeq, CageSeq, etc.
#' @return A numeric vector containing one entropy value per element in
#' `grl`
#' @family features
#' @export
#' @examples
#' # a toy example with ribo-seq p-shifted reads
#' ORF <- GRanges("1", ranges = IRanges(start = c(1, 12, 22),
#' end = c(10, 20, 32)),
#' strand = "+",
#' names = rep("tx1_1", 3))
#' names(ORF) <- rep("tx1", 3)
#' grl <- GRangesList(tx1_1 = ORF)
#' reads <- GRanges("1", IRanges(c(25, 35), c(25, 35)), "+")
#' # grl must have same names as cds + _1 etc, so that they can be matched.
#' entropy(grl, reads)
#' # or on cds
#' cdsORF <- GRanges("1", IRanges(35, 44), "+", names = "tx1")
#' names(cdsORF) <- "tx1"
#' cds <- GRangesList(tx1 = cdsORF)
#' entropy(cds, reads)
#'
entropy <- function(grl, reads) {
  # Optimize: Get count list of only groups with hits
  validIndices <- hasHits(grl, reads)
  if (!any(validIndices)) { # no variance in countList, 0 entropy
    return(rep(0, length(validIndices)))
  }
  grl <- grl[validIndices]
  # reOrdering: mapping from unique groups back to grl order (used below)
  reOrdering <- uniqueOrder(grl)
  # entropy function, interval 0:1 real number
  # Xi is the ratio of hits per position per group
  Xi <- codonSumsPerGroup(grl, reads)
  validXi <- Xi$codonSums > 0 # avoid log2(0)
  Xi[, `:=` (Hx = rep(0, nrow(Xi)))]
  Xi[validXi, Hx := codonSums * log2(codonSums)] # Hx: The codon sum part
  Xi <- Xi[, .(Hx = sum(Hx)), by = genes] # one Hx sum per group
  codons <- numCodons(grl)
  MHx <- 1/codons
  Xi[, MHx := MHx * log2(MHx) * codons] # MHx: The length part
  Xi[, entropy := Hx / MHx] # entropy is read sums over lengths
  entropy <- rep(0.0, length(validIndices))
  # NA/NaN entropies (e.g. division by zero MHx) are defined as 0
  Xi[is.na(entropy), entropy := 0.]
  tempEntro <- Xi$entropy[reOrdering] # order back from unique
  entropy[validIndices] <- tempEntro # order back from hits
  return(entropy)
}
#' Fragment Length Organization Similarity Score
#'
#' This feature is usually calcualted only for RiboSeq reads. For reads of
#' width between `start` and `end`,
#' sum the fraction of RiboSeq reads (per widths)
#' that overlap ORFs and normalize by CDS.
#'
#' Pseudo explanation of the function:
#' \preformatted{
#' SUM[start to stop]((grl[start:end][name]/grl) / (cds[start:end][name]/cds))
#' }
#' Please read more in the article.
#' @references doi: 10.1016/j.celrep.2014.07.045
#' @param grl a \code{\link{GRangesList}} object with ORFs
#' @param RFP ribosomal footprints, given as Galignment or GRanges object,
#' must be already shifted and resized to the p-site
#' @param cds a \code{\link{GRangesList}} of coding sequences,
#' cds has to have names as grl so that they can be matched
#' @param start usually 26, the start of the floss interval
#' @param end usually 34, the end of the floss interval
#' @return a vector of FLOSS of length same as grl
#' @family features
#' @export
#' @examples
#' ORF <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 12, 22),
#' end = c(10, 20, 32)),
#' strand = "+")
#' grl <- GRangesList(tx1_1 = ORF)
#' # RFP is 1 width position based GRanges
#' RFP <- GRanges("1", IRanges(c(1, 25, 35, 38), width = 1), "+")
#' score(RFP) <- c(28, 28, 28, 29) # original width in score col
#' cds <- GRangesList(tx1 = GRanges("1", IRanges(35, 44), "+"))
#' # grl must have same names as cds + _1 etc, so that they can be matched.
#' floss(grl, RFP, cds)
#' # or change ribosome start/stop, more strict
#' floss(grl, RFP, cds, 28, 28)
#'
floss <- function(grl, RFP, cds, start = 26, end = 34){
  if (start > end) stop("start is bigger than end")
  if (is.grl(class(RFP))) {
    stop("RFP must be either GAlignment or GRanges type")
  }
  # for orfs
  # Match reads to ORFs and keep only read widths within [start, end]
  overlaps <- findOverlaps(grl, RFP)
  rfpWidth <- readWidths(RFP[to(overlaps)])
  rfpPassFilter <- (rfpWidth >= start) & (rfpWidth <= end)
  rfpValidMatch <- rfpWidth[rfpPassFilter]
  # ORFGrouping: which ORF each surviving read belongs to
  ORFGrouping <- from(overlaps)[rfpPassFilter]
  if (sum(as.numeric(ORFGrouping)) == 0) {
    # no ORF got any read in the width interval -> all scores are 0
    return(as.numeric(rep(0, length(grl))))
  }
  whichNoHit <- NULL # which ribo-seq did not hit grl
  if (length(unique(ORFGrouping)) != length(grl)) {
    whichNoHit <- S4Vectors::setdiff.Vector(
      seq_along(grl), unique(ORFGrouping))
  }
  # Per-ORF distribution: fraction of reads at each read width
  orfFractions <- split(rfpValidMatch, ORFGrouping)
  listing<- IRanges::RleList(orfFractions)
  tableFracs <- table(listing)
  colnames(tableFracs) <- NULL
  orfFractions <- lapply(seq_along(listing), function(x) {
    tableFracs[x,] / sum(tableFracs[x,])
  })
  # for cds
  # Reference distribution: read-width fractions pooled over all CDSs
  overlapsCds <- findOverlaps(cds, RFP)
  rfpWidth <- readWidths(RFP[to(overlapsCds)])
  rfpPassFilterCDS <- ((rfpWidth >= start) & (rfpWidth <= end))
  rfpValidMatchCDS <- rfpWidth[rfpPassFilterCDS]
  cdsFractions <- split(rfpValidMatchCDS, rfpValidMatchCDS)
  totalLength <- length(rfpValidMatchCDS)
  cdsFractions <- vapply(cdsFractions, FUN.VALUE = c(1.0), FUN = function(x) {
    length(x) / totalLength
  })
  cdsFractions <- as.double(cdsFractions)
  # floss score ->
  # half the L1 distance between each ORF distribution and the CDS one
  score <- vapply(seq_along(orfFractions), FUN.VALUE = c(1.0),
                  FUN = function(x) {
                    sum(abs(orfFractions[[x]] - cdsFractions)) * 0.5
                  })
  if (!is.null(whichNoHit)) {
    # Expand scores back to the full grl length; ORFs without hits get 0
    tempScores <- as.numeric(rep(NA, length(grl)))
    tempScores[unique(ORFGrouping)] <- score
    tempScores[whichNoHit] <- 0.
    score <- tempScores
  }
  if (length(score) != length(grl) || anyNA(score)) {
    stop("could not find floss-score for all objects, most",
         "likely objects are wrongly annotated.")
  }
  return(score)
}
#' Translational efficiency
#'
#' Uses RnaSeq and RiboSeq to get translational efficiency of every element in
#' `grl`. Translational efficiency is defined as:
#' \preformatted{
#' (density of RPF within ORF) / (RNA expression of ORFs transcript)
#' }
#' @references doi: 10.1126/science.1168978
#' @param grl a \code{\link{GRangesList}} object
#' can be either transcripts, 5' utrs, cds', 3' utrs or
#' ORFs as a special case (uORFs, potential new cds' etc).
#' @param RNA RnaSeq reads as GAlignment, GRanges
#' or GRangesList object
#' @param RFP RiboSeq reads as GAlignment, GRanges
#' or GRangesList object
#' @param tx a GRangesList of the transcripts. If you used cage data, then
#' the tss for the the leaders have changed, therefor the tx lengths have
#' changed. To account for that call:
#' `
#' translationalEff(grl, RNA, RFP, tx = extendLeaders(tx, cageFiveUTRs))
#' ` where cageFiveUTRs are the reannotated by CageSeq data leaders.
#' @param with.fpkm logical F, if true return the fpkm values together with
#' translational efficiency
#' @param pseudoCount an integer, 0, set it to 1 if you want to avoid NA and
#' inf values. It also helps against bias from low depth libraries.
#' @return a numeric vector of fpkm ratios, if with.fpkm is TRUE, return a
#' data.table with te and fpkm values
#' @export
#' @importFrom data.table data.table
#' @family features
#' @examples
#' ORF <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 10, 20), end = c(5, 15, 25)),
#' strand = "+")
#' grl <- GRangesList(tx1_1 = ORF)
#' RFP <- GRanges("1", IRanges(25, 25), "+")
#' RNA <- GRanges("1", IRanges(1, 50), "+")
#' tx <- GRangesList(tx1 = GRanges("1", IRanges(1, 50), "+"))
#' # grl must have same names as cds + _1 etc, so that they can be matched.
#' te <- translationalEff(grl, RNA, RFP, tx, with.fpkm = TRUE, pseudoCount = 1)
#' te$fpkmRFP
#' te$te
#'
translationalEff <- function(grl, RNA, RFP, tx, with.fpkm = FALSE,
                             pseudoCount = 0) {
  # Transcript-level RNA expression, normalized by transcript length
  fpkmRNA <- fpkm(tx[txNames(grl)], RNA, pseudoCount)
  # ORF-level ribosome footprint density, normalized by ORF length
  fpkmRFP <- fpkm(grl, RFP, pseudoCount)
  if (with.fpkm) {
    # Include the fpkm components alongside the efficiency ratio
    data.table(fpkmRFP = fpkmRFP, fpkmRNA = fpkmRNA, te = fpkmRFP / fpkmRNA)
  } else {
    fpkmRFP / fpkmRNA
  }
}
#' Disengagement score (DS)
#'
#' Disengagement score is defined as
#' \preformatted{(RPFs over ORF)/(RPFs downstream to transcript end)}
#' A pseudo-count of one is added to both the ORF and downstream sums.
#' @references doi: 10.1242/dev.098344
#' @param grl a \code{\link{GRangesList}} object
#' with usually either leaders, cds', 3' utrs or ORFs.
#' @param RFP RiboSeq reads as GAlignment, GRanges
#' or GRangesList object
#' @param GtfOrTx If it is \code{\link{TxDb}} object
#' transcripts will be extracted using
#' \code{exonsBy(Gtf, by = "tx", use.names = TRUE)}.
#' Else it must be \code{\link{GRangesList}}
#' @param RFP.sorted logical (F), an optimizer, have you ran this line:
#' \code{RFP <- sort(RFP[countOverlaps(RFP, tx, type = "within") > 0])}
#' Normally not touched, for internal optimization purposes.
#' @return a named vector of numeric values of scores
#' @export
#' @family features
#' @examples
#' ORF <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 10, 20), end = c(5, 15, 25)),
#' strand = "+")
#' grl <- GRangesList(tx1_1 = ORF)
#' tx <- GRangesList(tx1 = GRanges("1", IRanges(1, 50), "+"))
#' RFP <- GRanges("1", IRanges(c(1,10,20,30,40), width = 3), "+")
#' disengagementScore(grl, RFP, tx)
#'
disengagementScore <- function(grl, RFP, GtfOrTx, RFP.sorted = FALSE) {
  # Load transcripts from TxDb, or use GtfOrTx directly if already a GRangesList
  tx <- loadRegion(GtfOrTx)
  # exclude non hits and set them to 0
  # validIndices: one logical per ORF, TRUE if its transcript has any RFP hit
  validIndices <- hasHits(tx, RFP)
  validIndices <- validIndices[data.table::chmatch(txNames(grl), names(tx))]
  if (!any(validIndices)) { # if no hits
    # No transcript has reads: downstream sum is the pseudo-count 1,
    # so the score reduces to (ORF hits + 1) / 1
    score <- countOverlaps(grl, RFP) + 1
    names(score) <- NULL
    return(score)
  }
  # Denominator defaults to the pseudo-count 1 for ORFs without hits
  overlapDownstream <- rep(1, length(grl))
  grlStops <- stopSites(grl[validIndices], asGR = FALSE, is.sorted = TRUE)
  # Transcript region downstream of each ORF's stop site
  downstreamTx <- downstreamOfPerGroup(tx[txNames(grl)][validIndices],
                                       grlStops)
  # check for big lists
  if (length(downstreamTx) > 5e5) {
    # Optimization: restrict reads to those within transcripts, then count
    # overlaps only on unique downstream groups and expand back via ordering
    if(!RFP.sorted){
      RFP <- sort(RFP[countOverlaps(RFP, tx, type = "within") > 0])
    }
    ordering <- uniqueOrder(downstreamTx)
    downstreamTx <- uniqueGroups(downstreamTx)
    overlapDownstream[validIndices] <- countOverlaps(downstreamTx,
                                                     RFP)[ordering] + 1
  } else {
    overlapDownstream[validIndices] <- countOverlaps(downstreamTx, RFP) + 1
  }
  # Numerator: reads over the ORF itself, with pseudo-count 1
  overlapGrl <- countOverlaps(grl, RFP) + 1
  score <- overlapGrl / overlapDownstream
  names(score) <- NULL
  return(score)
}
#' Inside/Outside score (IO)
#'
#' Inside/Outside score is defined as
#' \preformatted{(reads over ORF)/(reads outside ORF and within transcript)}
#' A pseudo-count of one is added to both the ORF and outside sums.
#' @references doi: 10.1242/dev.098345
#' @param grl a \code{\link{GRangesList}} object
#' with usually either leaders, cds', 3' utrs or ORFs
#' @param RFP ribo seq reads as GAlignment, GRanges or GRangesList object
#' @param GtfOrTx if Gtf: a TxDb object of a gtf file that transcripts will be
#' extracted with `exonsBy(Gtf, by = "tx", use.names = TRUE)`, if
#' a GrangesList will use as is
#' @param ds numeric vector (NULL), disengagement score. If you have already
#' calculated \code{\link{disengagementScore}}, input here to save time.
#' @param RFP.sorted logical (F), have you ran this line:
#' \code{RFP <- sort(RFP[countOverlaps(RFP, tx, type = "within") > 0])}
#' Normally not touched, for internal optimization purposes.
#' @return a named vector of numeric values of scores
#' @importFrom data.table rbindlist
#' @family features
#' @export
#' @examples
#' # Check inside outside score of a ORF within a transcript
#' ORF <- GRanges("1",
#' ranges = IRanges(start = c(20, 30, 40),
#' end = c(25, 35, 45)),
#' strand = "+")
#'
#' grl <- GRangesList(tx1_1 = ORF)
#'
#' tx1 <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 10, 20, 30, 40, 50),
#' end = c(5, 15, 25, 35, 45, 200)),
#' strand = "+")
#' tx <- GRangesList(tx1 = tx1)
#' RFP <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 4, 30, 60, 80, 90),
#' end = c(30, 33, 63, 90, 110, 120)),
#' strand = "+")
#'
#' insideOutsideORF(grl, RFP, tx)
#'
insideOutsideORF <- function(grl, RFP, GtfOrTx, ds = NULL,
                             RFP.sorted = FALSE) {
  tx <- loadRegion(GtfOrTx)
  # Pre-filter/sort reads against transcripts unless caller already did
  if (!RFP.sorted) RFP <- optimizeReads(tx, RFP)
  # Numerator: reads over each ORF, with pseudo-count 1 (full grl length)
  overlapGrl <- countOverlaps(grl, RFP) + 1
  # find tx with hits
  validIndices <- hasHits(tx, RFP)
  validIndices <- validIndices[data.table::chmatch(txNames(grl), names(tx))]
  if (!any(validIndices)) { # if no hits
    # Denominator is the pseudo-count 1 everywhere, so score == numerator
    names(overlapGrl) <- NULL
    return(overlapGrl)
  }
  # Subset tx and grl to ORFs whose transcript has reads
  tx <- tx[txNames(grl)][validIndices]
  grl <- grl[validIndices]
  grlStarts <- startSites(grl, asGR = FALSE, is.sorted = TRUE)
  # Transcript region upstream of each ORF start
  upstreamTx <- upstreamOfPerGroup(tx, grlStarts, allowOutside = FALSE)
  overlapTxOutside <- rep(1, length(validIndices))
  if (!is.null(ds)) { # save time here if ds is defined
    # Recover downstream counts from the disengagement score:
    # ds = overlapGrl / downstream  =>  downstream = overlapGrl / ds
    downstreamCounts <- 1 / (ds / overlapGrl)
    upstreamCounts <- rep(1, length(validIndices))
    upstreamCounts[validIndices] <- countOverlaps(upstreamTx, RFP)
    overlapTxOutside <- downstreamCounts + upstreamCounts
  } else { # else make ds again
    grlStops <- stopSites(grl, asGR = FALSE, is.sorted = TRUE)
    downstreamTx <- downstreamOfPerGroup(tx, grlStops)
    # Merge upstream and downstream exons into one "outside" region per tx
    dtmerge <- data.table::rbindlist(l = list(as.data.table(upstreamTx),
                                              as.data.table(downstreamTx)))
    group <- NULL # for avoiding warning
    txOutside <- makeGRangesListFromDataFrame(
      dtmerge[order(group)], split.field = "group")
    overlapTxOutside[validIndices] <- countOverlaps(txOutside, RFP) + 1
  }
  scores <- overlapGrl / overlapTxOutside
  names(scores) = NULL
  return(scores)
}
#' Ribosome Release Score (RRS)
#'
#' Ribosome Release Score is defined as
#' \preformatted{(RPFs over ORF)/(RPFs over 3' utrs)} and
#' additionaly normalized by lengths.
#' If RNA is added as argument, it will normalize by RNA counts
#' to justify location of 3' utrs.
#' It can be understood as a ribosome stalling feature.
#' A pseudo-count of one was added to both the ORF and downstream sums.
#' @references doi: 10.1016/j.cell.2013.06.009
#' @param grl a \code{\link{GRangesList}} object
#' with usually either leaders,
#' cds', 3' utrs or ORFs.
#' @param RFP RiboSeq reads as GAlignment, GRanges
#' or GRangesList object
#' @param GtfOrThreeUtrs if Gtf: a TxDb object of a gtf file transcripts is
#' called from: `threeUTRsByTranscript(Gtf, use.names = TRUE)`,
#' if object is GRangesList, it is presumed to be the 3' utrs
#' @param RNA RnaSeq reads as GAlignment, GRanges
#' or GRangesList object
#' @return a named vector of numeric values of scores, NA means that
#' no 3' utr was found for that transcript.
#' @export
#' @family features
#' @examples
#' ORF <- GRanges(seqnames = "1",
#' ranges = IRanges(start = c(1, 10, 20), end = c(5, 15, 25)),
#' strand = "+")
#' grl <- GRangesList(tx1_1 = ORF)
#' threeUTRs <- GRangesList(tx1 = GRanges("1", IRanges(40, 50), "+"))
#' RFP <- GRanges("1", IRanges(25, 25), "+")
#' RNA <- GRanges("1", IRanges(1, 50), "+")
#' ribosomeReleaseScore(grl, RFP, threeUTRs, RNA)
#'
ribosomeReleaseScore <- function(grl, RFP, GtfOrThreeUtrs, RNA = NULL){
  # Load 3' UTRs from TxDb, or use GtfOrThreeUtrs directly if a GRangesList
  threeUTRs <- loadRegion(GtfOrThreeUtrs, part = "trailer")
  # check that naming is correct, else change it.
  orfNames <- txNames(grl, FALSE)
  # Keep only ORF / 3' UTR pairs that can be matched by transcript name
  validNamesThree <- names(threeUTRs) %in% orfNames
  validNamesGRL <- orfNames %in% names(threeUTRs)
  # NA marks ORFs with no matching 3' UTR; vector keeps original grl length
  rrs <- rep(NA,length(grl))
  if (sum(validNamesGRL) != length(grl)) {
    threeUTRs <- threeUTRs[validNamesThree]
    grl <- grl[validNamesGRL]
  }
  overlapGrl <- countOverlaps(grl, RFP) + 1
  # Reorder 3' UTRs to match the ORF order
  threeUTRs <- threeUTRs[orfNames[validNamesGRL]]
  overlapThreeUtrs <- countOverlaps(threeUTRs, RFP) + 1
  # Length-normalized ratio: ORF read density over 3' UTR read density
  rrs[validNamesGRL] <- (overlapGrl / widthPerGroup(grl)) /
    (overlapThreeUtrs / widthPerGroup(threeUTRs))
  if (!is.null(RNA)) { # normalize by rna ratio
    rnaRatio <- (countOverlaps(grl, RNA) + 1) /
      (countOverlaps(threeUTRs, RNA) + 1)
    rrs[validNamesGRL] <- rrs[validNamesGRL] / rnaRatio
  }
  names(rrs) <- NULL
  return(rrs)
}
#' Ribosome Stalling Score (RSS)
#'
#' Defined as \preformatted{(RPFs over ORF stop sites)/(RPFs over ORFs)}
#' and normalized by lengths.
#' A pseudo-count of one is added to both the stop-site and ORF sums.
#' @references doi: 10.1016/j.cels.2017.08.004
#' @param grl a \code{\link{GRangesList}} object
#' with usually either leaders,
#' cds', 3' utrs or ORFs.
#' @param RFP RiboSeq reads as GAlignment, GRanges
#' or GRangesList object
#' @return a named vector of numeric values of RSS scores
#' @export
#' @family features
#' @examples
#' ORF <- GRanges(seqnames = "1",
#'                ranges = IRanges(start = c(1, 10, 20), end = c(5, 15, 25)),
#'                strand = "+")
#' grl <- GRangesList(tx1_1 = ORF)
#' RFP <- GRanges("1", IRanges(25, 25), "+")
#' ribosomeStallingScore(grl, RFP)
#'
ribosomeStallingScore <- function(grl, RFP){
  # Read density in the 3 nt stop codon divided by the length-normalized
  # read density over the whole ORF; +1 pseudo-counts on both sides.
  orfWidths <- widthPerGroup(grl, FALSE)
  orfHits <- countOverlaps(grl, RFP)
  stopHits <- countOverlaps(stopCodons(grl, is.sorted = TRUE), RFP)
  rss <- ((stopHits + 1) / 3) / ((orfHits + 1) / orfWidths)
  unname(rss)
}
#' Start region coverage
#'
#' Count the reads falling in a window around each ORF's start site.
#' With the defaults (2 upstream, 2 downstream) this is a width-5 window
#' centered at the start site; set upstream = 0 for the start codon
#' coverage only. Since p-shifting is not 100% accurate, this window
#' usually captures the reads belonging to the start site.
#'
#' If tx is null, then upstream will be force to 0 and downstream to
#' a maximum of grl width. Since there is no reference for splicing.
#' @param RFP ribo seq reads as GAlignment, GRanges or GRangesList object
#' @inheritParams startRegion
#' @family features
#' @return a numeric vector of counts
startRegionCoverage <- function(grl, RFP, tx = NULL, is.sorted = TRUE,
                                upstream = 2L, downstream = 2L) {
  # Window construction is delegated to startRegion(); we only count hits.
  countOverlaps(startRegion(grl, tx, is.sorted, upstream, downstream), RFP)
}
#' Get initiation score for a GRangesList of ORFs
#'
#' initiationScore tries to check how much each TIS region resembles, the
#' average of the CDS TIS regions.
#'
#' Since this feature uses a distance matrix for scoring, values are
#' distributed like this:
#' As result there is one value per ORF:
#' 0.000: means that ORF had no reads
#' -1.000: means that ORF is identical to average of CDS
#' 1.000: means that orf is maximum different than average of CDS
#' @references doi: 10.1186/s12915-017-0416-0
#' @param grl a \code{\link{GRangesList}} object with ORFs
#' @param cds a \code{\link{GRangesList}} object with coding sequences
#' @param tx a GrangesList of transcripts covering grl.
#' @param reads ribosomal footprints, given as Galignment object or
#' Granges
#' @param pShifted a logical (TRUE), are riboseq reads p-shifted?
#' @family features
#' @return an integer vector, 1 score per ORF, with names of grl
#' @export
#' @importFrom BiocGenerics Reduce
#' @examples
#' # Good hitting ORF
#' ORF <- GRanges(seqnames = "1",
#'                ranges = IRanges(21, 40),
#'                strand = "+")
#' names(ORF) <- c("tx1")
#' grl <- GRangesList(tx1 = ORF)
#' # 1 width p-shifted reads
#' reads <- GRanges("1", IRanges(c(21, 23, 50, 50, 50, 53, 53, 56, 59),
#'                               width = 1), "+")
#' score(reads) <- 28 # original width
#' cds <- GRanges(seqnames = "1",
#'                ranges = IRanges(50, 80),
#'                strand = "+")
#' cds <- GRangesList(tx1 = cds)
#' tx <- GRanges(seqnames = "1",
#'               ranges = IRanges(1, 85),
#'               strand = "+")
#' tx <- GRangesList(tx1 = tx)
#'
#' initiationScore(grl, cds, tx, reads, pShifted = TRUE)
#'
initiationScore <- function(grl, cds, tx, reads, pShifted = TRUE) {
  if (length(grl) == 0) stop("grl must have length > 0")
  # meta coverage of cds (average TIS profile per read length)
  cdsMeta <- windowPerReadLength(cds, tx, reads, pShifted = pShifted)
  # coverage per ORF, as fraction of reads per position
  prop <- windowPerReadLength(grl, tx, reads, pShifted = pShifted,
                              scoring = "fracPos")
  # find a better scoring pattern
  # per-position absolute deviation from the CDS meta profile
  # (data.table in-place column add; relies on row alignment with cdsMeta)
  prop[, `:=` (dif = abs(score - cdsMeta$score))]
  len <- length(unique(prop$fraction))
  # sum deviations per (read-length fraction, gene), then average over
  # fractions and shift by -1 so a perfect match scores -1
  ans <- prop[, .(difPer = sum(dif)), by = list(fraction, genes)]
  ans <- ans[, .(score = sum(difPer)/len - 1), by = list(genes)]$score
  # ORFs with no reads produce NA/NaN; define their score as 0
  ans[is.na(ans) | is.nan(ans)] <- 0
  names(ans) <- names(grl)
  return(ans)
}
#' Get ORFscore for a GRangesList of ORFs
#'
#' ORFscore tries to check whether the first frame of the 3 possible frames in
#' an ORF has more reads than second and third frame.
#'
#' Pseudocode:
#' assume rff - is reads fraction in specific frame
#' \preformatted{ORFScore = log(rrf1 + rrf2 + rrf3)}
#' For all ORFs where rrf2 or rrf3 is bigger than rff1,
#' negate the resulting value.
#' \preformatted{ORFScore[rrf1Smaller] <- ORFScore[rrf1Smaller] * -1}
#'
#' As result there is one value per ORF:
#' Positive values say that the first frame have the most reads,
#' negative values say that the first frame does not have the most reads.
#' NOTE: If reads are not of width 1, then a read from 1-4 on range of 1-4,
#' will get scores frame1 = 2, frame2 = 1, frame3 = 1. What could be logical
#' is that only the 5' end is important, so that only frame1 = 1,
#' to get this, you first resize reads to 5'end only.
#'
#' NOTE: p shifting is not exact, so some functional ORFs will get a
#' bad ORF score.
#' @references doi: 10.1002/embj.201488411
#' @param grl a \code{\link{GRangesList}} object with ORFs
#' @param RFP ribosomal footprints, given as Galignment object,
#' Granges or GRangesList
#' @param is.sorted logical (F), is grl sorted.
#' @importFrom data.table .SD
#' @importFrom data.table .N
#' @family features
#' @export
#' @return a data.table with 4 columns, the orfscore (ORFScores) and score of
#' each of the 3 tiles (frame_zero_RP, frame_one_RP, frame_two_RP)
#' @examples
#' ORF <- GRanges(seqnames = "1",
#'                ranges = IRanges(start = c(1, 10, 20), end = c(5, 15, 25)),
#'                strand = "+")
#' names(ORF) <- c("tx1", "tx1", "tx1")
#' grl <- GRangesList(tx1_1 = ORF)
#' RFP <- GRanges("1", IRanges(25, 25), "+") # 1 width position based
#' score(RFP) <- 28 # original width
#' orfScore(grl, RFP) # negative because more hits on frames 1,2 than 0.
#'
#' # example with positive result, more hits on frame 0 (in frame of ORF)
#' RFP <- GRanges("1", IRanges(c(1, 1, 1, 25), width = 1), "+")
#' score(RFP) <- c(28, 29, 31, 28) # original width
#' orfScore(grl, RFP)
#'
orfScore <- function(grl, RFP, is.sorted = FALSE) {
  # ORFs shorter than one codon cannot be split into 3 frames.
  if (any(widthPerGroup(grl, FALSE) < 3)) stop("width < 3 ORFs not allowed")
  # per-position coverage, annotated with reading frame (0/1/2)
  counts <- coveragePerTiling(grl, RFP, is.sorted, as.data.table = TRUE,
                              withFrames = TRUE)
  total <- coverageScorings(counts, scoring = "frameSum")
  # read counts per ORF in each of the three frames
  countsTile1 <- total[frame == 0,]$score
  countsTile2 <- total[frame == 1,]$score
  countsTile3 <- total[frame == 2,]$score
  RP = countsTile1 + countsTile2 + countsTile3
  Ftotal <- RP/3 # expected count per frame under uniformity
  # chi-squared-style deviation of each frame from the uniform expectation
  frame1 <- (countsTile1 - Ftotal)^2 / Ftotal
  frame2 <- (countsTile2 - Ftotal)^2 / Ftotal
  frame3 <- (countsTile3 - Ftotal)^2 / Ftotal
  dfORFs <- data.table(frame_zero_RP = countsTile1)
  dfORFs[, frame_one_RP := countsTile2]
  dfORFs[, frame_two_RP := countsTile3]
  ORFscore <- log2(frame1 + frame2 + frame3 + 1)
  # BUG FIX: negate when frame 0 does NOT have the most reads. The old
  # comparison used the squared deviations (frame1 < frame2 | ...), which
  # mislabels ORFs whose first frame actually dominates (e.g. counts 5/4/0:
  # deviation of frame 0 can be smaller than frame 2's). The reference
  # (Bazzini et al. 2014) compares the raw frame counts.
  revORFscore <- which(countsTile1 < countsTile2 | countsTile1 < countsTile3)
  ORFscore[revORFscore] <- -1 * ORFscore[revORFscore]
  # ORFs with zero reads give NaN (0/0); define their score as 0
  ORFscore[is.na(ORFscore)] <- 0
  dfORFs$ORFScores <- ORFscore
  dfORFs[] # for print
  return(dfORFs)
}
|
#! /usr/bin/env Rscript
# cluster_lineages.R
# This script takes input fitnesses and
# generates clusters for each source-ploidy lineage set.
#
# The script also calculates the cluster-wise weighted (e.g., Maha) mean and se.
# and outputs this as a second file
# ------------------------- #
# header #
# ------------------------- #
suppressWarnings(suppressMessages(library(dplyr)))
suppressWarnings(suppressMessages(library(tidyr)))
suppressWarnings(suppressMessages(library(docopt)))
source(file.path("scripts/src/pleiotropy_functions.R"))
# ------------------------- #
# function definitions #
# ------------------------- #
get_neutral_tree_depth <- function(df, method = "euclidean", quant = 0.95) {
  # Hierarchically cluster the neutral-lineage fitness rows and return the
  # `quant` quantile of the merge heights; used downstream as the height
  # at which adapted-lineage trees are cut.
  neut_clust <- hclust(dist(df, method = method))
  as.numeric(stats::quantile(neut_clust$height, prob = quant))
}
get_clusters_by_cut <- function(df = NULL, method = "euclidean", cut_at = NULL) {
  # Cluster rows of `df` hierarchically and cut the tree at height
  # `cut_at`; returns a data.frame mapping barcode (row name) -> cluster id.
  stopifnot(!is.null(cut_at) & !is.null(df))
  df <- df[complete.cases(df), ]  # drop rows with any NA before clustering
  memberships <- cutree(hclust(dist(df, method = method)), h = cut_at)
  data.frame(Full.BC = row.names(df),
             cluster = memberships,
             stringsAsFactors = FALSE)
}
get_neutral_cov_matr <- function(mat) {
  # Sample covariance across the columns of the neutral fitness matrix.
  stats::cov(mat)
}
maha_mean <- function(x, x_var, covm) {
  # Inverse-variance (Mahalanobis) weighted mean of one or more lineages.
  #
  # x:     matrix of row vectors (or a single vector) of per-lineage means
  # x_var: matching matrix/vector of per-lineage variances
  # covm:  template covariance matrix; off-diagonals are kept, the diagonal
  #        is replaced by each lineage's variances
  # Returns list(mean = pooled mean vector, covm = pooled covariance).
  if (all(is.vector(x), is.vector(x_var))) {
    # Single observation: nothing to pool, just plug the variances into covm.
    sigma_hat <- covm
    diag(sigma_hat) <- x_var
    return(list(mean = x,
                covm = sigma_hat))
  }
  # Precision-weighted pooling over rows:
  #   sigma_hat = (sum_i S_i^-1)^-1,  mean = sigma_hat %*% sum_i S_i^-1 x_i
  # BUG FIX: the original looped over seq_along(nrow(x)), which is always a
  # single iteration (seq_along of a scalar), so only the first row was ever
  # used; seq_len(nrow(x)) visits every row.
  for (i in seq_len(nrow(x))) {
    sigma_i <- covm
    diag(sigma_i) <- x_var[i, ]
    sigma_i_inv <- solve(sigma_i)
    maha_x <- sigma_i_inv %*% x[i, ]
    if (i == 1) {
      sigma_hat_inv <- sigma_i_inv
      maha_x_tot <- maha_x
    } else {
      sigma_hat_inv <- sigma_hat_inv + sigma_i_inv
      maha_x_tot <- maha_x_tot + maha_x
    }
  }
  # Solve once after accumulation (the original re-solved every iteration).
  sigma_hat <- solve(sigma_hat_inv)
  maha_mean_vec <- sigma_hat %*% maha_x_tot
  mean_out <- c(maha_mean_vec)
  names(mean_out) <- row.names(maha_mean_vec)
  return(list(mean = mean_out,
              covm = sigma_hat))
}
calc_clust_dist <- function(...) {
  # Squared Mahalanobis distance between the pooled means of two clusters
  # C_i and C_j, pooling done by the function named in `dist_fun`.
  args <- list(...)
  needed <- c("C_i", "C_j", "dist_fun", "covm")
  stopifnot(all(needed %in% names(args)))
  stopifnot(exists(args$dist_fun))
  pool <- get(args$dist_fun)
  a <- pool(x = args$C_i$x_i, x_var = args$C_i$x_i_var, covm = args$covm)
  b <- pool(x = args$C_j$x_j, x_var = args$C_j$x_j_var, covm = args$covm)
  delta <- a$mean - b$mean
  # equivalent to mahalanobis(a$mean, b$mean, cov = a$covm + b$covm)
  t(delta) %*% solve(a$covm + b$covm) %*% delta
}
numerize <- function(x) {
  # Collapse a one-row table to a plain vector; otherwise coerce to matrix.
  if (nrow(x) == 1) as.vector(x) else as.matrix(x)
}
reduce_clusters_by_dist <- function(clust_vec = NULL,
                                    mean_matr = NULL,
                                    var_matr = NULL,
                                    covm = NULL,
                                    se = FALSE,
                                    dist_fun = "maha_mean",
                                    p_cutoff = 0.99) {
  # Agglomeratively merge clusters whose pairwise Mahalanobis distance is
  # below a chi-squared cutoff (df = number of fitness dimensions).
  #
  # clust_vec: initial integer cluster labels (NULL -> one cluster per row)
  # mean_matr / var_matr: per-lineage fitness means / variances (rows align)
  # covm: template covariance matrix (see maha_mean)
  # se: if TRUE, var_matr holds standard errors and is squared first
  # Returns a Full.BC/cluster data.frame when row names exist, else the
  # final label vector.
  stopifnot(all(!is.null(covm), !is.null(mean_matr), !is.null(var_matr)))
  df <- ncol(mean_matr)
  # merge only while the closest pair is nearer than this chi-sq quantile
  dist_cutoff <- qchisq(p = p_cutoff, df = df)
  if (is.null(clust_vec)) {
    clust_vec <- seq_len(dim(mean_matr)[1])
  } else {
    stopifnot(is.vector(clust_vec) &
                length(clust_vec) == nrow(mean_matr) &
                nrow(mean_matr) == nrow(var_matr))
  }
  if (se == TRUE) {
    var_matr <- var_matr^2
  }
  while (length(unique(clust_vec)) > 1) {
    DISTS <- list()
    clust_ids <- unique(clust_vec)
    # renumber labels to 1..k each pass so index loops below line up
    clust_vec %>%
      plyr::mapvalues(from = clust_ids,
                      to = seq_len(length(unique(clust_vec)))) ->
      clust_vec
    n_clusts <- length(unique(clust_vec))
    clust_ids <- unique(clust_vec)
    cat(sprintf("\tNum. clusters = %s\n", n_clusts))
    # calculate pairwise dist between each cluster
    for (i in 1:(n_clusts - 1)) {
      # NOTE(review): this in-place bump of the loop index (and the one for
      # j below) appears to skip labels absent from clust_vec; after the
      # renumbering above ids should be contiguous, so confirm this guard
      # is still needed.
      if (i %in% clust_vec == FALSE) {
        i <- i + 1
      }
      c_i <- clust_ids[i]
      x_i <- as.matrix(mean_matr[clust_vec == c_i, ])
      x_i_var <- as.matrix(var_matr[clust_vec == c_i, ])
      C_i <- list(x_i = x_i,
                  x_i_var = x_i_var)
      for (j in (i + 1):n_clusts) {
        if (j %in% clust_vec == FALSE) {
          j <- j + 1
        }
        c_j <- clust_ids[j]
        x_j <- as.matrix(mean_matr[clust_vec == c_j, ])
        x_j_var <- as.matrix(var_matr[clust_vec == c_j, ])
        C_j <- list(x_j = x_j,
                    x_j_var = x_j_var)
        # calc dist
        d_ij = calc_clust_dist(C_i = C_i,
                               C_j = C_j,
                               covm = covm,
                               dist_fun = "maha_mean")
        DISTS <- append(DISTS, list(data.frame(i = c_i, j = c_j, d_ij = d_ij)))
      }
    }
    D_res <- do.call(rbind, DISTS)
    D_min <- min(D_res$d_ij)
    # stop merging once even the closest pair is beyond the cutoff
    if (D_min >= dist_cutoff) {
      break
    }
    # merge the closest pair: both take the label of cluster i
    row_min <- which(D_res$d_ij == D_min)
    clust_vec[clust_vec %in% c(D_res$i[row_min],
                               D_res$j[row_min])] <- D_res$i[row_min]
  }
  cat("Done!\n")
  if (all(!is.null(row.names(mean_matr)), !is.null(row.names(var_matr)))) {
    res <- data.frame(Full.BC = row.names(mean_matr),
                      cluster = clust_vec,
                      stringsAsFactors = FALSE)
    return(res)
  }
  return(clust_vec)
}
average_clusters <- function(clusters, mean_matr, var_matr, covm) {
  # Compute the Mahalanobis-weighted mean fitness (and its SE) per cluster.
  # `clusters` may be the data.frame produced by reduce_clusters_by_dist()
  # (with a `cluster` column) or a bare label vector.
  if (is.data.frame(clusters) & "cluster" %in% names(clusters)) {
    clust_ids <- clusters$cluster
  } else {
    clust_ids <- clusters
  }
  df_full <- data.frame()
  # NOTE(review): iterating 1..k and testing `clust_ids == clust` assumes
  # the labels are exactly 1..k with no gaps; after merging, labels can be
  # non-contiguous -- confirm upstream renumbering guarantees this.
  for (clust in seq_along(unique(clust_ids))) {
    clust_mean <- maha_mean(x = as.matrix(mean_matr[clust_ids == clust, ]),
                            x_var = as.matrix(var_matr[clust_ids == clust, ]),
                            covm = covm)
    # keep only the text before the first "." of each dimension name,
    # which is taken to be the BFA environment label
    names(clust_mean$mean) %>%
      sapply(function(x) {
        x %>%
          strsplit("\\.") %>%
          unlist() %>%
          dplyr::first()
      }) -> envs
    names(clust_mean$mean) <- envs
    # one row per environment: pooled mean, its SE, cluster id and size
    data.frame(bfa_env = names(clust_mean$mean),
               s = clust_mean$mean,
               s_se = sqrt(diag(clust_mean$covm)),
               cluster = clust,
               n_bcs = length(clust_ids[clust_ids == clust])) ->
      df_tmp
    df_full %>%
      dplyr::bind_rows(df_tmp) ->
      df_full
  }
  return(df_full)
}
# ------------------------- #
# main def #
# ------------------------- #
main <- function(arguments) {
  # Driver: read fitness calls, derive a clustering threshold and covariance
  # from neutral lineages, cluster adapted lineages per environment, and
  # write per-barcode cluster assignments plus cluster-wise means.
  # check + grab input files
  # NOTE(review): `arguments$infiles` (plural) does not match the `infile`
  # element built below; `$` returns NULL here, so this existence check can
  # never fire -- likely should be `arguments$infile`. Confirm and fix.
  if (sum(!sapply(arguments$infiles, file.exists)) > 0) {
    stop(
      "One or more infile does not exist. Please check infile path and try again.",
      call. = FALSE
    )
  }
  # read infiles
  infile <- read.table(arguments$infile,
                       sep = ",",
                       header = T,
                       stringsAsFactors = F)
  # grab barcodes to retain after filtering:
  adapted_df <-
    infile %>%
    dplyr::filter(Subpool.Environment != "not_read") %>%
    filter_to_focal_bcs(
      retain_neutrals = FALSE,
      retain_adapteds = TRUE,
      retain_autodips = FALSE
    )
  neutral_df <-
    infile %>%
    filter_to_focal_bcs(
      retain_neutrals = TRUE,
      retain_adapteds = FALSE,
      retain_autodips = FALSE
    )
  # find tree depth of neutrals with mahalanobis distance:
  neutral_df %>%
    prep_fit_matrix(means_only = FALSE,
                    excludes = arguments$exclude,
                    iva_s = arguments$use_iva,
                    gens = arguments$gens) ->
    neutral_df_matr
  neutral_df_matr$means %>%
    get_neutral_tree_depth(quant = 0.95) ->
    neutral_clust_height
  neutral_df_matr$means %>%
    get_neutral_cov_matr() ->
    neutral_cov
  # use neutral tree depth
  # to define initial clusters for each environment
  adapted_df %>%
    split(adapted_df$Subpool.Environment) ->
    adapted_by_env
  adapted_df_w_clust <- list()
  clust_wise_means <- list()
  for (env in seq_along(adapted_by_env)) {
    cat(sprintf("\nWorking on env %s...\n", names(adapted_by_env)[env]))
    adapted_by_env[[env]] %>%
      prep_fit_matrix(means_only = FALSE,
                      excludes = arguments$exclude,
                      iva_s = arguments$use_iva,
                      gens = arguments$gens) ->
      matr_prepped
    # single-lineage environments trivially form one cluster
    if (all(lapply(matr_prepped, nrow) == 1)) {
      clusts_final <- data.frame(Full.BC = row.names(matr_prepped$means),
                                 cluster = "1",
                                 stringsAsFactors = FALSE)
    } else {
      # initial cut at the neutral tree depth, then merge clusters that are
      # not distinguishable by Mahalanobis distance
      matr_prepped$means %>%
        get_clusters_by_cut(cut_at = neutral_clust_height) ->
        clusts_initial
      clusts_initial$cluster %>%
        reduce_clusters_by_dist(mean_matr = matr_prepped$means,
                                var_matr = matr_prepped$sigmas^2,
                                covm = neutral_cov) ->
        clusts_final
    }
    adapted_by_env[[env]] %>%
      dplyr::left_join(clusts_final, by = "Full.BC") ->
      adapted_df_w_clust[[env]]
    clusts_final %>%
      average_clusters(mean_matr = matr_prepped$means,
                       var_matr = matr_prepped$sigmas^2,
                       covm = neutral_cov) ->
      clust_wise_mean
    clust_wise_mean$source <- names(adapted_by_env)[env]
    clust_wise_means[[env]] <- clust_wise_mean
  }
  # combine all environments and write the two output tables
  adapted_df_w_clust_full <- do.call(rbind, adapted_df_w_clust)
  clust_wise_means_full <- do.call(rbind, clust_wise_means)
  adapted_df_w_clust_full %>%
    write_out(out_dir = arguments$outdir,
              base_name = basename(arguments$infile),
              str_to_append = "_adapted_w_clusts")
  clust_wise_means_full %>%
    write_out(out_dir = arguments$outdir,
              base_name = basename(arguments$infile),
              str_to_append = "_adapted_w_clust_means")
}
# ------------------------- #
# main #
# ------------------------- #
"cluster_lineages.R
Usage:
cluster_lineages.R [--help]
cluster_lineages.R [options] <infile>
Options:
-h --help Show this screen.
-o --outdir=<outdir> Output directory [default: ./]
-u --use_iva Flag to determine whether to use inverse variance weighted avg or arithmentic avg [default: TRUE]
-g --gens=<gens> Number of generations per cycle (used to divide input fitness estimates) [default: 8]
-e --exclude=<env>... Space-separated list of environments to exclude from neutral set calculations
Arguments:
infile Input file(s) containing fitness calls for BFA run.
" -> doc
# define default args for debug_status == TRUE
# (interactive/debug defaults; when debug_status is FALSE,
# run_args_parse() -- sourced from scripts/src/pleiotropy_functions.R --
# replaces them with the docopt-parsed CLI values)
arguments <- list(
  use_iva = TRUE,
  infile = "data/fitness_data/fitness_calls/hBFA1_cutoff-5_adapteds_autodips.csv",
  outdir = "data/fitness_data/fitness_calls",
  gens = "8",
  exclude = "CLM|FLC4|Stan|48Hr"
)
debug_status <- FALSE
arguments <- run_args_parse(arguments, debug_status)
cat("\n**********************\n")
cat("* cluster_lineages.R *\n")
cat("**********************\n\n")
main(arguments)
cat("**Script completed successfully!**\n\n")
| /scripts/cluster_lineages.R | no_license | phumph/pleiotropy | R | false | false | 11,374 | r | #! /usr/bin/env Rscript
# cluster_lineages.R
# This script takes input fitnesses and
# generates cluters for each source-ploidy lineage set.
#
# The script also calculates the cluster-wise weighted (e.g., Maha) mean and se.
# and outputs this as a second file
# ------------------------- #
# header #
# ------------------------- #
suppressWarnings(suppressMessages(library(dplyr)))
suppressWarnings(suppressMessages(library(tidyr)))
suppressWarnings(suppressMessages(library(docopt)))
source(file.path("scripts/src/pleiotropy_functions.R"))
# ------------------------- #
# function definitions #
# ------------------------- #
get_neutral_tree_depth <- function(df, method = "euclidean", quant = 0.95) {
df %>%
dist(method = method) %>%
hclust() ->
neut_clust
q <- stats::quantile(neut_clust$height, prob = quant)
return(as.numeric(q))
}
get_clusters_by_cut <- function(df = NULL, method = "euclidean", cut_at = NULL) {
stopifnot(!is.null(cut_at) & !is.null(df))
df <- df[complete.cases(df), ]
df %>%
dist(method = method) %>%
hclust() %>%
cutree(h = cut_at) ->
cuts
return(data.frame(Full.BC = row.names(df),
cluster = cuts,
stringsAsFactors = FALSE))
}
get_neutral_cov_matr <- function(mat) {
cov(mat)
}
maha_mean <- function(x, x_var, covm) {
  # Inverse-variance (Mahalanobis) weighted mean of one or more lineages.
  #
  # x:     matrix of row vectors (or a single vector) of per-lineage means
  # x_var: matching matrix/vector of per-lineage variances
  # covm:  template covariance matrix; off-diagonals are kept, the diagonal
  #        is replaced by each lineage's variances
  # Returns list(mean = pooled mean vector, covm = pooled covariance).
  if (all(is.vector(x), is.vector(x_var))) {
    # Single observation: nothing to pool, just plug the variances into covm.
    sigma_hat <- covm
    diag(sigma_hat) <- x_var
    return(list(mean = x,
                covm = sigma_hat))
  }
  # Precision-weighted pooling over rows:
  #   sigma_hat = (sum_i S_i^-1)^-1,  mean = sigma_hat %*% sum_i S_i^-1 x_i
  # BUG FIX: the original looped over seq_along(nrow(x)), which is always a
  # single iteration (seq_along of a scalar), so only the first row was ever
  # used; seq_len(nrow(x)) visits every row.
  for (i in seq_len(nrow(x))) {
    sigma_i <- covm
    diag(sigma_i) <- x_var[i, ]
    sigma_i_inv <- solve(sigma_i)
    maha_x <- sigma_i_inv %*% x[i, ]
    if (i == 1) {
      sigma_hat_inv <- sigma_i_inv
      maha_x_tot <- maha_x
    } else {
      sigma_hat_inv <- sigma_hat_inv + sigma_i_inv
      maha_x_tot <- maha_x_tot + maha_x
    }
  }
  # Solve once after accumulation (the original re-solved every iteration).
  sigma_hat <- solve(sigma_hat_inv)
  maha_mean_vec <- sigma_hat %*% maha_x_tot
  mean_out <- c(maha_mean_vec)
  names(mean_out) <- row.names(maha_mean_vec)
  return(list(mean = mean_out,
              covm = sigma_hat))
}
calc_clust_dist <- function(...) {
args <- list(...)
stopifnot(all(c("C_i", "C_j", "dist_fun", "covm") %in% names(args)))
stopifnot(exists(args$dist_fun))
dist_fun <- get(args$dist_fun)
x_i <- dist_fun(x = args$C_i$x_i, x_var = args$C_i$x_i_var, covm = args$covm)
x_j <- dist_fun(x = args$C_j$x_j, x_var = args$C_j$x_j_var, covm = args$covm)
d_ij = t(x_i$mean - x_j$mean) %*%
solve(x_i$covm + x_j$covm) %*%
(x_i$mean - x_j$mean)
# equivalent to mahalanobis(x_i$mean, x_j$mean, cov = x_i$covm + x_j$covm)
return(d_ij)
}
numerize <- function(x) {
if (nrow(x) == 1){
return(as.vector(x))
} else {
return(as.matrix(x))
}
}
reduce_clusters_by_dist <- function(clust_vec = NULL,
mean_matr = NULL,
var_matr = NULL,
covm = NULL,
se = FALSE,
dist_fun = "maha_mean",
p_cutoff = 0.99) {
stopifnot(all(!is.null(covm), !is.null(mean_matr), !is.null(var_matr)))
df <- ncol(mean_matr)
dist_cutoff <- qchisq(p = p_cutoff, df = df)
if (is.null(clust_vec)) {
clust_vec <- seq_len(dim(mean_matr)[1])
} else {
stopifnot(is.vector(clust_vec) &
length(clust_vec) == nrow(mean_matr) &
nrow(mean_matr) == nrow(var_matr))
}
if (se == TRUE) {
var_matr <- var_matr^2
}
while (length(unique(clust_vec)) > 1) {
DISTS <- list()
clust_ids <- unique(clust_vec)
clust_vec %>%
plyr::mapvalues(from = clust_ids,
to = seq_len(length(unique(clust_vec)))) ->
clust_vec
n_clusts <- length(unique(clust_vec))
clust_ids <- unique(clust_vec)
cat(sprintf("\tNum. clusters = %s\n", n_clusts))
# calculate pairwise dist between each cluster
for (i in 1:(n_clusts - 1)) {
if (i %in% clust_vec == FALSE) {
i <- i + 1
}
c_i <- clust_ids[i]
x_i <- as.matrix(mean_matr[clust_vec == c_i, ])
x_i_var <- as.matrix(var_matr[clust_vec == c_i, ])
C_i <- list(x_i = x_i,
x_i_var = x_i_var)
for (j in (i + 1):n_clusts) {
if (j %in% clust_vec == FALSE) {
j <- j + 1
}
c_j <- clust_ids[j]
x_j <- as.matrix(mean_matr[clust_vec == c_j, ])
x_j_var <- as.matrix(var_matr[clust_vec == c_j, ])
C_j <- list(x_j = x_j,
x_j_var = x_j_var)
# calc dist
d_ij = calc_clust_dist(C_i = C_i,
C_j = C_j,
covm = covm,
dist_fun = "maha_mean")
DISTS <- append(DISTS, list(data.frame(i = c_i, j = c_j, d_ij = d_ij)))
}
}
D_res <- do.call(rbind, DISTS)
D_min <- min(D_res$d_ij)
if (D_min >= dist_cutoff) {
break
}
row_min <- which(D_res$d_ij == D_min)
clust_vec[clust_vec %in% c(D_res$i[row_min],
D_res$j[row_min])] <- D_res$i[row_min]
}
cat("Done!\n")
if (all(!is.null(row.names(mean_matr)), !is.null(row.names(var_matr)))) {
res <- data.frame(Full.BC = row.names(mean_matr),
cluster = clust_vec,
stringsAsFactors = FALSE)
return(res)
}
return(clust_vec)
}
average_clusters <- function(clusters, mean_matr, var_matr, covm) {
if (is.data.frame(clusters) & "cluster" %in% names(clusters)) {
clust_ids <- clusters$cluster
} else {
clust_ids <- clusters
}
df_full <- data.frame()
for (clust in seq_along(unique(clust_ids))) {
clust_mean <- maha_mean(x = as.matrix(mean_matr[clust_ids == clust, ]),
x_var = as.matrix(var_matr[clust_ids == clust, ]),
covm = covm)
names(clust_mean$mean) %>%
sapply(function(x) {
x %>%
strsplit("\\.") %>%
unlist() %>%
dplyr::first()
}) -> envs
names(clust_mean$mean) <- envs
data.frame(bfa_env = names(clust_mean$mean),
s = clust_mean$mean,
s_se = sqrt(diag(clust_mean$covm)),
cluster = clust,
n_bcs = length(clust_ids[clust_ids == clust])) ->
df_tmp
df_full %>%
dplyr::bind_rows(df_tmp) ->
df_full
}
return(df_full)
}
# ------------------------- #
# main def #
# ------------------------- #
main <- function(arguments) {
# check + grab input files
if (sum(!sapply(arguments$infiles, file.exists)) > 0) {
stop(
"One or more infile does not exist. Please check infile path and try again.",
call. = FALSE
)
}
# read infiles
infile <- read.table(arguments$infile,
sep = ",",
header = T,
stringsAsFactors = F)
# grab barcodes to retain after filtering:
adapted_df <-
infile %>%
dplyr::filter(Subpool.Environment != "not_read") %>%
filter_to_focal_bcs(
retain_neutrals = FALSE,
retain_adapteds = TRUE,
retain_autodips = FALSE
)
neutral_df <-
infile %>%
filter_to_focal_bcs(
retain_neutrals = TRUE,
retain_adapteds = FALSE,
retain_autodips = FALSE
)
# find tree depth of neutrals with mahalanobis distance:
neutral_df %>%
prep_fit_matrix(means_only = FALSE,
excludes = arguments$exclude,
iva_s = arguments$use_iva,
gens = arguments$gens) ->
neutral_df_matr
neutral_df_matr$means %>%
get_neutral_tree_depth(quant = 0.95) ->
neutral_clust_height
neutral_df_matr$means %>%
get_neutral_cov_matr() ->
neutral_cov
# use neutral tree depth
# to define initial clusters for each environment
adapted_df %>%
split(adapted_df$Subpool.Environment) ->
adapted_by_env
adapted_df_w_clust <- list()
clust_wise_means <- list()
for (env in seq_along(adapted_by_env)) {
cat(sprintf("\nWorking on env %s...\n", names(adapted_by_env)[env]))
adapted_by_env[[env]] %>%
prep_fit_matrix(means_only = FALSE,
excludes = arguments$exclude,
iva_s = arguments$use_iva,
gens = arguments$gens) ->
matr_prepped
if (all(lapply(matr_prepped, nrow) == 1)) {
clusts_final <- data.frame(Full.BC = row.names(matr_prepped$means),
cluster = "1",
stringsAsFactors = FALSE)
} else {
matr_prepped$means %>%
get_clusters_by_cut(cut_at = neutral_clust_height) ->
clusts_initial
clusts_initial$cluster %>%
reduce_clusters_by_dist(mean_matr = matr_prepped$means,
var_matr = matr_prepped$sigmas^2,
covm = neutral_cov) ->
clusts_final
}
adapted_by_env[[env]] %>%
dplyr::left_join(clusts_final, by = "Full.BC") ->
adapted_df_w_clust[[env]]
clusts_final %>%
average_clusters(mean_matr = matr_prepped$means,
var_matr = matr_prepped$sigmas^2,
covm = neutral_cov) ->
clust_wise_mean
clust_wise_mean$source <- names(adapted_by_env)[env]
clust_wise_means[[env]] <- clust_wise_mean
}
adapted_df_w_clust_full <- do.call(rbind, adapted_df_w_clust)
clust_wise_means_full <- do.call(rbind, clust_wise_means)
adapted_df_w_clust_full %>%
write_out(out_dir = arguments$outdir,
base_name = basename(arguments$infile),
str_to_append = "_adapted_w_clusts")
clust_wise_means_full %>%
write_out(out_dir = arguments$outdir,
base_name = basename(arguments$infile),
str_to_append = "_adapted_w_clust_means")
}
# ------------------------- #
# main #
# ------------------------- #
"cluster_lineages.R
Usage:
cluster_lineages.R [--help]
cluster_lineages.R [options] <infile>
Options:
-h --help Show this screen.
-o --outdir=<outdir> Output directory [default: ./]
-u --use_iva Flag to determine whether to use inverse variance weighted avg or arithmentic avg [default: TRUE]
-g --gens=<gens> Number of generations per cycle (used to divide input fitness estimates) [default: 8]
-e --exclude=<env>... Space-separated list of environments to exclude from neutral set calculations
Arguments:
infile Input file(s) containing fitness calls for BFA run.
" -> doc
# define default args for debug_status == TRUE
arguments <- list(
use_iva = TRUE,
infile = "data/fitness_data/fitness_calls/hBFA1_cutoff-5_adapteds_autodips.csv",
outdir = "data/fitness_data/fitness_calls",
gens = "8",
exclude = "CLM|FLC4|Stan|48Hr"
)
debug_status <- FALSE
arguments <- run_args_parse(arguments, debug_status)
cat("\n**********************\n")
cat("* cluster_lineages.R *\n")
cat("**********************\n\n")
main(arguments)
cat("**Script completed successfully!**\n\n")
|
library(data.table)
df <- fread("household_power_consumption.txt", sep=";")
dfa <- df[grep("[12]/2/2007",df$Date)]
a <- paste(dfa$Date,dfa$Time,sep=" ")
b <- strptime(a,"%d/%m/%Y %H:%M:%S")
t1 <- strptime("1/2/2007 00:00:00","%d/%m/%Y %H:%M:%S")
t2 <- strptime("2/2/2007 23:59:59","%d/%m/%Y %H:%M:%S")
d <- (t1<=b & b <=t2)
dfs<-dfa[d]
x <- strptime(paste(dfs$Date,dfs$Time,sep=" "),"%d/%m/%Y %H:%M:%S")
Gap <- as.numeric(dfs$Global_active_power)
Voltage <- as.numeric(dfs$Voltage)
Grp <- as.numeric(dfs$Global_reactive_power)
sm1 <- as.numeric(dfs$Sub_metering_1)
sm2 <- as.numeric(dfs$Sub_metering_2)
sm3 <- as.numeric(dfs$Sub_metering_3)
par(mfrow = c(2,2))
plot(x,Gap,type="l",ylab = "Global Active Power",xlab = NA)
plot(x,Voltage,type="l",xlab = NA)
plot(x, sm1, type="l",col="black",ylab = "Energy Sub metering",xlab = NA)
lines(x,sm2,col="red")
lines(x,sm3,col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"),lty = 1,cex=0.2)
plot(x,Grp,type="l",xlab = "datetime", ylab = "Global_reactive_power")
dev.copy(png,file="plot4.png")
dev.off() | /plot4.R | no_license | bjwwm013/ExData_Plotting1 | R | false | false | 1,112 | r | library(data.table)
# plot4.R: four-panel plot of household power consumption for 1-2 Feb 2007.
df <- fread("household_power_consumption.txt", sep=";")
# keep rows dated 1/2/2007 or 2/2/2007 (d/m/Y format)
dfa <- df[grep("[12]/2/2007",df$Date)]
a <- paste(dfa$Date,dfa$Time,sep=" ")
b <- strptime(a,"%d/%m/%Y %H:%M:%S")
t1 <- strptime("1/2/2007 00:00:00","%d/%m/%Y %H:%M:%S")
t2 <- strptime("2/2/2007 23:59:59","%d/%m/%Y %H:%M:%S")
# redundant belt-and-braces range filter on the parsed timestamps
d <- (t1<=b & b <=t2)
dfs<-dfa[d]
x <- strptime(paste(dfs$Date,dfs$Time,sep=" "),"%d/%m/%Y %H:%M:%S")
# columns were read as character (missing values are "?"), hence as.numeric
Gap <- as.numeric(dfs$Global_active_power)
Voltage <- as.numeric(dfs$Voltage)
Grp <- as.numeric(dfs$Global_reactive_power)
sm1 <- as.numeric(dfs$Sub_metering_1)
sm2 <- as.numeric(dfs$Sub_metering_2)
sm3 <- as.numeric(dfs$Sub_metering_3)
par(mfrow = c(2,2)) # 2x2 panel layout
plot(x,Gap,type="l",ylab = "Global Active Power",xlab = NA)
plot(x,Voltage,type="l",xlab = NA)
plot(x, sm1, type="l",col="black",ylab = "Energy Sub metering",xlab = NA)
lines(x,sm2,col="red")
lines(x,sm3,col="blue")
# NOTE(review): cex=0.2 makes the legend tiny on the 480px PNG -- confirm
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"),lty = 1,cex=0.2)
plot(x,Grp,type="l",xlab = "datetime", ylab = "Global_reactive_power")
# copy the on-screen device to plot4.png (default 480x480)
dev.copy(png,file="plot4.png")
dev.off() |
#
# use readLines() to read in lists of file names
format.for.condor <- function(list.of.clone.files, list.of.count.files) {
formatted.vector <- NULL;
output.file.names <- NULL;
# strip extensions from file names, for output file names
for(i in 1:length(list.of.clone.files)) {
output.file.names[i] <- sub("[.][^.]*$", "", list.of.clone.files[i]);
} # for
# TODO - verify paralellity of input files
for (i in 1:length(list.of.clone.files)) {
formatted.vector[i] <- paste(
"output=$(log_dir)stdout_normalize_clonotypes_", i, ".out\n",
"error=$(log_dir)stderr_normalize_clonotypes_", i, ".out\n",
"log=$(log_dir)condor_normalize_clonotypes_", i, ".log\n",
"arguments=$(script_dir)normalize.clonotype.counts.condor.R ",
"$(mixcr_dir)exported/", list.of.clone.files[i], " ", # clone input
"$(count_dir)", list.of.count.files[i], " ", # count input
"$(mixcr_dir)normalized_clones/ ", # ouput directory
"$(mixcr_dir)reference/scaling_factor.txt",
"\nqueue 1\n",
sep="");
} # while
write.table(formatted.vector,
file="formatted_for_normalize_clonotype_counts.txt",
row.names = FALSE,
col.names = FALSE,
quote = FALSE);
} # format.for.condor()
| /condor_tools/orig/format.normalize.condor.R | no_license | weshorton/tcr_sequencing_tools | R | false | false | 1,420 | r |
#
# use readLines() to read in lists of file names
format.for.condor <- function(list.of.clone.files, list.of.count.files) {
formatted.vector <- NULL;
output.file.names <- NULL;
# strip extensions from file names, for output file names
for(i in 1:length(list.of.clone.files)) {
output.file.names[i] <- sub("[.][^.]*$", "", list.of.clone.files[i]);
} # for
# TODO - verify paralellity of input files
for (i in 1:length(list.of.clone.files)) {
formatted.vector[i] <- paste(
"output=$(log_dir)stdout_normalize_clonotypes_", i, ".out\n",
"error=$(log_dir)stderr_normalize_clonotypes_", i, ".out\n",
"log=$(log_dir)condor_normalize_clonotypes_", i, ".log\n",
"arguments=$(script_dir)normalize.clonotype.counts.condor.R ",
"$(mixcr_dir)exported/", list.of.clone.files[i], " ", # clone input
"$(count_dir)", list.of.count.files[i], " ", # count input
"$(mixcr_dir)normalized_clones/ ", # ouput directory
"$(mixcr_dir)reference/scaling_factor.txt",
"\nqueue 1\n",
sep="");
} # while
write.table(formatted.vector,
file="formatted_for_normalize_clonotype_counts.txt",
row.names = FALSE,
col.names = FALSE,
quote = FALSE);
} # format.for.condor()
|
#!/usr/bin/env Rscript
# pluta 11/7/19
# this script was written ad-hoc as analysis developed, a finalized version
# will be much more organized
#rm(list = ls())
#args = commandArgs(trailingOnly = TRUE)
#if( length(args) < 1)
#{
# stop('need to provide arguments: FILENAME, the reference snp of the credset')
#}
INFILENAME=args[1]
len <- nchar(INFILENAME)
if( substr(INFILENAME, len, len) == "/" )
{
INFILENAME <- substr(INFILENAME, 1, len - 1)
}
# each locus is computed independently; run script per locus
# ENCSR418RNI; ENCSR886NTH; ENCSR303XKE; ENCSR898RGU all annotate identically
# drop three of these
# ================== preprocesser =================== #
# ---------------------------- libraries ------------- #
library(data.table)
library(ggplot2)
library(IRanges)
library(BiocManager)
# NOTE(review): machine-specific absolute path; setwd() in a script breaks
# portability -- consider deriving the working directory from the (currently
# commented-out) command-line arguments instead
setwd("/Users/johnpluta/Documents/nathansonlab/tecac-manuscript/annotation")
# --------------------------------------------------- #
# ----- header definitions ----- #
# column-name vectors for the ENCODE bed flavors read below by readBed()
methyl.bed.header <- c("Chrom", "Start", "End", "Name", "Score", "strand","Start2", "End2", "rgb", "count", "percentMeth")
narrowpeak.bed.header <- c("Chrom", "Start", "End", "name", "Score", "strand", "signalVal", "-log10(p)", "-log10(q)", "peak")
broadpeak.bed.header <- c("Chrom", "Start", "End", "name", "Score", "strand", "signalVal", "p", "q")
cis.reg.header <- c("Chrom", "Start", "End", "Cis.Reg.Name", "Score", "strand", "Start2", "End2", "color")
# chromosome levels in karyotype order (chr1..chr22, chrX), used as factor
# levels so bed rows sort numerically rather than alphabetically
# (seq(1:22) is equivalent to 1:22 here)
chr.in.order <- c(paste("chr", c(seq(1:22), "X"), sep = ""))
# ----------------------------- #
# =================================================== #
# ================== functions =========================== #
# ---------------- getLRT.p ----------------------------- #
# get the p-value for the likelihood ratio test between the baseline
# model and the model with selected covariate(s)
getLRT.p <- function( logBF.Base, logBF.Model )
# input: logBF.Base, logBF.model (numeric): log(baye's factor) from
# the two models- this comes from PAINTOR
# output: p-value (numeric), p-value for the LRT between the two models
{
# LRT ~ x^2 with 1df
return( 1 - pchisq( -2 * (logBF.Base - logBF.Model), 1))
}
# -------------------------------------------------------- #
# ----------------------- inEqtl ------------------------- #
# flag which eQTL LD blocks contain a given snp position
inEqtl <- function( pos, eqtl )
# input: pos (integer), snp position (hg38)
#        eqtl (data.frame), must carry LD.block.start / LD.block.end
# output: (logical) TRUE for each eqtl row whose LD block spans pos
{
  inside <- pos >= eqtl$LD.block.start & pos <= eqtl$LD.block.end
  return( inside )
}
# -------------------------------------------------------- #
# -------------------- snpAnnotSimple -------------------- #
# simple version of annotate snp: returns the indices of the query
# positions that fall inside an annotated region, so the caller no
# longer needs to convert the output to logical
snpAnnotSimple <- function( pos, bed, chr )
# input: pos (integer), snp position(s)
#        bed (data.frame), raw bed data (V1 = chrom, V2 = start, V3 = end)
#        chr (string), the chromosome of interest, e.g. "chr5"
# output: (integer) indices into pos whose snp lies within a bed region
{
  # restrict the annotation to the chromosome of interest
  chrom.bed <- bed[ bed$V1 == chr, ]
  # each snp is a width-1 interval; findOverlaps() is from IRanges
  snp.ranges <- IRanges( pos, pos )
  region.ranges <- IRanges( chrom.bed$V2, chrom.bed$V3 )
  hits <- findOverlaps( snp.ranges, region.ranges, type = "within" )
  return( hits@from )
}
# -------------------------------------------------------- #
# ----------------------- replaceNA ---------------------- #
# the annotation matrix handed to PAINTOR cannot contain NA or it will
# crash; for annotation purposes it is sufficient to replace NA with 0
replaceNA <- function(x)
{
  return( replace(x, is.na(x), 0) )
}
# --------------------------------------------------------- #
# --------------- joinMethylReplicates ------------------- #
# Intersect two replicate methylation/peak bed data.frames on the Start
# coordinate, keeping only positions present in both replicates.  The
# reported value (percentMeth or q) is taken from the FIRST replicate.
joinMethylReplicates <- function( dat1, dat2, chr = NULL)
# dat1 (data.frame), bed data of replicate 1 (Chrom/Start/End + value)
# dat2 (data.frame), bed data of replicate 2
# chr (string or NULL), if NULL run genome wide, else subset to that chrom
# output: data.frame with columns Start, Chrom, End, and either
#         percentMeth or q (whichever the inputs carry)
{
  # optionally restrict both replicates to one chromosome
  if( !is.null(chr) )
  {
    dat1 <- dat1[ dat1$Chrom == chr, ]
    dat2 <- dat2[ dat2$Chrom == chr, ]
  }

  # inner join on Start keeps only positions observed in both replicates;
  # replicate-1 columns come out suffixed ".x"
  merged <- merge(dat1, dat2, by.x = "Start", by.y = "Start")
  merged <- merged[ !is.na(merged$Chrom.x), ]

  # keep Start plus replicate 1's chrom/end/value and strip the suffix.
  # NOTE: the positional rename relies on the input column order
  # (Chrom, Start, End, ..., value) produced by readBed()
  if( "percentMeth.x" %in% colnames(merged) )
  {
    out <- merged[ , colnames(merged) %in% c("Start", "Chrom.x", "End.x", "percentMeth.x") ]
    colnames(out) <- c("Start", "Chrom", "End", "percentMeth")
  } else
  {
    out <- merged[ , colnames(merged) %in% c("Start", "Chrom.x", "End.x", "q.x") ]
    colnames(out) <- c("Start", "Chrom", "End", "q")
  }
  # (a dead "out <- c()" accumulator plus rbind() -- leftover from a
  # removed loop that only ever ran once -- was dropped here)
  return(out)
}
# ------------------------------------------------------------#
# ---------------- attachSnpAnnotMethyl ------------------- #
# Attach a methylation feature to a snp by exact position match.
# Differs from attachSnpAnnot() in that methylation rows are single
# positions, so we test equality of the start coordinate rather than
# a [Start, End] range overlap.
attachSnpAnnotMethyl <- function( pos, dat, chr, varname )
# input: pos (integer), snp position (hg19 or hg38 -- must match dat)
#        dat (data.frame), methylation data; may come from readBed()
#                          (Chrom/Start columns) or readMethylIdat()
#                          (chr/start columns) -- both spellings handled
#        chr (string), chromosome of interest, e.g. "chr5"
#        varname (string), which column of dat to return on a match
# output: the varname value at pos (";"-collapsed if several rows
#         match), or NA when there is no match
{
  if( is.null(pos) )
  {
    stop("pos has null value, did you pass the right attribute?")
  }

  # BUG FIX: the two upstream readers name their columns differently
  # (readBed: Chrom/Start; readMethylIdat: chr/start); the old hard-coded
  # casing meant one of the two data shapes silently never matched
  chrom.col <- intersect(c("Chrom", "chr"), colnames(dat))[1]
  start.col <- intersect(c("Start", "start"), colnames(dat))[1]
  if( is.na(chrom.col) || is.na(start.col) )
  {
    stop("dat must have a Chrom/chr and a Start/start column")
  }

  # subset so we only consider positions on the snp's chromosome
  dat <- dat[ dat[[chrom.col]] == chr, ]

  if( !(varname %in% colnames(dat)) )
  {
    print(paste(varname, " not found in data.", sep = ""))
    stop()
  }

  # BUG FIX: use which() so that "no match" is a length-0 index; the old
  # logical-vector test (length(ind) > 0) was true whenever dat had rows,
  # returning a zero-length value that vanished in the caller's unlist()
  # and misaligned the annotation vector against crv.dat
  ind <- which( dat[[start.col]] %in% pos )
  if( length(ind) > 0 )
  {
    x <- dat[[varname]][ind]
    # one position can map to multiple rows; concatenate into one value
    if( length(x) > 1 )
    {
      print(paste("Start position:", pos, "had", length(x), "matches", sep = " "))
      x <- paste(dat[[varname]][ind], collapse = ";")
    }
    return( x )
  }
  return(NA)
}
# ----------------------------------------------------------- #
# ---------------- attachSnpAnnot ------------------- #
# Attach a feature from a bed-style annotation to a snp position.
attachSnpAnnot <- function( pos, dat, chr, varname )
# input:
# pos (integer), position of a snp from the credible set
# dat (data.frame), bed data with Chrom/Start/End columns
# chr (string), chromosome to subset by, e.g. "chr5"
# varname (string), which attribute to return; only matters when the
#                   caller wants more than a binary present/absent value
#
# output: the varname value(s) whose [Start, End] interval contains pos,
#         ";"-collapsed when several regions overlap; NA when none do
{
  if( is.null(pos) )
  {
    stop("pos has null value, did you pass the right attribute?")
  }

  # only consider annotation rows on the snp's chromosome
  dat <- dat[ dat$Chrom == chr, ]

  if( !(varname %in% colnames(dat)) )
  {
    print(paste(varname, " not found in data.", sep = ""))
    stop()
  }

  # rows whose interval [Start, End] covers the snp position
  hits <- which( pos >= dat$Start & pos <= dat$End )
  if( length(hits) == 0 )
  {
    return(NA)
  }

  vals <- dat[[varname]][hits]
  # one snp can fall in several overlapping regions; collapse to one value
  if( length(vals) > 1 )
  {
    print(paste("Start position:", pos, "had", length(vals), "matches", sep = " "))
    vals <- paste(dat[[varname]][hits], collapse = ";")
  }
  return( vals )
}
# ------------------------------------------------------------- #
# ------------------------ readMethylIdat --------------------- #
# read methylation data in .idat format
# Returns a data.frame with one row per probe: chr (chromosome label),
# start (genomic position), name (probe id).
# NOTE(review): read.metharray()/preprocessIllumina()/mapToGenome() are
# presumably from the minfi Bioconductor package -- the library() call is
# not visible in this chunk; confirm it is loaded before calling.
readMethylIdat <- function( IDATNAME )
{
  # reads in two idats with the same rootname
  # eg ROOT_Red.idat and ROOT_Grn.idat
  idats <- c(IDATNAME)
  rgset <- read.metharray(idats, verbose = T)
  # normalize raw intensities, then map probes to genomic coordinates
  mset <- preprocessIllumina(rgset)
  mset <- mapToGenome(mset)
  # seqnames is run-length encoded, so expand values by their run
  # lengths to get one chromosome label per probe row
  df <- data.frame( chr = rep(mset@rowRanges@seqnames@values, mset@rowRanges@seqnames@lengths),
                    start = mset@rowRanges@ranges@start,
                    name = rownames(mset) )
  colnames(df) <- c("chr", "start", "name")
  return(df)
}
# ------------------------------------------------------------ #
# --------------------------- readBed ------------------------ #
# Read a bed file, attach the supplied header, and sort rows by
# chromosome (chr1..chr22, chrX order) and then by start position.
readBed <- function( BEDFILE, bedheader )
# input: BEDFILE (string), path to the bed file
#        bedheader (character), column names to assign
# output: (data.frame) sorted bed data; when the header carries
#         -log10(p) / -log10(q) columns, plain p and q columns are added
{
  # fread() (data.table) for speed, then back to a plain data.frame
  bed <- as.data.frame( fread(BEDFILE, header = F) )
  colnames(bed) <- bedheader

  # factor levels force karyotype order rather than alphabetical order
  bed$Chrom <- factor(bed$Chrom, chr.in.order)
  bed <- bed[ order(bed$Chrom, bed$Start), ]

  # q-value is the p-value adjusted for multiple comparisons;
  # recover raw p/q from their -log10 transforms via 10^-x
  if( "-log10(p)" %in% colnames(bed) )
  {
    bed$p <- 10^-(bed$`-log10(p)`)
  }
  if( "-log10(q)" %in% colnames(bed) )
  {
    bed$q <- 10^-(bed$`-log10(q)`)
  }
  return(bed)
}
# ---------------------------------------------------------- #
# ----------------------- convertToBinInt ------------------- #
# Collapse a column of any type to a presence/absence flag: NA and 0
# become 0L, anything else becomes 1L (e.g. is a chromatin feature
# present at this snp or not)
convertToBinInt <- function( var )
# input: var, a column of data from the annotation table
# output: (integer) vector of 0/1 flags, same length as var
{
  flag <- rep(0L, length(var))
  # NA counts as absent; any other non-zero value counts as present
  flag[ !is.na(var) & var != 0 ] <- 1L
  return(flag)
}
# ---------------------------------------------------------- #
# ------------------------ convertToBED -------------------- #
# Convert methylation data (merged-bed or idat-derived) to a BED layout.
convertToBED <- function( dat )
# input: dat (data.frame), either Chrom/Start/End/percentMeth (merged
#        replicate bed data) or chr/start/name (idat probe data)
# output: (data.frame) 4-column BED-style table
{
  # bed-style input already has Chrom/Start/End: just subset/reorder
  if( all(c("Start", "Chrom", "End") %in% colnames(dat)) )
  {
    bed <- data.frame( Chrom = dat$Chrom,
                       Start = dat$Start,
                       End = dat$End,
                       percentMeth = dat$percentMeth )
    colnames(bed) <- c("Chrom", "Start", "End", "percentMeth")
    return(bed)
  }

  # idat-style input: chromosome may lack the "chr" prefix
  if( substr(dat$chr[1], 1, 1) != "c" )
  {
    dat$chr <- paste("chr", as.character(dat$chr), sep = "")
  }
  # BED is zero-based half-open, so a probe position p becomes [p-1, p)
  bed <- data.frame( chrom = dat$chr,
                     chromStart = dat$start - 1,
                     chromEnd = dat$start,
                     name = dat$name )
  colnames(bed) <- c("chrom", "chromStart", "chromEnd", "name")
  return(bed)
}
# ---------------------------------------------------------- #
# ======================= end functions ================================ #
newhits <- c("1:212449403",
"2:111927379",
"5:1280128",
"6:32032421",
"6:33533625",
"8:120933963",
"9:779507",
"9:127190340",
"9:140073294",
"10:7534248",
"11:30351223",
"12:1051495",
"12:51301431",
"12:53793209",
"17:76691564",
"18:692095",
"19:28356614",
"20:52197366",
"X:100432681",
"X:153535143",
"X:24384181",
"X:66489986")
# ========================= MAIN ========================== #
print("Parsing credSet file...")
# the CRV file already has hg19 and hg38 positions attached
credSetFile <- paste(INFILENAME, "/", INFILENAME, ".credSet", sep = "")
crv.dat <- read.table(credSetFile, header = TRUE, as.is= TRUE)
crv.dat$h19pos <- as.integer(unlist(lapply(strsplit(crv.dat$SNP.ID.hg19, ":"), function(x) x[2])))
crv.dat$h38pos <- as.integer(unlist(lapply(strsplit(crv.dat$SNP.ID.hg38, ":"), function(x) x[2])))
chr = paste("chr", crv.dat$chr[1], sep = "")
crv.dat$z <- crv.dat$Effect / crv.dat$se
print("done!")
# write locus file
print("writing locus file...")
LOCUSFILE <- paste(INFILENAME, INFILENAME, sep = "/")
write.table(crv.dat[,colnames(crv.dat) %in% c("chr", "SNP.ID.hg19", "rsID", "P", "z", "h19pos" )],
LOCUSFILE, col.names = T, row.names = F, quote = F)
print("done!")
print("get features from biomart...")
# get snp and gene mappings
# germline data
grch37.snp = useMart(biomart="ENSEMBL_MART_SNP", host="grch37.ensembl.org",dataset="hsapiens_snp")
#Mart used to map Ensembl Gene IDs to Gene name
grch37 = useMart(biomart="ENSEMBL_MART_ENSEMBL", host="grch37.ensembl.org", path="/biomart/martservice", dataset="hsapiens_gene_ensembl")
snp.dat <- getBM(attributes = c("refsnp_id", "ensembl_gene_stable_id", "associated_gene", "consequence_type_tv"),
filters = "snp_filter",
values = crv.dat$rsID,
mart = grch37.snp)
# associated gene doesnt map to every instance of ensembl gene, and some
# annotations are missing- use this list from ENSEMBL and dbsnp
snp.dat$associated_gene <- as.character(snp.dat$associated_gene)
# local db of enseml genes and associated gene name
en.list <- c("ENSG00000152219", "ENSG00000186452", "ENSG00000261824",
"ENSG00000261770", "ENSG00000267575", "ENSG00000272635",
"ENSG00000267389", "ENSG00000266977", "ENSG00000267623",
"ENSG00000267630", "ENSG00000234915", "ENSG00000066027",
"ENSG00000065600", "ENSG00000234915", "ENSG00000115112",
"ENSG00000124374", "ENSG00000114850", "ENSG00000108669",
"ENSG00000267623", "ENSG00000092345", "ENSG00000173890",
"ENSG00000268220", "ENSG00000173889", "ENSG00000163558",
"ENSG00000109323", "ENSG00000109332", "ENSG00000246560",
"ENSG00000145354", "ENSG00000164037", "ENSG00000164038",
"ENSG00000164039", "ENSG00000138778", "ENSG00000248740",
"ENSG00000138769", "ENSG00000138757", "ENSG00000246541",
"ENSG00000163104", "ENSG00000163106", "ENSG00000164362",
"LRG_343", "ENSG00000204344", "ENSG00000234947",
"ENSG00000226257", "ENSG00000226033", "ENSG00000206342",
"ENSG00000272295", "ENSG00000206338", "ENSG00000236250",
"ENSG00000204344", "ENSG00000234947", "ENSG00000168477",
"ENSG00000213676", "ENSG00000233323", "ENSG00000234539",
"ENSG00000231608", "ENSG00000168468", "ENSG00000206258",
"ENSG00000229353", "ENSG00000228628", "ENSG00000112514",
"ENSG00000197283", "ENSG00000242014", "ENSG00000226492",
"ENSG00000227460", "ENSG00000245330", "ENSG00000176058",
"ENSG00000176101", "ENSG00000176248", "ENSG00000176884",
"ENSG00000261793", "ENSG00000002016", "ENSG00000170374",
"ENSG00000185591", "ENSG00000135409", "ENSG00000257379",
"ENSG00000205352", "ENSG00000197111", "ENSG00000139625",
"ENSG00000139546", "ENSG00000267281", "ENSG00000170653",
"ENSG00000135390", "ENSG00000176105", "ENSG00000132199",
"ENSG00000265490" ,"ENSG00000266171", "ENSG00000261824",
"ENSG00000261770", "ENSG00000102384", "ENSG00000268013",
"ENSG00000007350", "ENSG00000269329", "ENSG00000196924",
"ENSG00000102080", "ENSG00000185254", "ENSG00000223731",
"ENSG00000226280", "ENSG00000169083", "ENSG00000204581",
"ENSG00000153093", "ENSG00000153094", "ENSG00000049656",
"ENSG00000132570", "ENSG00000152705", "ENSG00000224186",
"ENSG00000069011", "ENSG00000187678", "ENSG00000231185",
"ENSG00000235168", "ENSG00000055211", "ENSG00000237502",
"ENSG00000186625", "ENSG00000131023", "ENSG00000120253",
"ENSG00000120265", "ENSG00000219433", "ENSG00000231760",
"ENSG00000120256", "ENSG00000268592", "ENSG00000217733",
"ENSG00000164520", "ENSG00000223701", "ENSG00000216906",
"ENSG00000203722", "ENSG00000096433", "ENSG00000030110",
"ENSG00000197251", "ENSG00000204188", "ENSG00000002822",
"ENSG00000176349", "ENSG00000147596", "ENSG00000137090",
"ENSG00000259290", "ENSG00000118369", "ENSG00000033327",
"ENSG00000244573", "ENSG00000121316", "ENSG00000126775",
"ENSG00000182521", "ENSG00000166450", "ENSG00000259180",
"ENSG00000166938", "ENSG00000260773", "ENSG00000075131",
"ENSG00000169032", "ENSG00000261351", "ENSG00000174446",
"ENSG00000174444", "ENSG00000174442", "ENSG00000188501",
"ENSG00000262117", "ENSG00000171490", "ENSG00000263307",
"ENSG00000103342", "ENSG00000261560", "ENSG00000234719",
"ENSG00000156968", "ENSG00000183793", "ENSG00000205423",
"ENSG00000259843", "ENSG00000260381", "ENSG00000261170",
"ENSG00000260539", "ENSG00000090863", "ENSG00000168411",
"ENSG00000168404", "ENSG00000263456", "ENSG00000108753",
"ENSG00000259549", "ENSG00000160321", "ENSG00000269615",
"ENSG00000271095", "ENSG00000269504", "ENSG00000229676",
"ENSG00000268981", "ENSG00000198153", "ENSG00000268789",
"ENSG00000213973", "ENSG00000269509", "ENSG00000268696",
"ENSG00000269067", "ENSG00000271661", "ENSG00000261558",
"ENSG00000261615", "ENSG00000267886", "ENSG00000183850",
"ENSG00000213096", "ENSG00000197372", "ENSG00000269289",
"ENSG00000213967", "ENSG00000229000", "ENSG00000020256",
"ENSG00000228404", "ENSG00000160285", "ENSG00000223901",
"ENSG00000215424", "ENSG00000160294", "ENSG00000228137",
"ENSG00000239415", "ENSG00000182362", "ENSG00000160298",
"ENSG00000160299", "ENSG00000223692", "ENSG00000160305",
"ENSG00000099949", "ENSG00000265148", "ENSG00000213246",
"ENSG00000108375", "ENSG00000176160", "ENSG00000108389",
"ENSG00000264672", "ENSG00000108387", "ENSG00000181013",
"ENSG00000121101", "ENSG00000212195", "ENSG00000108384",
"ENSG00000175175", "ENSG00000263938", "ENSG00000108395",
"ENSG00000224738", "ENSG00000182628", "ENSG00000232160",
"ENSG00000076770", "ENSG00000171004", "ENSG00000123728",
"ENSG00000079313", "ENSG00000129911", "ENSG00000267141")
gene.list <- c("ARL14EP", "TMPRSS12", "LINC00662",
"AC006504.1", "CTC-459F4.3", "LLNLF-65H9.1",
"AC006504.4", "AC006504.2", "not found",
"AC005758.1", "AL360091.3", "PPP2R5A",
"TMEM206", "RP11-384C4.7", "TFCP2L1",
"PAIP2B", "SSR3", "CYTH1", "AC005357.2", "DAZL",
"GPR160", "ENSG00000268220","PHC3", "PRKCI",
"MANBA", "UBE2D3", "UBE2D3-AS1", "CISD2",
"SLC9B1", "SLC9B2", "BDH2", "CENPE",
"LINC02428", "CDKL2", "G3BP2", "AC096746.1",
"SMARCAD1", "HPGDS", "TERT", "TERT",
"STK19", "STK19", "STK19", "STK19", "STK19",
"DAQB-331", "CYP21A2", "STK19", "STK19",
"STK19", "TNXB", "ATF6B", "TNXB", "ATF6B", "TNXB",
"ATF6B", "TNXB", "TNXB", "ATF6B",
"CUTA", "SYNGAP1", "RN7SL26P", "CUTA",
"SYNGAP1", "AP005717.1", "TPRN",
"SSNA1", "ANAPC2", "GRIN1", "AL929554.1", "RAD52",
"SP7", "SP1", "AMHR2", "AC023509.1",
"PRR13", "PCBP2", "MAP3K12", "TARBP2", "ATF7-NPFF",
"ATF7", "ATP5MC2", "YES1", "ENOSF1", "RP11-806L2.6",
"AP0010203.5", "LINC00662", "AC006504.1", "CENPI",
"TKTL1", "TKTL1", "FLNA", "FLNA", "TEX28", "TEX28",
"SUPT20HL1", "AL049641.1", "AR", "ACOXL-AS1", "ACOXL",
"BCL2L11", "CLPTM1L", "PCBD2", "CATSPER3", "C5orf66",
"PITX1", "SPRY4", "SPRY4-AS1", "AL078581.2", "GINM1",
"RP1-12G14.6", "KATNA1", "LATS1", "NUP43", "PCMT1",
"BTBD10P2", "AL355312.2", "LRP11", "AL355312.3",
"CCT7P1", "RAET1E", "RAET1E-AS1", "AL355312.1",
"RAET1G", "ITPR3", "BAK1", "LINC00336", "GGNBP1", "MAD1L1",
"AC104129.1", "PRDM14", "DMRT1", "RP11-687M24.7", "USP35",
"GAB2", "RPL30P11", "PLBD1", "ATG14", "TBPL2", "PRTG",
"AC012378.1", "DIS3L", "RP11-352G18.2", "TIPIN",
"MAP2K1", "AC116913.1", "SNAPC5", "RPL4", "ZWILCH",
"LCTL", "BCAR4", "RSL1D1", "AC007216.4", "GSPT1",
"AC007216.3", "NPIPB2", "MPV17L", "NPIPA5", "CNEP1R1",
"AC007610.1", "RP11-429P3.5", "AC009053.3", "RP11-252A24.7", "GLG1",
"RFWD3", "MLKL", "MIR5189", "HNF1B", "RP11-115K3.1",
"ZNF208", "AC003973.1", "BNIP3P28", "AC003973.4", "ZNF492",
"AC024563.1", "ZNF849P", "VN1R87P", "ZNF99", "BNIP3P34",
"ZNF723", "ZNF728", "BNIP3P36", "LINC01859", "LINC01858", "AC074135.1",
"ZNF730", "ZNF254", "ZNF675", "AC011503.1", "ZNF726",
"SEPT7P8", "ZFP64", "AP001468.1", "LSS", "AP001469.1",
"MCM3AP-AS1", "MCM3AP", "AP001469.2", "AP001469.9",
"YBEY", "C21orf58", "PCNT", "DIP2A-IT1", "DIP2A", "LZTR1",
"TSPOAP1-AS1", "SUPT4H1", "RNF43", "HSF5", "MTMR4",
"SEPTIN4-AS1", "SEPTIN4", "C17orf47", "TEX14", "U3",
"RAD51C", "PPM1E", "GC17M058973", "TRIM37",
"AC099850.1", "SKA2", "RAP2C-AS1", "MBNL3", "HS6ST2", "RAP2C",
"REXO1", "KLF16", "AC012615.4")
gene.key <- data.frame(ensembl = en.list, gene.id = gene.list)
ind = which(snp.dat$ensembl_gene_stable_id != "" & snp.dat$associated_gene != "")
#if( length(ind) > 0)
#{
# tmp <- data.frame( ensembl <- unique(snp.dat$ensembl_gene_stable_id[ind]),
# gene.id <- unique(snp.dat$associated_gene[ind]) )
# colnames(tmp) <- c("ensembl", "gene.id")
# gene.key <- rbind(gene.key, tmp)
#}
if( any(gene.key$gene.id == ""))
{
print("gene.key is missing some definitions- enter these manually into gene.key and rerun")
stop()
}
# map gene data to snp data
ind = match(snp.dat$ensembl_gene_stable_id, gene.key$ensembl)
snp.dat$associated_gene[!is.na(ind)] <- as.character( gene.key$gene.id[ind[!is.na(ind)]] )
gene.dat <- getBM(attributes = c("ensembl_gene_id", "5_utr_start", "5_utr_end",
"3_utr_start", "3_utr_end", "exon_chrom_start", "exon_chrom_end"),
filters = "ensembl_gene_id",
values = snp.dat$ensembl_gene_stable_id,
mart = grch37)
# add exon, 3' utr, 5' utr; snps within range
exonRange <- utr5Range <- utr3Range <- c()
for( gene in unique(gene.dat$ensembl_gene_id))
{
exonRange <- c(IRanges(gene.dat$exon_chrom_start[gene.dat$ensembl_gene_id == gene],
gene.dat$exon_chrom_end[gene.dat$ensembl_gene_id == gene]), exonRange)
tmp <- gene.dat[ !is.na(gene.dat$`5_utr_start`),]
utr5Range <- c(IRanges(tmp$`5_utr_start`[tmp$ensembl_gene_id == gene],
tmp$`5_utr_end`[tmp$ensembl_gene_id == gene]), utr5Range)
tmp <- gene.dat[ !is.na(gene.dat$`3_utr_start`),]
utr3Range <- c(IRanges(tmp$`3_utr_start`[tmp$ensembl_gene_id == gene],
tmp$`3_utr_end`[tmp$ensembl_gene_id == gene]), utr3Range)
}
crv.dat$exons <- 0
crv.dat$utr5 <- 0
crv.dat$utr3 <- 0
if( !any(is.null(c(exonRange, utr5Range, utr3Range) )))
{
crv.dat$exons[findOverlaps(crv.dat$h19pos, exonRange)@from] <- 1
crv.dat$utr5[findOverlaps(crv.dat$h19pos, utr5Range)@from] <- 1
crv.dat$utr3[findOverlaps(crv.dat$h19pos, utr3Range)@from] <- 1
}
results <- merge(snp.dat, gene.dat, by.x = "ensembl_gene_stable_id", by.y = "ensembl_gene_id", all.x=T)
out <- merge(crv.dat, results, by.x = "rsID", by.y = "refsnp_id", all.x = T)
print("done!")
print("adding gene expression data...")
# gene expression ------
# column 1: gene_id - gene name of the gene the transcript belongs to (parent gene). If no gene information is provided, gene_id and transcript_id are the same.
# column 2: transcript_id(s) - transcript name of this transcript
# column 3: length - the transcript's sequence length (poly(A) tail is not counted)
# column 4: effective_length - the length containing only the positions that can generate a valid fragment
# column 5: expected_count - the sum of the posterior probability of each read comes from this transcript over all reads
# column 6: TPM - transcripts per million, a measure of relative measure of transcript abundance
# column 7: FPKM - fragments per kilobase of transcript per million mapped reads, another relative measure of transcript abundance
# column 8: posterior_mean_count - posterior mean estimate calculated by RSEM's Gibbs sampler
# column 9: posterior_standard_deviation_of_count - posterior standard deviation of counts
# column 10: pme_TPM - posterior mean estimate of TPM
# column 11: pme_FPKM - posterior mean estimate of FPKM
# column 12: TPM_ci_lower_bound - lower bound of 95% credibility interval for TPM values
# column 13: TPM_ci_upper_bound - upper bound of 95% credibility interval for TPM values
# column 14: FPKM_ci_lower_bound - lower bound of 95% credibility interval for FPKM values
# column 15: FPKM_ci_upper_bound - upper bound of 95% credibility interval for FPKM values
dat4 <- as.data.frame(fread("ENCFF438ZIY.tsv", header = T))
# gene ids here have a version number or something, remove this
dat4$gene_id <- unlist(lapply(strsplit(dat4$gene_id, "\\."), function(x) x[1]))
dat4 <- data.frame(dat4$gene_id, dat4$TPM)
colnames(dat4) <- c("gene_id", "ENCFF438ZIY_TPM")
results2 <- merge(dat4, results, by.x = "gene_id", by.y = "ensembl_gene_stable_id", all.x=T)
rm(dat4)
# drop any genes that dont pertain to the data
results2 <- results2[!is.na(results2$refsnp_id),]
# ENCSR229WIW 2016
# have to go back and look at this again, no idea what the values are here
# dat <- as.data.frame(fread("ENCFF361BBQ.tsv", header = T))
# ENCSR755LFM 2013
# transcript quantification (these 2 are identical)
dat <- as.data.frame(fread("ENCFF935XFP.tsv", header = T))
dat <- data.frame(dat$gene_id, dat$TPM)
colnames(dat) <- c("gene_id", "ENCFF935XFP_TPM")
dat$gene_id <- as.character(dat$gene_id)
dat$gene_id <- unlist(lapply(strsplit(dat$gene_id, "\\."), function(x) x[1]))
results3 <- merge(dat, results2, by.x = "gene_id", by.y = "gene_id", all.x = T)
rm(dat)
# not sure how to add in TPM data, there are multiple values per unique snp
results3 <- results3[!is.na(results3$refsnp_id),]
# never actually used this part of the data, leave it in for posterity
# ----------------------------------
# read in each annotation and attach to main crv data.frame
# --------- histone marks ----------
# ENCSR000EXA 2011
# take exp(-q) to get pval
# visualize?
# H3K4me1
print("adding histone marks...")
# originally wrote this to be able to attach any value from the bed file,
# rather than just a binary value; could use attachSnpAnnotSimple instead of
# attachSnpAnnot + convertToBinInt
#ENCSR000EXA <- readBed( "ENCFF450TSY.bed", narrowpeak.bed.header)
#crv.dat$ENCSR000EXA <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR000EXA, chr, "-log10(q)" )) )
#rm(ENCSR000EXA)
# ENCSR000EWZ 2011
# H3K9me3
#ENCSR000EWZ <- readBed( "ENCFF970JXR.bed", narrowpeak.bed.header)
#crv.dat$ENCSR000EWZ <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR000EWZ, chr, "-log10(q)" )) )
#rm(ENCSR000EWZ)
# ENCSR000EXC 2011
# H3K9ac
#ENCSR000EXC <- readBed( "ENCFF304WSW.bed", narrowpeak.bed.header)
#crv.dat$ENCSR000EXC <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR000EXC, chr, "-log10(q)" )) )
#rm(ENCSR000EXC)
# ENCSR494TNM 2016
# CTCF
#ENCSR494TNM <- readBed( "ENCFF146REQ.bed", narrowpeak.bed.header)
#crv.dat$ENCSR494TNM <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR494TNM, chr, "-log10(q)" )) )
#rm(ENCSR494TNM)
# ENCSR000EXB 2011
# H3K36me3
ENCSR000EXB <- readBed( "ENCFF082VKJ.bed", narrowpeak.bed.header)
crv.dat$ENCSR000EXB <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR000EXB, chr, "-log10(q)" )) )
rm(ENCSR000EXB)
# ENCSR611DJQ 2017
# H3K4me3
ENCSR611DJQ <- readBed("ENCFF047XWN.bed", narrowpeak.bed.header)
crv.dat$ENCSR611DJQ <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR611DJQ, chr, "-log10(q)")))
rm(ENCSR611DJQ)
# ENCSR000EXE 2011
# H3K27me3
ENCSR000EXE <- readBed("ENCFF355TTQ.bed", narrowpeak.bed.header)
crv.dat$ENCSR000EXE <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR000EXE, chr, "-log10(q)")))
rm(ENCSR000EXE)
# ENCSR091MNT 2019
# H3K27me3
ENCSR091MNT <- readBed("ENCFF327DDV.bed", narrowpeak.bed.header)
crv.dat$ENCSR091MNT <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR091MNT, chr, "-log10(q)")))
rm(ENCSR091MNT)
# ENCSR619EZG 2019
# H3K4me3
ENCSR619EZG <- readBed("ENCFF305TNC.bed", narrowpeak.bed.header)
crv.dat$ENCSR619EZG <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR619EZG, chr, "-log10(q)")))
rm(ENCSR619EZG)
# ENCSR956VQB 2019
# H3K4me1
ENCSR956VQB <- readBed("ENCFF620AJW.bed", narrowpeak.bed.header)
crv.dat$ENCSR956VQB <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR956VQB, chr, "-log10(q)")))
rm(ENCSR956VQB)
# ENCSR136ZQZ 2019
# H3K27ac
ENCSR136ZQZ <- readBed("ENCFF567XUE.bed", narrowpeak.bed.header)
crv.dat$ENCSR136ZQZ <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR136ZQZ, chr, "-log10(q)")))
rm(ENCSR136ZQZ)
# ENCSR954IGQ 2019
# H3K27ac
ENCSR954IGQ <- readBed("ENCFF100QOV.bed", narrowpeak.bed.header)
crv.dat$ENCSR954IGQ <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR954IGQ, chr, "-log10(q)")))
rm(ENCSR954IGQ)
# ENCSR376JOC
# H3K9me3
ENCSR376JOC <- readBed("ENCFF418NCI.bed", narrowpeak.bed.header)
crv.dat$ENCSR376JOC <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR376JOC, chr, "-log10(q)")))
rm(ENCSR376JOC)
print("done!")
# ----------------------------------
# ------ open chromatin marks ------
# H3K4me3
#print("adding open chromatin marks...")
# ENCSR303XKE 2017
# 5-group for testis male fetal
ENCSR303XKE <- readBed("ENCFF218UBN.bed", cis.reg.header)
crv.dat$ENCSR303XKE <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR303XKE, chr, "Cis.Reg.Name" )))
rm(ENCSR303XKE)
# ENCSR000EPS 2011 (exp and replicate)
# UW human NT2-D1 DNase-seq
# signal value, but no p or q val
dat5 <- readBed("ENCFF366XFZ.bed", narrowpeak.bed.header)
dat6 <- readBed("ENCFF506YRM.bed", narrowpeak.bed.header)
ENCSR000EPS <- joinMethylReplicates(dat5, dat6, chr)
crv.dat$ENCSR000EPS <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR000EPS, chr, "q" )))
# write.table( ENCSR000EPS[,c(2,1,3,4)], "ENCSR000EPS.bed", col.names = F, row.names = F, quote = F)
rm(ENCSR000EPS)
rm(dat5)
rm(dat6)
# ENCSR729DRB 2013
# male embryo testis tissue
ENCSR729DRB <- readBed("ENCFF843ZSC.bed", narrowpeak.bed.header)
crv.dat$ENCSR729DRB <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR729DRB, chr, '-log10(q)' )))
rm(ENCSR729DRB)
# ENCSR278FHC 2013
# narrowpeak
# male embryo testis tissue
ENCFF012QTD <- readBed("ENCFF012QTD.bed", narrowpeak.bed.header)
crv.dat$ENCFF012QTD <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCFF012QTD, chr, '-log10(q)' )))
rm(ENCFF012QTD)
# broadpeak - different header
ENCFF841TKB <- readBed("ENCFF841TKB.bed", broadpeak.bed.header)
crv.dat$ENCFF841TKB <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCFF841TKB, chr, 'q' )))
rm(ENCFF841TKB)
print("done!")
# ----------------------------------
# ----- methylation ------ #
print("adding methylation...")
# ENCSR000DED 2011
# RRBS on testis (w/ replicate)
dat1 <- readBed("ENCFF001TKY.bed", methyl.bed.header)
dat2 <- readBed("ENCFF001TKZ.bed", methyl.bed.header)
ENCSR000DED <- joinMethylReplicates( dat1, dat2 )
rm(dat1)
rm(dat2)
write.table( convertToBED(ENCSR000DED), "methyl1.bed", col.names = TRUE, row.names = FALSE, quote = FALSE)
crv.dat$ENCSR000DED <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnotMethyl, ENCSR000DED, chr, "percentMeth" )))
rm(ENCSR000DED)
# ENCSR080YRO
# RRBS on testis (w/ replicate)
# ---
dat1 <- readBed("ENCFF001TPW.bed", methyl.bed.header)
dat2 <- readBed("ENCFF001TPX.bed", methyl.bed.header)
ENCSR080YRO <- joinMethylReplicates( dat1, dat2, chr )
write.table( convertToBED(ENCSR080YRO), "ENCSR080YRO.bed", col.names = TRUE, row.names = FALSE, quote = FALSE)
crv.dat$ENCSR080YRO <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnotMethyl, ENCSR080YRO, chr, "percentMeth" )))
rm(ENCSR080YRO)
methyl.bed.header.short <- methyl.bed.header[c(1,2,3,5,6,11)]
# ENCSR011HZJ 2017 - these are in grch38!!
# CHG meth
ENCFF507JBR <- readBed(paste("ENCFF507JBR/", chr, ".bed", sep = ""), methyl.bed.header.short)
crv.dat$ENCFF507JBR <- convertToBinInt(unlist(lapply( crv.dat$h38pos, attachSnpAnnotMethyl, ENCFF507JBR, chr, "percentMeth" )))
rm(ENCFF507JBR)
# CHH meth
ENCFF038JFQ <- readBed(paste("ENCFF038JFQ/", chr, ".bed", sep = ""), methyl.bed.header.short[c(1,2,3,4,6)])
crv.dat$ENCFF038JFQ <- convertToBinInt(unlist(lapply( crv.dat$h38pos, attachSnpAnnotMethyl, ENCFF038JFQ, chr, "percentMeth" )))
rm(ENCFF038JFQ)
# CpG meth
ENCFF715DMX <- readBed(paste("ENCFF715DMX/", chr, ".bed", sep = ""), methyl.bed.header.short[c(1,2,3,4,6)])
crv.dat$ENCFF715DMX <- convertToBinInt(unlist(lapply( crv.dat$h38pos, attachSnpAnnotMethyl, ENCFF715DMX, chr, "percentMeth" )))
rm(ENCFF715DMX)
# ENCSR806NNG 2018
# idats -----
# the pair of idat files needs to have the same basename, and be appened with "_Red" and "_Grn"
# eg, ENCFF001RHW.idat becomes ENCSR000ABD_Red.idat
# and ENCFF001RHX.idat becomes ENCSR000ABD_Grn.idat
# cell.line, adult male testis
# Whole genome seq
ENCSR806NNG <- readMethylIdat("ENCSR000ABD")
write.table( convertToBED(ENCSR806NNG), "ENCSR806NNG", col.names = TRUE, row.names = FALSE, quote = FALSE)
crv.dat$ENCSR806NNG <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnotMethyl, ENCSR806NNG, chr, "name" )))
rm(ENCSR806NNG)
# ENCSR304AIL
# tissue, adult male testis
# DNAme
ENCSR304AIL <- readMethylIdat( "ENCSR304AIL" )
write.table( convertToBED(ENCSR304AIL), "ENCSR304AIL", col.names = TRUE, row.names = FALSE, quote = FALSE)
crv.dat$ENCSR304AIL <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnotMethyl, ENCSR304AIL, chr, "name" )))
rm(ENCSR304AIL)
# ENCSR304PMI
# RRBS cell line
ENCSR304PMI <- readMethylIdat("ENCSR304PMI")
write.table( convertToBED(ENCSR304PMI), "ENCSR304PMI.bed", col.names = TRUE, row.names = FALSE, quote = FALSE)
crv.dat$ENCSR304PMI <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnotMethyl, ENCSR304PMI, chr, "name" )))
rm(ENCSR304PMI)
# ENCSR962XHD 2017
# DNAme
# cell line
ENCSR962XHD <- readMethylIdat("ENCLB381OUG")
write.table( convertToBED(ENCSR962XHD), "ENCSR962XHD.bed", col.names = TRUE, row.names = FALSE, quote = FALSE)
crv.dat$ENCSR962XHD <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnotMethyl, ENCSR962XHD, chr, "name" )))
rm(ENCSR962XHD)
# ENCSR942OLI 2017
# tissue adult male testis
# DNAme
ENCSR942OLI <- readMethylIdat("ENCSR942OLI")
write.table( convertToBED(ENCSR942OLI), "ENCSR942OLI.bed", col.names = TRUE, row.names = FALSE, quote = FALSE)
crv.dat$ENCSR942OLI <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnotMethyl, ENCSR942OLI, chr, "name" )))
rm(ENCSR942OLI)
print("done!")
# ----------------------------------
# ---------- transcription start sites --------- #
print("adding transcription start sites...")
# what are the values here
# adult male testis RAMPAGE
# gene quantifications, or transcription start sites?
# ENCSR866SRG 2016
#dat <- read.table("ENCFF874RBV.tsv", header = FALSE)
#tss.header <- c("chr", "Start", "End", "Name1", "Score", "Strand", "idk", "gene1", "Gene", "gene2")
# colnames(dat) <- tss.header
# column layout shared by the RAMPAGE TSS BED files below
tss.header <- c("Chrom", "Start", "End", "Name1", "const", "Strand", "Score", "Name2", "Name3", "Name4", "coords")
ENCSR866SRG = readBed("ENCFF648HUU.bed", tss.header)
# binary flag: CRV falls inside a TSS interval ("Score" only selects which
# column feeds convertToBinInt; the result is presence/absence)
crv.dat$ENCSR866SRG <- convertToBinInt(unlist(lapply(crv.dat$h19pos, attachSnpAnnot, ENCSR866SRG, chr, "Score")))
rm(ENCSR866SRG)
# ENCSR841EQJ
# rampage of testis
# columns 4, 8, 9, 10 all look like they have identical information
ENCSR841EQJ <- readBed("ENCFF162KMZ.bed", tss.header)
crv.dat$ENCSR841EQJ <- convertToBinInt(unlist(lapply(crv.dat$h19pos, attachSnpAnnot, ENCSR841EQJ,chr, "Score")))
rm(ENCSR841EQJ)
print("done!")
# ---------------------------------------------- #
# ------- transcription factor binding site ---- #
print("adding trnascription factor binding sites...")
# ENCSR000EWY 2011
# ZNF274 ChIP-seq peaks; CRV flagged 0/1 if inside a peak
ENCSR000EWY <- readBed("ENCFF637LOO.bed", narrowpeak.bed.header)
crv.dat$ENCSR000EWY <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR000EWY, chr, "q" )))
rm(ENCSR000EWY)
# ENCSR000EXG 2011
# YY1
#hg38
# NOTE: this dataset is hg38, hence h38pos here (other sections use h19pos)
ENCSR000EXG <- readBed("ENCFF293PVG.bed", narrowpeak.bed.header)
crv.dat$ENCSR000EXG <- convertToBinInt(unlist(lapply( crv.dat$h38pos, attachSnpAnnot, ENCSR000EXG, chr, "q" )))
rm(ENCSR000EXG)
# ENCSR981CID 2017
# CTCF adult male 54
ENCSR981CID <- readBed("ENCFF885KKQ.bed", narrowpeak.bed.header)
crv.dat$ENCSR981CID <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR981CID, chr, "q" )))
rm(ENCSR981CID)
# ENCSR803FAP 2017
# POLR2A ChIP-seq peaks
ENCSR803FAP <- readBed("ENCFF940TNN.bed", narrowpeak.bed.header)
# convertToBinInt() added for consistency with every other annotation:
# PAINTOR expects a binary annotation matrix, and without the wrapper the
# raw q-values (plus NA-filled zeros) would leak into the output
crv.dat$ENCSR803FAP <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR803FAP, chr, "q" )))
rm(ENCSR803FAP)
print("done!")
# --------------------------------------------- #
# local promoter-capture HiC data from struan; use files with ld > .4
struan.dat1 <- read.table("struan/Testis_Cancer_novel_NTERA2_0.4_snpInOe_ann.csv", sep = ",", header = TRUE)
#struan.dat2 <- read.table("struan/Testis_Cancer_novel_NTERA2_snpInOe_ann_new.csv", sep = ",", header = TRUE)
struan.dat1.rep <- read.table("struan/Testis_Cancer_replicate_NTERA2_0.4_snpInOe_ann.csv", sep = ",", header = TRUE)
struan.dat <- rbind(struan.dat1, struan.dat1.rep)
# INT_FRAG is "chr:start-end"; split into BED-style columns
struan.dat$INT_FRAG <- as.character(struan.dat$INT_FRAG)
frag.pos <- strsplit(struan.dat$INT_FRAG, ":")
tmp <- unlist(lapply(frag.pos, function(x) x[2]))
# use '=' (not '<-') inside data.frame() so no stray Chrom/Start/End
# variables leak into the global environment, and store positions as
# integers so interval comparisons are numeric rather than character
struan <- data.frame( Chrom = unlist(lapply(frag.pos, function(x) x[1])),
                      Start = as.integer(unlist(lapply(strsplit(tmp, "-"), function(x) x[1]))),
                      End   = as.integer(unlist(lapply(strsplit(tmp, "-"), function(x) x[2]))))
colnames(struan) <- c("Chrom", "Start", "End")
# row 17 is a duplicate interaction fragment
struan <- struan[-17,]
#write.table(struan, "struan.bed", col.names = F, row.names = F, quote = F)
# binary HiC-contact flag per CRV
crv.dat$HIC <- 0
crv.dat$HIC[snpAnnotSimple(crv.dat$h19pos, struan, chr)] <- 1
# local cell line ATAC-seq peak calls: one binary column per cell line
EP2102 <- read.table("forChey/2102EP.bed", header = F)
TCAM2 <- read.table("forChey/TCAM2.bed", header = F)
NTERA2 <- read.table("forChey/NTERA2.bed", header = F)
NCCIT <- read.table("forChey/NCCIT.bed", header = F)
# 0/1 flag per CRV: does its hg19 position fall inside any peak of 'bed'?
peakFlag <- function( bed )
{
  flag <- rep(0, dim(crv.dat)[1])
  flag[ snpAnnotSimple(crv.dat$h19pos, bed, chr) ] <- 1
  return(flag)
}
crv.dat$EP2102 <- peakFlag(EP2102)
crv.dat$TCAM2 <- peakFlag(TCAM2)
crv.dat$NTERA2 <- peakFlag(NTERA2)
crv.dat$NCCIT <- peakFlag(NCCIT)
# ----- end annotations
# qc
# assumes the first 10 columns of crv.dat are SNP metadata and 11..k are
# annotation columns -- TODO confirm if metadata columns ever change
k <- dim(crv.dat)[2]
print("annotation qc- remove empty columns and NA values...")
# zero-fill NAs in the annotation columns (PAINTOR cannot handle NA);
# apply() coerces the sub-frame to a matrix, which is fine here because
# these columns are numeric 0/1
crv.dat[,11:k] <- apply(crv.dat[,11:k], 2, replaceNA)
is.emptyAnnot <- function( x )
# TRUE when an annotation column carries no signal at all (all zeros).
# isTRUE() guards against NA: all(x == 0) is NA when x contains NA, and
# an NA here would corrupt the !ind column subscript used below.
{
return(isTRUE(all(x == 0)))
}
# ---
# remove any annotations that had no matches in the credset
# NOTE(review): apply() runs over ALL columns (metadata included) and
# coerces the data.frame to a character matrix; metadata columns are then
# string-compared to 0 -- they normally come back FALSE, but confirm none
# can be entirely "0"
ind <- apply(crv.dat, 2, is.emptyAnnot)
# keep a backup of the unfiltered table (restored later for plotting)
crv.dat.bak <- crv.dat
crv.dat <- crv.dat[,!ind]
print("done!")
print(paste("crv.dat has ", dim(crv.dat)[2], " columns", sep = ""))
# 10 columns == metadata only, i.e. every annotation column was dropped
if(dim(crv.dat)[2] == 10)
{
print("no valid annotations! exiting")
stop()
}
# write the PAINTOR input files for this locus
print("writing output...")
# annotation matrix: everything after the 10 metadata columns
ANNOTFILE <- paste0(INFILENAME, "/", INFILENAME, ".annotations")
annot <- crv.dat[, 11:dim(crv.dat)[2]]
write.table(annot, ANNOTFILE,
            col.names = TRUE, row.names = FALSE, quote = FALSE)
# note number of snps and number of redundant snp
# comma-separated annotation names, as PAINTOR's -annotations flag expects
annotation.names <- paste(colnames(annot), collapse = ",")
ANNOTNAMESFILE <- paste(INFILENAME, "annotation.names", sep = "/")
write.table(annotation.names, ANNOTNAMESFILE, quote = FALSE, col.names = FALSE, row.names = FALSE, append = FALSE)
INPUTFILES <- paste(INFILENAME, "input.files", sep = "/")
write.table(INFILENAME, INPUTFILES, quote = FALSE, col.names = FALSE, row.names = FALSE, append = FALSE)
print("done!")
print("successfully completed")
# ad-hoc exploration of one locus (chr6 / TNXB region)
# NOTE(review): second bound 320083111 looks like a typo for 32083111
# (TNXB spans ~32.00-32.08 Mb on chr6); as written the upper bound is
# ~320 Mb and the End filter below removes almost nothing -- confirm
xrange <- c(32008931, 320083111)
bed <- readBed( "ENCFF082VKJ.bed", narrowpeak.bed.header)
#p1 <- ggplot(data = bed, aes(x = Start)) + geom_area(stat = "bin", binwidth = 50)
#p1
bed <- bed[ !is.na(bed$Chrom), ]
bed <- bed[ as.character(bed$Chrom) == "chr6" & bed$Start >= xrange[1] & bed$End <= xrange[2],]
# TNXB: 32008931 - 320083111
p1 <- ggplot(data = bed, aes( x = Start, y = -log10(p))) + geom_area(fill = "blue")
p1
# NOTE(review): 'hb' is not defined anywhere in this script -- this block
# only runs interactively with hb (BP1/BP2 intervals) already loaded
p1 <- ggplot(data = hb) + geom_rect(aes(xmin = BP1, xmax = BP2, ymin = 0.975, ymax = 1.025), fill = "black") +
geom_hline(yintercept = 1, size = 2)
p1
# -- find eqtls
# TECAC GWAS x GTEx significant eQTL pairs
eqtl <- read.table("significantresults_TECAC.GTEx.csv", sep = ",", header = TRUE)
# strip the "chr" prefix; NOTE(review): "chrX" becomes NA here (with a
# coercion warning), so X-chromosome loci will never match below -- confirm
eqtl$Chr <- as.integer(gsub("chr", "", eqtl$Chr))
#eqtl <- eqtl[which(eqtl$Tissue == "Testis"),]
CHR <- unique(crv.dat$chr)
eqtl <- eqtl[eqtl$Chr == CHR,]
# gene symbols at the novel TGCT GWAS loci (one or more per locus)
novel.genes <- c("PPP2R5A",
"BCL2L11",
"TERT",
"TNXB",
"BAK1",
"DEPTOR",
"DMRT1",
"PSMB7",
"ANAPC2",
"ITIH5",
"ARL14EP",
"RAD52",
"METTL7A",
"SP1",
"CYTH1",
"ENOSF1",
"CTC459",
"ZNF217",
"FAM48B1",
"AR",
"CENPI",
"TKTL1")
# genes with no biomart symbol mapping; tmp.gene[i] pairs with tmp.ensg[i]
tmp.gene <- c("DEPTOR", "PSMB7",
"ITIH5", "METTL7A",
"CTC459", "ZNF217",
"FAM48B1")
# ZNF217's ID was missing a zero in the original ("ENSG0000171940", only
# 10 digits), so it could never match an Ensembl record; corrected to
# ENSG00000171940
tmp.ensg <- c("ENSG00000155792", "ENSG00000136930",
"ENSG00000123243", "ENSG00000185432",
"ENSG00000267575", "ENSG00000171940",
"ENSG00000223731")
# map the novel gene symbols to ENSG ids via the local gene.key table
nov.ensg <- gene.key[ gene.key$gene.id %in% novel.genes,]
ensg <- c(tmp.ensg, as.character(nov.ensg$ensembl))
# eQTLs whose gene is in either the hand-curated or the novel-locus set
eqtl2 <- eqtl[ eqtl$GeneName %in% c(tmp.gene, as.character(nov.ensg$gene.id)),]
genes <- unique(c(tmp.gene, as.character(nov.ensg$gene.id)))
# FUMA GTEx v8 average expression; keep ensg/symbol/Testis columns only
fuma <- read.table("~/Desktop/gtex_v8_ts_avg_log2TPM_exp.txt", header = TRUE)
fuma.sml <- fuma[ , colnames(fuma) %in% c("ensg", "symbol", "Testis")]
fuma.sml <- fuma.sml[ fuma.sml$ensg %in% ensg,]
# Beta_GWAS is based off of the FIRST snp in ALlele_GWAS
# Beta_eQTL is based off of the SECOND snp in allele_eqtl
# ex: chr8:119914393, BetaGWAS = -0.1259, Allele_GWAS = g_c,
# BetaEqtl = -0.276993, Allele_eqtl = C_G
# these are both based off the C allele
# BCL2L11
# betaGWAS -0.1147, alleGWAS c_t
# betaEQTL -0.129164 alleleEQTL C_T
# eqtls from shweta are in hg38
# NOTE(review): hard-coded row index 32 -- an interactive spot check of a
# single CRV; confirm row 32 is the intended SNP for this locus
ind = inEqtl( crv.dat$h38pos[32], eqtl)
sum( !is.na( unique(eqtl$Tissue[ind])))
c("Testis", "testis") %in% eqtl$Tissue[ind]
# restore the unfiltered annotation table saved before empty-column removal
crv.dat <- crv.dat.bak
k = dim(crv.dat)[2]
# plot annotation matrix
mat <- as.matrix(crv.dat[,11:k])
colnames(mat) <- colnames(crv.dat)[11:k]
rownames(mat) <- crv.dat$SNP.ID.hg19
# curated annotation descriptions; used to order columns by type
annot <- read.table("../annotation-table.csv", header = T, sep = ",")
annot$Annotation %in% colnames(mat)
# drop utr3', utr5', exons
# (columns 1-3 of mat are exons/utr5/utr3; 4:k keeps the rest)
k = dim(mat)[2]
mat <- mat[,4:k]
# aligns annotations to whats in the text database
# this is used for ordering the annotations eg by type
ind = match(as.character(annot$Annotation), colnames(mat))
mat <- mat[,ind]
# PAINTOR univariate statistics: "annotation,p" strings in column V2
u.stat <- read.table(paste(getwd(), INFILENAME, "univariate.stats", sep = "/"), header = F)
u.stat <- as.character(u.stat$V2)
u.stat <- data.frame( annot = unlist(lapply(strsplit(u.stat, ","), function(x) return(x[1]))),
p = unlist(lapply(strsplit(u.stat, ","), function(x) return(x[2]))) )
u.stat$p <- as.numeric(as.character(u.stat$p))
# melt() is reshape2 -- NOTE(review): reshape2 is not loaded at the top of
# this script; confirm it is attached before this block runs
mat <- melt(mat)
# replace each non-zero cell with the annotation's univariate p-value
ind <- match(mat$Var2, u.stat$annot )
ind2 <- mat$value != 0
mat$value[!is.na(ind) ] <- u.stat$p[ ind[!is.na(ind)]]
mat$value[!ind2] <- 0
mat$log.value <- -log10(mat$value)
# -log10(0) == Inf: mark absent cells as NA so they render white
mat$log.value[ mat$log.value == Inf ] <- NA
mat$Var2 <- as.character(mat$Var2)
write.table(mat, paste0(INFILENAME, "_annot_plot.txt"), col.names = TRUE, quote = F, row.names = F)
library(RColorBrewer)
mat$log.value[ mat$log.value == 0 ] <- NA
# CRV x annotation heatmap, shaded by univariate -log10(p)
p1 <- ggplot(data = mat, aes(x = Var2, y = Var1, fill=log.value)) + geom_tile() +
geom_tile(data = mat[ mat$log.value != 0,], color = "black") +
xlab("Annotation") +
ylab("CRV") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 90, hjust = 0, vjust = 0.5),
# legend.position = "none",
panel.border = element_rect(color = "black", fill = NA)) +
# ggtitle(gsub("_", ":", INFILENAME)) +
scale_fill_gradient2( low = "red", mid = "white", high = "green",
midpoint = -log10(0.05),
breaks = c(-1, -log10(0.05), -log10(0.001), -log10(0.0001)),
labels = c(1, 0.05, 0.001, 0.0001),
name = "p-value",
limits = c(-1,4),
na.value = "white")
p1
# scale_fill_gradientn( colors = brewer.pal(n = 9, name = "Blues"),
# breaks = c(0, -log10(0.05), -log10(0.001), -log10(0.00001)),
# labels = c(1, 0.05, 0.001, 0.00001),
# name = "p-value",
# limits = c(0,5),
# na.value = "white")
#p1
# scale_fill_gradient2( guide = "colourbar",
# low = "white",
# high = "royalblue3",
# midpoint = -log10(1),
# limits = c(0,4),
# breaks = c(0, -log10(0.05), -log10(0.001), -log10(0.0001)),
# labels = c(1, 0.05, 0.001, 0.0001),
# name = "p-value")
# PNG height scales with the number of CRVs so rows stay legible
png(paste0(INFILENAME, "_annot.png"), width = 600, height = dim(crv.dat)[1] * 15)
print(p1)
dev.off()
# PAINTOR debugging-
# make sure tabs instead of spaces
# ld matrix is invertable
# same number of snps in all files
# run paintor as follows:
#~/PAINTOR_V3.0/PAINTOR -input input.files -in . -out .
# -Zhead z -LDname ld -Gname MVTEST -Lname BF_MVTEST -enumerate 4 -annotations H3K36me3,H3K4me3.cisreg
# use getLRT.p to get the pvalue comparing annotation model to baseline
# HiC SNP ids, apparently formatted "bp--chr_...": bp before "--", the
# chromosome after it (before the next "_") -- TODO confirm the format
dat <- read.table("hic_snps.txt",header = F )
# empty cells
# NOTE(review): hard-coded removal of row 549 -- specific to this exact
# input file; re-check if hic_snps.txt is regenerated
dat <- dat[-549,]
dat$bp <- unlist(lapply(strsplit(dat$V1, "--"), function(x) x[1]))
dat$refbp <- unlist(lapply(strsplit(dat$V1, "_"), function(x) x[2]))
dat$chr <- unlist(lapply(strsplit(dat$V1, "--"), function(x) x[2]))
dat$chr <- paste0("chr", unlist(lapply(strsplit(dat$chr, "_"), function(x) x[1])))
# rebuild the struan HiC fragment table (duplicate of the block above)
struan.dat <- rbind(struan.dat1, struan.dat1.rep)
# INT_FRAG is "chr:start-end"; split into BED-style columns
struan.dat$INT_FRAG <- as.character(struan.dat$INT_FRAG)
frag.pos <- strsplit(struan.dat$INT_FRAG, ":")
tmp <- unlist(lapply(frag.pos, function(x) x[2]))
# use '=' (not '<-') inside data.frame() so no stray Chrom/Start/End
# variables leak into the global environment, and store positions as
# integers so interval comparisons are numeric rather than character
struan <- data.frame( Chrom = unlist(lapply(frag.pos, function(x) x[1])),
                      Start = as.integer(unlist(lapply(strsplit(tmp, "-"), function(x) x[1]))),
                      End   = as.integer(unlist(lapply(strsplit(tmp, "-"), function(x) x[2]))))
colnames(struan) <- c("Chrom", "Start", "End")
# row 17 is a duplicate interaction fragment
struan <- struan[-17,]
#write.table(struan, "struan.bed", col.names = F, row.names = F, quote = F)
dat$bp <- as.integer(dat$bp)
# getOverlap: flag, for each SNP in dat1, whether it falls inside any
# interval of dat2
getOverlap <- function( dat1, dat2)
# input:  dat1 (data.frame) with integer column 'bp' and column 'chr'
#         dat2 (data.frame) of intervals understood by snpAnnotSimple()
# output: 0/1 vector, one entry per row of dat1
{
  n <- nrow(dat1)
  ind <- rep(0, n)
  # seq_len() rather than 1:n so a zero-row input yields an empty loop
  # instead of iterating over c(1, 0)
  for( i in seq_len(n) )
  {
    if( length( snpAnnotSimple(dat1$bp[i], dat2, dat1$chr[i]) ) > 0 )
    {
      ind[i] <- 1
    }
  }
  return(ind)
}
# per-SNP overlap flags against HiC fragments and each cell line's peaks
dat$struan <- getOverlap(dat, struan)
dat$EP2102 <- getOverlap(dat, EP2102)
dat$TCAM2 <- getOverlap(dat, TCAM2)
# column name corrected: was misspelled "NNCIT", mislabeling the NCCIT
# overlap column in the written output
dat$NCCIT <- getOverlap(dat, NCCIT)
dat$NTERA2 <- getOverlap(dat, NTERA2)
write.table(dat, "hic_ATAC_seq.txt", row.names = F, col.names = T, quote = F)
| /annotation/addFunctionalAnnotation.R | no_license | nathanson-lab/TGCT_2021_NatureCommunications | R | false | false | 47,726 | r | #!/usr/bin/env Rscript
# pluta 11/7/19
# this script was written ad-hoc as analysis developed, a finalized version
# will be much more organized
#rm(list = ls())
#args = commandArgs(trailingOnly = TRUE)
#if( length(args) < 1)
#{
# stop('need to provide arguments: FILENAME, the reference snp of the credset')
#}
INFILENAME=args[1]
len <- nchar(INFILENAME)
if( substr(INFILENAME, len, len) == "/" )
{
INFILENAME <- substr(INFILENAME, 1, len - 1)
}
# each locus is computed independently; run script per locus
# ENCSR418RNI; ENCSR886NTH; ENCSR303XKE; ENCSR898RGU all annotate identically
# drop three of these
# ================== preprocesser =================== #
# ---------------------------- libraries ------------- #
library(data.table)
library(ggplot2)
library(IRanges)
library(BiocManager)
library(biomaRt)   # useMart()/getBM() in the MAIN section
library(minfi)     # read.metharray() used by readMethylIdat()
library(reshape2)  # melt() used for the annotation-matrix plot
# NOTE(review): hard-coded absolute path; consider making this an argument
setwd("/Users/johnpluta/Documents/nathansonlab/tecac-manuscript/annotation")
# --------------------------------------------------- #
# ----- header definitions ----- #
# column names for the ENCODE download formats used throughout the script
# bedMethyl (WGBS) files
methyl.bed.header <- c("Chrom", "Start", "End", "Name", "Score", "strand","Start2", "End2", "rgb", "count", "percentMeth")
# ENCODE narrowPeak (point-source peaks, e.g. TF ChIP-seq); p/q are -log10
narrowpeak.bed.header <- c("Chrom", "Start", "End", "name", "Score", "strand", "signalVal", "-log10(p)", "-log10(q)", "peak")
# ENCODE broadPeak; p/q labeled raw here, so readBed() will not
# back-transform them -- NOTE(review): confirm that is intended
broadpeak.bed.header <- c("Chrom", "Start", "End", "name", "Score", "strand", "signalVal", "p", "q")
# cis-regulatory element BED layout
cis.reg.header <- c("Chrom", "Start", "End", "Cis.Reg.Name", "Score", "strand", "Start2", "End2", "color")
# chr1..chr22, chrX in karyotype order; used as factor levels when sorting
chr.in.order <- c(paste("chr", c(seq(1:22), "X"), sep = ""))
# ----------------------------- #
# =================================================== #
# ================== functions =========================== #
# ---------------- getLRT.p ----------------------------- #
# get the p-value for the likelihood ratio test between the baseline
# model and the model with selected covariate(s)
getLRT.p <- function( logBF.Base, logBF.Model )
# input:  logBF.Base, logBF.Model (numeric): log(baye's factor) from
#         the two models- this comes from PAINTOR
# output: p-value (numeric) for the LRT between the two models
{
  # the LRT statistic is chi-square distributed with 1 df
  lrt.stat <- -2 * (logBF.Base - logBF.Model)
  return( 1 - pchisq(lrt.stat, 1) )
}
# -------------------------------------------------------- #
# ----------------------- inEqtl ------------------------- #
# check if a crv is in an eqtl LD block
inEqtl <- function( pos, eqtl )
# input:  pos (integer), snp position (hg38)
#         eqtl (data.frame) with LD.block.start / LD.block.end columns
# output: logical vector, one entry per eqtl row, TRUE where the LD
#         block contains pos
{
  starts.before <- eqtl$LD.block.start <= pos
  ends.after <- eqtl$LD.block.end >= pos
  return( ends.after & starts.before )
}
# -------------------------------------------------------- #
# -------------------- snpAnnotSimple -------------------- #
# simple version of annotate snp: returns the indices of 'pos' entries
# that fall inside any interval of 'bed'
snpAnnotSimple <- function( pos, bed, chr )
# input:  pos (integer vector), snp position(s)
#         bed (data.frame), intervals either as header=FALSE columns
#             V1/V2/V3 or as named Chrom/Start/End columns
#         chr (string), chromosome of interest, e.g. "chr6"
# output: integer vector of indices into pos that overlap an interval
{
  # the original only understood V1/V2/V3, so named tables (e.g. the
  # struan HiC fragments) silently matched nothing; positions are also
  # coerced to numeric in case they arrive as character/factor
  if( all(c("Chrom", "Start", "End") %in% colnames(bed)) )
  {
    bed <- bed[ as.character(bed$Chrom) == chr, ]
    starts <- as.numeric(as.character(bed$Start))
    ends <- as.numeric(as.character(bed$End))
  } else
  {
    bed <- bed[ bed$V1 == chr, ]
    starts <- as.numeric(as.character(bed$V2))
    ends <- as.numeric(as.character(bed$V3))
  }
  rangeA <- IRanges( pos, pos )
  rangeB <- IRanges(starts, ends)
  ind <- findOverlaps(rangeA, rangeB, type = "within")
  return(ind@from)
}
# -------------------------------------------------------- #
# ----------------------- replaceNA ---------------------- #
# the annotation matrix cannot contain NA or PAINTOR will crash;
# zero-filling is sufficient for binary annotation columns
replaceNA <- function(x)
{
  na.idx <- is.na(x)
  x[na.idx] <- 0
  return(x)
}
# --------------------------------------------------------- #
# --------------- joinMethylReplicates ------------------- #
# in methylation data, if there are two sets of data, truncate
# data to only those that appear in both sets. returns a single set of
# data
joinMethylReplicates <- function( dat1, dat2, chr = NULL)
# dat1 (data.frame), bed file of replicate 1
# dat2 (data.frame), bed file or replicate 2
# chr (integer), if chr is NULL, run genome wide; else subset
# output: data.frame with Start/Chrom/End plus percentMeth (bedMethyl
#         input) or q (peak input); replicate 1 supplies the retained
#         values for positions present in both replicates
{
  out <- c()
  if( is.null(chr))
  {
    tmp1 = dat1
    tmp2 = dat2
  } else
  {
    tmp1 <- dat1[ dat1$Chrom == chr, ]
    tmp2 <- dat2[ dat2$Chrom == chr, ]
  }
  # inner join on position; merge() suffixes shared column names .x/.y
  tmp.out <- merge(tmp1, tmp2, by.x = "Start", by.y = "Start")
  tmp.out <- tmp.out[ !is.na(tmp.out$Chrom.x),]
  if( "percentMeth.x" %in% colnames(tmp.out))
  {
    # NOTE(review): the blind rename below assumes merge() keeps columns
    # in the order Start, Chrom.x, End.x, percentMeth.x -- true for the
    # current inputs but fragile if the input column order ever changes
    tmp.out <- tmp.out[,colnames(tmp.out) %in% c("Start", "Chrom.x", "End.x", "percentMeth.x")]
    colnames(tmp.out) <- c("Start", "Chrom", "End", "percentMeth")
  } else
  {
    tmp.out <- tmp.out[,colnames(tmp.out) %in% c("Start", "Chrom.x", "End.x", "q.x")]
    colnames(tmp.out) <- c("Start", "Chrom", "End", "q")
  }
  out <- rbind(tmp.out, out)
  return(out)
}
# ------------------------------------------------------------#
# ---------------- attachSnpAnnotMethyl ------------------- #
# attach features based on exact snp position match against a
# methylation probe table
attachSnpAnnotMethyl <- function( pos, dat, chr, varname )
# input:  pos (integer), SNP position (same build as dat)
#         dat (data.frame), methylation table from readMethylIdat()
#             with columns chr/start/name (or a Chrom column)
#         chr (string), chromosome of interest, e.g. "chr5"
#         varname (string), column of dat to return
# output: matching value(s), ";"-collapsed if multiple; NA if no match
{
  if( is.null(pos) )
  {
    stop("pos has null value, did you pass the right attribute?")
  }
  # readMethylIdat() emits lowercase 'chr'; accept 'Chrom' too.
  # (The original filtered on dat$Chrom, which is NULL for idat-derived
  # tables, so every lookup silently returned NA.)
  if( "Chrom" %in% colnames(dat) )
  {
    dat <- dat[dat$Chrom == chr,]
  } else
  {
    dat <- dat[dat$chr == chr,]
  }
  if( !(varname %in% colnames(dat)))
  {
    print(paste(varname, " not found in data.", sep = ""))
    stop()
  }
  # which() (not a logical mask) so a no-match case yields an empty
  # index and falls through to NA instead of returning character(0),
  # which would misalign the annotation vector after unlist()
  ind <- which(dat$start %in% pos)
  if( length(ind) > 0)
  {
    x <- dat[[varname]][ind]
    # one position can map to multiple probes; concatenate into one value
    if(length(x) > 1)
    {
      print(paste("Start position:", pos, "had", length(x), "matches", sep = " "))
      x <- paste(dat[[varname]][ind], collapse = ";")
    }
    return( x )
  }
  return(NA)
}
# ----------------------------------------------------------- #
# ---------------- attachSnpAnnot ------------------- #
# look up the annotation value(s) whose interval covers a SNP position
attachSnpAnnot <- function( pos, dat, chr, varname )
# input:  pos (integer), SNP position (same genome build as dat)
#         dat (data.frame), BED-style table with Chrom/Start/End columns
#         chr (string), chromosome of interest, e.g. "chr12"
#         varname (string), which column of dat to return
# output: the matching value; multiple hits are ";"-collapsed into one
#         string; NA when no interval covers pos
{
  if( is.null(pos) )
  {
    stop("pos has null value, did you pass the right attribute?")
  }
  # keep only intervals on the SNP's chromosome
  dat <- dat[dat$Chrom == chr,]
  if( !(varname %in% colnames(dat)))
  {
    print(paste(varname, " not found in data.", sep = ""))
    stop()
  }
  # intervals containing pos (inclusive on both ends)
  hits <- which(dat$Start <= pos & dat$End >= pos)
  if( length(hits) == 0 )
  {
    return(NA)
  }
  vals <- dat[[varname]][hits]
  if( length(vals) > 1 )
  {
    # a position can fall inside several overlapping intervals
    print(paste("Start position:", pos, "had", length(vals), "matches", sep = " "))
    vals <- paste(dat[[varname]][hits], collapse = ";")
  }
  return( vals )
}
# ------------------------------------------------------------- #
# ------------------------ readMethylIdat --------------------- #
# read methylation data in .idat format and map probes to the genome
readMethylIdat <- function( IDATNAME )
# input:  IDATNAME (string), root name shared by the _Red/_Grn idat pair
# output: data.frame with columns chr / start / name (probe id), one row
#         per genome-mapped probe
{
  # reads in two idats with the same rootname
  # eg ROOT_Red.idat and ROOT_Grn.idat
  idats <- c(IDATNAME)
  # minfi pipeline: raw intensities -> Illumina-style preprocessing ->
  # probes mapped to genomic coordinates
  rgset <- read.metharray(idats, verbose = T)
  mset <- preprocessIllumina(rgset)
  mset <- mapToGenome(mset)
  # expand the run-length-encoded seqnames to one chromosome per probe
  df <- data.frame( chr = rep(mset@rowRanges@seqnames@values, mset@rowRanges@seqnames@lengths),
  start = mset@rowRanges@ranges@start,
  name = rownames(mset) )
  colnames(df) <- c("chr", "start", "name")
  return(df)
}
# ------------------------------------------------------------ #
# --------------------------- readBed ------------------------ #
# load a BED file, apply the supplied header, and sort by chromosome
# (karyotype order) and then position
readBed <- function( BEDFILE, bedheader )
{
  bed <- as.data.frame(fread(BEDFILE, header = F))
  colnames(bed) <- bedheader
  bed$Chrom <- factor(bed$Chrom, chr.in.order)
  bed <- bed[order(bed$Chrom, bed$Start),]
  # ENCODE peak files store -log10 transformed p/q values; recover the
  # raw p-value (to get p from -log10: 10^-p); q is the
  # multiple-comparison adjusted p-value
  if( "-log10(p)" %in% colnames(bed))
  {
    bed$p <- 10^-(bed$`-log10(p)`)
  }
  if( "-log10(q)" %in% colnames(bed))
  {
    bed$q <- 10^-(bed$`-log10(q)`)
  }
  return(bed)
}
# ---------------------------------------------------------- #
# ----------------------- convertToBinInt ------------------- #
# collapse an annotation vector to presence/absence
convertToBinInt <- function( var )
# input:  var, one column of annotation values (numeric or character)
# output: integer vector of 0/1, same length as var; NA -> 0 and any
#         non-zero, non-missing value -> 1
{
  present <- !is.na(var) & var != 0
  return(as.integer(present))
}
# ---------------------------------------------------------- #
# ------------------------ convertToBed -------------------- #
# convert a methylation table to BED-style columns
convertToBED <- function( dat )
{
  # case 1: summary table that already carries Chrom/Start/End
  if( all(c("Start", "Chrom", "End") %in% colnames(dat)))
  {
    bed <- data.frame( Chrom = dat$Chrom, Start = dat$Start, End = dat$End, percentMeth = dat$percentMeth)
    colnames(bed) <- c("Chrom", "Start", "End", "percentMeth")
    return(bed)
  }
  # case 2: idat-derived table (chr/start/name); make sure chromosome
  # names carry the UCSC-style "chr" prefix
  if( substr(dat$chr[1], 1, 1) != "c")
  {
    dat$chr <- paste("chr", as.character(dat$chr), sep ="")
  }
  # BED is 0-based half-open, hence chromStart = start - 1
  bed <- data.frame( chrom = dat$chr, chromStart = dat$start - 1,
                     chromEnd = dat$start, name = dat$name)
  colnames(bed) <- c("chrom", "chromStart", "chromEnd", "name")
  return(bed)
}
# ---------------------------------------------------------- #
# ======================= end functions ================================ #
# "chr:pos" ids of the new GWAS hit loci -- presumably hg19, matching the
# SNP.ID.hg19 usage elsewhere; TODO confirm the build
newhits <- c("1:212449403",
"2:111927379",
"5:1280128",
"6:32032421",
"6:33533625",
"8:120933963",
"9:779507",
"9:127190340",
"9:140073294",
"10:7534248",
"11:30351223",
"12:1051495",
"12:51301431",
"12:53793209",
"17:76691564",
"18:692095",
"19:28356614",
"20:52197366",
"X:100432681",
"X:153535143",
"X:24384181",
"X:66489986")
# ========================= MAIN ========================== #
print("Parsing credSet file...")
# the CRV file already has hg19 and hg38 positions attached
credSetFile <- paste(INFILENAME, "/", INFILENAME, ".credSet", sep = "")
crv.dat <- read.table(credSetFile, header = TRUE, as.is= TRUE)
# split the "chr:pos" ids into integer positions for both builds
crv.dat$h19pos <- as.integer(unlist(lapply(strsplit(crv.dat$SNP.ID.hg19, ":"), function(x) x[2])))
crv.dat$h38pos <- as.integer(unlist(lapply(strsplit(crv.dat$SNP.ID.hg38, ":"), function(x) x[2])))
# one chromosome per locus; take it from the first row
chr = paste("chr", crv.dat$chr[1], sep = "")
# z-score for PAINTOR's -Zhead input
crv.dat$z <- crv.dat$Effect / crv.dat$se
print("done!")
# write locus file
print("writing locus file...")
LOCUSFILE <- paste(INFILENAME, INFILENAME, sep = "/")
# column order in the output follows crv.dat's own ordering, not the
# order of this selection vector
write.table(crv.dat[,colnames(crv.dat) %in% c("chr", "SNP.ID.hg19", "rsID", "P", "z", "h19pos" )],
LOCUSFILE, col.names = T, row.names = F, quote = F)
print("done!")
print("get features from biomart...")
# get snp and gene mappings
# germline data
# biomaRt queries against the GRCh37 archive (requires network access)
grch37.snp = useMart(biomart="ENSEMBL_MART_SNP", host="grch37.ensembl.org",dataset="hsapiens_snp")
#Mart used to map Ensembl Gene IDs to Gene name
grch37 = useMart(biomart="ENSEMBL_MART_ENSEMBL", host="grch37.ensembl.org", path="/biomart/martservice", dataset="hsapiens_gene_ensembl")
snp.dat <- getBM(attributes = c("refsnp_id", "ensembl_gene_stable_id", "associated_gene", "consequence_type_tv"),
filters = "snp_filter",
values = crv.dat$rsID,
mart = grch37.snp)
# associated gene doesnt map to every instance of ensembl gene, and some
# annotations are missing- use this list from ENSEMBL and dbsnp
snp.dat$associated_gene <- as.character(snp.dat$associated_gene)
en.list <- c("ENSG00000152219", "ENSG00000186452", "ENSG00000261824",
"ENSG00000261770", "ENSG00000267575", "ENSG00000272635",
"ENSG00000267389", "ENSG00000266977", "ENSG00000267623",
"ENSG00000267630", "ENSG00000234915", "ENSG00000066027",
"ENSG00000065600", "ENSG00000234915", "ENSG00000115112",
"ENSG00000124374", "ENSG00000114850", "ENSG00000108669",
"ENSG00000267623", "ENSG00000092345", "ENSG00000173890",
"ENSG00000268220", "ENSG00000173889", "ENSG00000163558",
"ENSG00000109323", "ENSG00000109332", "ENSG00000246560",
"ENSG00000145354", "ENSG00000164037", "ENSG00000164038",
"ENSG00000164039", "ENSG00000138778", "ENSG00000248740",
"ENSG00000138769", "ENSG00000138757", "ENSG00000246541",
"ENSG00000163104", "ENSG00000163106", "ENSG00000164362",
"LRG_343", "ENSG00000204344", "ENSG00000234947",
"ENSG00000226257", "ENSG00000226033", "ENSG00000206342",
"ENSG00000272295", "ENSG00000206338", "ENSG00000236250",
"ENSG00000204344", "ENSG00000234947", "ENSG00000168477",
"ENSG00000213676", "ENSG00000233323", "ENSG00000234539",
"ENSG00000231608", "ENSG00000168468", "ENSG00000206258",
"ENSG00000229353", "ENSG00000228628", "ENSG00000112514",
"ENSG00000197283", "ENSG00000242014", "ENSG00000226492",
"ENSG00000227460", "ENSG00000245330", "ENSG00000176058",
"ENSG00000176101", "ENSG00000176248", "ENSG00000176884",
"ENSG00000261793", "ENSG00000002016", "ENSG00000170374",
"ENSG00000185591", "ENSG00000135409", "ENSG00000257379",
"ENSG00000205352", "ENSG00000197111", "ENSG00000139625",
"ENSG00000139546", "ENSG00000267281", "ENSG00000170653",
"ENSG00000135390", "ENSG00000176105", "ENSG00000132199",
"ENSG00000265490" ,"ENSG00000266171", "ENSG00000261824",
"ENSG00000261770", "ENSG00000102384", "ENSG00000268013",
"ENSG00000007350", "ENSG00000269329", "ENSG00000196924",
"ENSG00000102080", "ENSG00000185254", "ENSG00000223731",
"ENSG00000226280", "ENSG00000169083", "ENSG00000204581",
"ENSG00000153093", "ENSG00000153094", "ENSG00000049656",
"ENSG00000132570", "ENSG00000152705", "ENSG00000224186",
"ENSG00000069011", "ENSG00000187678", "ENSG00000231185",
"ENSG00000235168", "ENSG00000055211", "ENSG00000237502",
"ENSG00000186625", "ENSG00000131023", "ENSG00000120253",
"ENSG00000120265", "ENSG00000219433", "ENSG00000231760",
"ENSG00000120256", "ENSG00000268592", "ENSG00000217733",
"ENSG00000164520", "ENSG00000223701", "ENSG00000216906",
"ENSG00000203722", "ENSG00000096433", "ENSG00000030110",
"ENSG00000197251", "ENSG00000204188", "ENSG00000002822",
"ENSG00000176349", "ENSG00000147596", "ENSG00000137090",
"ENSG00000259290", "ENSG00000118369", "ENSG00000033327",
"ENSG00000244573", "ENSG00000121316", "ENSG00000126775",
"ENSG00000182521", "ENSG00000166450", "ENSG00000259180",
"ENSG00000166938", "ENSG00000260773", "ENSG00000075131",
"ENSG00000169032", "ENSG00000261351", "ENSG00000174446",
"ENSG00000174444", "ENSG00000174442", "ENSG00000188501",
"ENSG00000262117", "ENSG00000171490", "ENSG00000263307",
"ENSG00000103342", "ENSG00000261560", "ENSG00000234719",
"ENSG00000156968", "ENSG00000183793", "ENSG00000205423",
"ENSG00000259843", "ENSG00000260381", "ENSG00000261170",
"ENSG00000260539", "ENSG00000090863", "ENSG00000168411",
"ENSG00000168404", "ENSG00000263456", "ENSG00000108753",
"ENSG00000259549", "ENSG00000160321", "ENSG00000269615",
"ENSG00000271095", "ENSG00000269504", "ENSG00000229676",
"ENSG00000268981", "ENSG00000198153", "ENSG00000268789",
"ENSG00000213973", "ENSG00000269509", "ENSG00000268696",
"ENSG00000269067", "ENSG00000271661", "ENSG00000261558",
"ENSG00000261615", "ENSG00000267886", "ENSG00000183850",
"ENSG00000213096", "ENSG00000197372", "ENSG00000269289",
"ENSG00000213967", "ENSG00000229000", "ENSG00000020256",
"ENSG00000228404", "ENSG00000160285", "ENSG00000223901",
"ENSG00000215424", "ENSG00000160294", "ENSG00000228137",
"ENSG00000239415", "ENSG00000182362", "ENSG00000160298",
"ENSG00000160299", "ENSG00000223692", "ENSG00000160305",
"ENSG00000099949", "ENSG00000265148", "ENSG00000213246",
"ENSG00000108375", "ENSG00000176160", "ENSG00000108389",
"ENSG00000264672", "ENSG00000108387", "ENSG00000181013",
"ENSG00000121101", "ENSG00000212195", "ENSG00000108384",
"ENSG00000175175", "ENSG00000263938", "ENSG00000108395",
"ENSG00000224738", "ENSG00000182628", "ENSG00000232160",
"ENSG00000076770", "ENSG00000171004", "ENSG00000123728",
"ENSG00000079313", "ENSG00000129911", "ENSG00000267141")
gene.list <- c("ARL14EP", "TMPRSS12", "LINC00662",
"AC006504.1", "CTC-459F4.3", "LLNLF-65H9.1",
"AC006504.4", "AC006504.2", "not found",
"AC005758.1", "AL360091.3", "PPP2R5A",
"TMEM206", "RP11-384C4.7", "TFCP2L1",
"PAIP2B", "SSR3", "CYTH1", "AC005357.2", "DAZL",
"GPR160", "ENSG00000268220","PHC3", "PRKCI",
"MANBA", "UBE2D3", "UBE2D3-AS1", "CISD2",
"SLC9B1", "SLC9B2", "BDH2", "CENPE",
"LINC02428", "CDKL2", "G3BP2", "AC096746.1",
"SMARCAD1", "HPGDS", "TERT", "TERT",
"STK19", "STK19", "STK19", "STK19", "STK19",
"DAQB-331", "CYP21A2", "STK19", "STK19",
"STK19", "TNXB", "ATF6B", "TNXB", "ATF6B", "TNXB",
"ATF6B", "TNXB", "TNXB", "ATF6B",
"CUTA", "SYNGAP1", "RN7SL26P", "CUTA",
"SYNGAP1", "AP005717.1", "TPRN",
"SSNA1", "ANAPC2", "GRIN1", "AL929554.1", "RAD52",
"SP7", "SP1", "AMHR2", "AC023509.1",
"PRR13", "PCBP2", "MAP3K12", "TARBP2", "ATF7-NPFF",
"ATF7", "ATP5MC2", "YES1", "ENOSF1", "RP11-806L2.6",
"AP0010203.5", "LINC00662", "AC006504.1", "CENPI",
"TKTL1", "TKTL1", "FLNA", "FLNA", "TEX28", "TEX28",
"SUPT20HL1", "AL049641.1", "AR", "ACOXL-AS1", "ACOXL",
"BCL2L11", "CLPTM1L", "PCBD2", "CATSPER3", "C5orf66",
"PITX1", "SPRY4", "SPRY4-AS1", "AL078581.2", "GINM1",
"RP1-12G14.6", "KATNA1", "LATS1", "NUP43", "PCMT1",
"BTBD10P2", "AL355312.2", "LRP11", "AL355312.3",
"CCT7P1", "RAET1E", "RAET1E-AS1", "AL355312.1",
"RAET1G", "ITPR3", "BAK1", "LINC00336", "GGNBP1", "MAD1L1",
"AC104129.1", "PRDM14", "DMRT1", "RP11-687M24.7", "USP35",
"GAB2", "RPL30P11", "PLBD1", "ATG14", "TBPL2", "PRTG",
"AC012378.1", "DIS3L", "RP11-352G18.2", "TIPIN",
"MAP2K1", "AC116913.1", "SNAPC5", "RPL4", "ZWILCH",
"LCTL", "BCAR4", "RSL1D1", "AC007216.4", "GSPT1",
"AC007216.3", "NPIPB2", "MPV17L", "NPIPA5", "CNEP1R1",
"AC007610.1", "RP11-429P3.5", "AC009053.3", "RP11-252A24.7", "GLG1",
"RFWD3", "MLKL", "MIR5189", "HNF1B", "RP11-115K3.1",
"ZNF208", "AC003973.1", "BNIP3P28", "AC003973.4", "ZNF492",
"AC024563.1", "ZNF849P", "VN1R87P", "ZNF99", "BNIP3P34",
"ZNF723", "ZNF728", "BNIP3P36", "LINC01859", "LINC01858", "AC074135.1",
"ZNF730", "ZNF254", "ZNF675", "AC011503.1", "ZNF726",
"SEPT7P8", "ZFP64", "AP001468.1", "LSS", "AP001469.1",
"MCM3AP-AS1", "MCM3AP", "AP001469.2", "AP001469.9",
"YBEY", "C21orf58", "PCNT", "DIP2A-IT1", "DIP2A", "LZTR1",
"TSPOAP1-AS1", "SUPT4H1", "RNF43", "HSF5", "MTMR4",
"SEPTIN4-AS1", "SEPTIN4", "C17orf47", "TEX14", "U3",
"RAD51C", "PPM1E", "GC17M058973", "TRIM37",
"AC099850.1", "SKA2", "RAP2C-AS1", "MBNL3", "HS6ST2", "RAP2C",
"REXO1", "KLF16", "AC012615.4")
gene.key <- data.frame(ensembl = en.list, gene.id = gene.list)
# rows where biomart returned both an ensembl id and a gene symbol
ind = which(snp.dat$ensembl_gene_stable_id != "" & snp.dat$associated_gene != "")
#if( length(ind) > 0)
#{
# tmp <- data.frame( ensembl <- unique(snp.dat$ensembl_gene_stable_id[ind]),
# gene.id <- unique(snp.dat$associated_gene[ind]) )
# colnames(tmp) <- c("ensembl", "gene.id")
# gene.key <- rbind(gene.key, tmp)
#}
# every ensembl id in gene.key must have a symbol, or the match() patch
# below would write empty strings into snp.dat
if( any(gene.key$gene.id == ""))
{
print("gene.key is missing some definitions- enter these manually into gene.key and rerun")
stop()
}
# map gene data to snp data
ind = match(snp.dat$ensembl_gene_stable_id, gene.key$ensembl)
snp.dat$associated_gene[!is.na(ind)] <- as.character( gene.key$gene.id[ind[!is.na(ind)]] )
# gene structure (UTRs/exons) for every gene hit by a CRV
gene.dat <- getBM(attributes = c("ensembl_gene_id", "5_utr_start", "5_utr_end",
"3_utr_start", "3_utr_end", "exon_chrom_start", "exon_chrom_end"),
filters = "ensembl_gene_id",
values = snp.dat$ensembl_gene_stable_id,
mart = grch37)
# add exon, 3' utr, 5' utr; snps within range
# accumulate per-gene IRanges; c(IRanges, NULL) just returns the IRanges,
# so starting from c() is fine
exonRange <- utr5Range <- utr3Range <- c()
for( gene in unique(gene.dat$ensembl_gene_id))
{
exonRange <- c(IRanges(gene.dat$exon_chrom_start[gene.dat$ensembl_gene_id == gene],
gene.dat$exon_chrom_end[gene.dat$ensembl_gene_id == gene]), exonRange)
tmp <- gene.dat[ !is.na(gene.dat$`5_utr_start`),]
utr5Range <- c(IRanges(tmp$`5_utr_start`[tmp$ensembl_gene_id == gene],
tmp$`5_utr_end`[tmp$ensembl_gene_id == gene]), utr5Range)
tmp <- gene.dat[ !is.na(gene.dat$`3_utr_start`),]
utr3Range <- c(IRanges(tmp$`3_utr_start`[tmp$ensembl_gene_id == gene],
tmp$`3_utr_end`[tmp$ensembl_gene_id == gene]), utr3Range)
}
# binary structural annotations per CRV (hg19 positions)
crv.dat$exons <- 0
crv.dat$utr5 <- 0
crv.dat$utr3 <- 0
if( !any(is.null(c(exonRange, utr5Range, utr3Range) )))
{
crv.dat$exons[findOverlaps(crv.dat$h19pos, exonRange)@from] <- 1
crv.dat$utr5[findOverlaps(crv.dat$h19pos, utr5Range)@from] <- 1
crv.dat$utr3[findOverlaps(crv.dat$h19pos, utr3Range)@from] <- 1
}
# join biomart snp/gene tables back onto the CRVs
results <- merge(snp.dat, gene.dat, by.x = "ensembl_gene_stable_id", by.y = "ensembl_gene_id", all.x=T)
out <- merge(crv.dat, results, by.x = "rsID", by.y = "refsnp_id", all.x = T)
print("done!")
print("adding gene expression data...")
# gene expression ------
# column 1: gene_id - gene name of the gene the transcript belongs to (parent gene). If no gene information is provided, gene_id and transcript_id is the same.
# column 2: transcript_id(s) - transcript name of this transcript
# column 3: length - the transcript's sequence length (poly(A) tail is not counted)
# column 4: effective_length - the length containing only the positions that can generate a valid fragment
# column 5: expected_count - the sum of the posterior probability of each read comes from this transcript over all reads
# column 6: TPM - transcripts per million, a measure of relative measure of transcript abundance
# column 7: FPKM - fragments per kilobase of transcript per million mapped reads, another relative measure of transcript abundance
# column 8: posterior_mean_count - posterior mean estimate calcualted by RSEM's Gibbs sampler
# column 9: posterior_standard_deviation_of_count - posterior standard deviation of counts
# column 10: pme_TPM - posterior mean estimate of TPM
# column 11: pme_FPKM - posterior mean estimate of FPKM
# column 12: TPM_ci_lower_bound - lower bound of 95% credibility interval for TPM values
# column 13: TPM_ci_upper_bound - upper bound of 95% credibility interval for TPM values
# column 14: FPKM_ci_lower_bound - lower bound of 95% credibility interval for FPKM values
# column 15: FPKM_ci_upper_bound - upper bound of 95% credibility interval for FPKM values
dat4 <- as.data.frame(fread("ENCFF438ZIY.tsv", header = T))
# gene ids here have a version number or something, remove this
dat4$gene_id <- unlist(lapply(strsplit(dat4$gene_id, "\\."), function(x) x[1]))
dat4 <- data.frame(dat4$gene_id, dat4$TPM)
colnames(dat4) <- c("gene_id", "ENCFF438ZIY_TPM")
results2 <- merge(dat4, results, by.x = "gene_id", by.y = "ensembl_gene_stable_id", all.x=T)
rm(dat4)
# drop any genes that dont pertain to the data
results2 <- results2[!is.na(results2$refsnp_id),]
# ENCSR229WIW 2016
# have to go back and look at this again, no idea what the values are here
# dat <- as.data.frame(fread("ENCFF361BBQ.tsv", header = T))
# ENCSR755LFM 2013
# transcript quantification (these 2 are identical)
dat <- as.data.frame(fread("ENCFF935XFP.tsv", header = T))
dat <- data.frame(dat$gene_id, dat$TPM)
colnames(dat) <- c("gene_id", "ENCFF935XFP_TPM")
dat$gene_id <- as.character(dat$gene_id)
dat$gene_id <- unlist(lapply(strsplit(dat$gene_id, "\\."), function(x) x[1]))
results3 <- merge(dat, results2, by.x = "gene_id", by.y = "gene_id", all.x = T)
rm(dat)
# not sure how to add in TPPM data, tehre are multiple values per unique snp
results3 <- results3[!is.na(results3$refsnp_id),]
# never actually used this part of the data, leave it in for posterity
# never actually used this part of the data, leave it in for posterity
# ----------------------------------
# read in each annotation and attach to main crv data.frame
# --------- histone marks ----------
# ENCSR000EXA 2011
# take exp(-q) to get pval
# visualize?
# H3K4me1
print("adding histone marks...")
# originally wrote this to be able to attach any value from the bed file,
# rather than just a binary value; could use attachSnpAnnotSimple instead of
# attachSnpAnnot + convertToBinInt
#ENCSR000EXA <- readBed( "ENCFF450TSY.bed", narrowpeak.bed.header)
#crv.dat$ENCSR000EXA <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR000EXA, chr, "-log10(q)" )) )
#rm(ENCSR000EXA)
# ENCSR000EWZ 2011
# H3K9me3
#ENCSR000EWZ <- readBed( "ENCFF970JXR.bed", narrowpeak.bed.header)
#crv.dat$ENCSR000EWZ <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR000EWZ, chr, "-log10(q)" )) )
#rm(ENCSR000EWZ)
# ENCSR000EXC 2011
# H3K9ac
#ENCSR000EXC <- readBed( "ENCFF304WSW.bed", narrowpeak.bed.header)
#crv.dat$ENCSR000EXC <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR000EXC, chr, "-log10(q)" )) )
#rm(ENCSR000EXC)
# ENCSR494TNM 2016
# CTCF
#ENCSR494TNM <- readBed( "ENCFF146REQ.bed", narrowpeak.bed.header)
#crv.dat$ENCSR494TNM <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR494TNM, chr, "-log10(q)" )) )
#rm(ENCSR494TNM)
# ENCSR000EXB 2011
# H3K36me3
ENCSR000EXB <- readBed( "ENCFF082VKJ.bed", narrowpeak.bed.header)
crv.dat$ENCSR000EXB <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR000EXB, chr, "-log10(q)" )) )
rm(ENCSR000EXB)
# ENCSR611DJQ 2017
# H3K4me3
ENCSR611DJQ <- readBed("ENCFF047XWN.bed", narrowpeak.bed.header)
crv.dat$ENCSR611DJQ <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR611DJQ, chr, "-log10(q)")))
rm(ENCSR611DJQ)
# ENCSR000EXE 2011
# H3K27me3
ENCSR000EXE <- readBed("ENCFF355TTQ.bed", narrowpeak.bed.header)
crv.dat$ENCSR000EXE <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR000EXE, chr, "-log10(q)")))
rm(ENCSR000EXE)
# ENCSR091MNT 2019
# H3K27me3
ENCSR091MNT <- readBed("ENCFF327DDV.bed", narrowpeak.bed.header)
crv.dat$ENCSR091MNT <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR091MNT, chr, "-log10(q)")))
rm(ENCSR091MNT)
# ENCSR619EZG 2019
# H3K4me3
ENCSR619EZG <- readBed("ENCFF305TNC.bed", narrowpeak.bed.header)
crv.dat$ENCSR619EZG <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR619EZG, chr, "-log10(q)")))
rm(ENCSR619EZG)
# ENCSR956VQB 2019
# H3K4me1
ENCSR956VQB <- readBed("ENCFF620AJW.bed", narrowpeak.bed.header)
crv.dat$ENCSR956VQB <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR956VQB, chr, "-log10(q)")))
rm(ENCSR956VQB)
# ENCSR136ZQZ 2019
# H3K27ac
ENCSR136ZQZ <- readBed("ENCFF567XUE.bed", narrowpeak.bed.header)
crv.dat$ENCSR136ZQZ <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR136ZQZ, chr, "-log10(q)")))
rm(ENCSR136ZQZ)
# ENCSR954IGQ 2019
# H3K27ac
ENCSR954IGQ <- readBed("ENCFF100QOV.bed", narrowpeak.bed.header)
crv.dat$ENCSR954IGQ <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR954IGQ, chr, "-log10(q)")))
rm(ENCSR954IGQ)
# ENCSR376JOC
# H3K9me3
ENCSR376JOC <- readBed("ENCFF418NCI.bed", narrowpeak.bed.header)
crv.dat$ENCSR376JOC <- convertToBinInt( unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR376JOC, chr, "-log10(q)")))
rm(ENCSR376JOC)
print("done!")
# ----------------------------------
# ------ open chromatin marks ------
# H3K4me3
#print("adding open chromatin marks...")
# ENCSR303XKE 2017
# 5-group for testis male fetal
ENCSR303XKE <- readBed("ENCFF218UBN.bed", cis.reg.header)
crv.dat$ENCSR303XKE <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR303XKE, chr, "Cis.Reg.Name" )))
rm(ENCSR303XKE)
# ENCSR000EPS 2011 (exp and replicate)
# UW human NT2-D1 DNase-seq
# signal value, but no p or q val
dat5 <- readBed("ENCFF366XFZ.bed", narrowpeak.bed.header)
dat6 <- readBed("ENCFF506YRM.bed", narrowpeak.bed.header)
ENCSR000EPS <- joinMethylReplicates(dat5, dat6, chr)
crv.dat$ENCSR000EPS <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR000EPS, chr, "q" )))
# write.table( ENCSR000EPS[,c(2,1,3,4)], "ENCSR000EPS.bed", col.names = F, row.names = F, quote = F)
rm(ENCSR000EPS)
rm(dat5)
rm(dat6)
# ENCSR729DRB 2013
# male embryo testis tissue
ENCSR729DRB <- readBed("ENCFF843ZSC.bed", narrowpeak.bed.header)
crv.dat$ENCSR729DRB <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR729DRB, chr, '-log10(q)' )))
rm(ENCSR729DRB)
# ENCSR278FHC 2013
# narrowpeak
# male embryo testis tissue
ENCFF012QTD <- readBed("ENCFF012QTD.bed", narrowpeak.bed.header)
crv.dat$ENCFF012QTD <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCFF012QTD, chr, '-log10(q)' )))
rm(ENCFF012QTD)
# broadpeak - different header
ENCFF841TKB <- readBed("ENCFF841TKB.bed", broadpeak.bed.header)
crv.dat$ENCFF841TKB <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCFF841TKB, chr, 'q' )))
rm(ENCFF841TKB)
print("done!")
# ----------------------------------
# ----- methylation ------ #
print("adding methylation...")
# ENCSR000DED 2011
# RRBS on testis (w/ replicate)
dat1 <- readBed("ENCFF001TKY.bed", methyl.bed.header)
dat2 <- readBed("ENCFF001TKZ.bed", methyl.bed.header)
ENCSR000DED <- joinMethylReplicates( dat1, dat2 )
rm(dat1)
rm(dat2)
write.table( convertToBED(ENCSR000DED), "methyl1.bed", col.names = TRUE, row.names = FALSE, quote = FALSE)
crv.dat$ENCSR000DED <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnotMethyl, ENCSR000DED, chr, "percentMeth" )))
rm(ENCSR000DED)
# ENCSR080YRO
# RRBS on testis (w/ replicate)
# ---
dat1 <- readBed("ENCFF001TPW.bed", methyl.bed.header)
dat2 <- readBed("ENCFF001TPX.bed", methyl.bed.header)
ENCSR080YRO <- joinMethylReplicates( dat1, dat2, chr )
write.table( convertToBED(ENCSR080YRO), "ENCSR080YRO.bed", col.names = TRUE, row.names = FALSE, quote = FALSE)
crv.dat$ENCSR080YRO <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnotMethyl, ENCSR080YRO, chr, "percentMeth" )))
rm(ENCSR080YRO)
methyl.bed.header.short <- methyl.bed.header[c(1,2,3,5,6,11)]
# ENCSR011HZJ 2017 - these are in grch38!!
# CHG meth
ENCFF507JBR <- readBed(paste("ENCFF507JBR/", chr, ".bed", sep = ""), methyl.bed.header.short)
crv.dat$ENCFF507JBR <- convertToBinInt(unlist(lapply( crv.dat$h38pos, attachSnpAnnotMethyl, ENCFF507JBR, chr, "percentMeth" )))
rm(ENCFF507JBR)
# CHH meth
ENCFF038JFQ <- readBed(paste("ENCFF038JFQ/", chr, ".bed", sep = ""), methyl.bed.header.short[c(1,2,3,4,6)])
crv.dat$ENCFF038JFQ <- convertToBinInt(unlist(lapply( crv.dat$h38pos, attachSnpAnnotMethyl, ENCFF038JFQ, chr, "percentMeth" )))
rm(ENCFF038JFQ)
# CpG meth
ENCFF715DMX <- readBed(paste("ENCFF715DMX/", chr, ".bed", sep = ""), methyl.bed.header.short[c(1,2,3,4,6)])
crv.dat$ENCFF715DMX <- convertToBinInt(unlist(lapply( crv.dat$h38pos, attachSnpAnnotMethyl, ENCFF715DMX, chr, "percentMeth" )))
rm(ENCFF715DMX)
# ENCSR806NNG 2018
# idats -----
# the pair of idat files needs to have the same basename, and be appended with "_Red" and "_Grn"
# eg, ENCFF001RHW.idat becomes ENCSR000ABD_Red.idat
# and ENCFF001RHX.idat becomes ENCSR000ABD_Grn.idat
# cell.line, adult male testis
# Whole genome seq
ENCSR806NNG <- readMethylIdat("ENCSR000ABD")
write.table( convertToBED(ENCSR806NNG), "ENCSR806NNG", col.names = TRUE, row.names = FALSE, quote = FALSE)
crv.dat$ENCSR806NNG <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnotMethyl, ENCSR806NNG, chr, "name" )))
rm(ENCSR806NNG)
# ENCSR304AIL
# tissue, adult male testis
# DNAme
ENCSR304AIL <- readMethylIdat( "ENCSR304AIL" )
write.table( convertToBED(ENCSR304AIL), "ENCSR304AIL", col.names = TRUE, row.names = FALSE, quote = FALSE)
crv.dat$ENCSR304AIL <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnotMethyl, ENCSR304AIL, chr, "name" )))
rm(ENCSR304AIL)
# ENCSR304PMI
# RRBS cell line
ENCSR304PMI <- readMethylIdat("ENCSR304PMI")
write.table( convertToBED(ENCSR304PMI), "ENCSR304PMI.bed", col.names = TRUE, row.names = FALSE, quote = FALSE)
crv.dat$ENCSR304PMI <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnotMethyl, ENCSR304PMI, chr, "name" )))
rm(ENCSR304PMI)
# ENCSR962XHD 2017
# DNAme
# cell line
ENCSR962XHD <- readMethylIdat("ENCLB381OUG")
write.table( convertToBED(ENCSR962XHD), "ENCSR962XHD.bed", col.names = TRUE, row.names = FALSE, quote = FALSE)
crv.dat$ENCSR962XHD <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnotMethyl, ENCSR962XHD, chr, "name" )))
rm(ENCSR962XHD)
# ENCSR942OLI 2017
# tissue adult male testis
# DNAme
ENCSR942OLI <- readMethylIdat("ENCSR942OLI")
write.table( convertToBED(ENCSR942OLI), "ENCSR942OLI.bed", col.names = TRUE, row.names = FALSE, quote = FALSE)
crv.dat$ENCSR942OLI <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnotMethyl, ENCSR942OLI, chr, "name" )))
rm(ENCSR942OLI)
print("done!")
# ----------------------------------
# ---------- transcription start sites --------- #
print("adding transcription start sites...")
# what are the values here
# adult male testis RAMPAGE
# gene quantifications, or transcription start sites?
# ENCSR866SRG 2016
#dat <- read.table("ENCFF874RBV.tsv", header = FALSE)
#tss.header <- c("chr", "Start", "End", "Name1", "Score", "Strand", "idk", "gene1", "Gene", "gene2")
# colnames(dat) <- tss.header
# Column layout for the RAMPAGE/TSS bed files (also used for ENCSR841EQJ below)
tss.header <- c("Chrom", "Start", "End", "Name1", "const", "Strand", "Score", "Name2", "Name3", "Name4", "coords")
# `<-` instead of `=` for top-level assignment, consistent with the rest of the file
ENCSR866SRG <- readBed("ENCFF648HUU.bed", tss.header)
crv.dat$ENCSR866SRG <- convertToBinInt(unlist(lapply(crv.dat$h19pos, attachSnpAnnot, ENCSR866SRG, chr, "Score")))
rm(ENCSR866SRG)
# ENCSR841EQJ
# rampage of testis
# columns 4, 8, 9, 10 all look like they have identical information
ENCSR841EQJ <- readBed("ENCFF162KMZ.bed", tss.header)
crv.dat$ENCSR841EQJ <- convertToBinInt(unlist(lapply(crv.dat$h19pos, attachSnpAnnot, ENCSR841EQJ,chr, "Score")))
rm(ENCSR841EQJ)
print("done!")
# ---------------------------------------------- #
# ------- transcription factor binding site ---- #
# fixed typo in the progress message ("trnascription" -> "transcription")
print("adding transcription factor binding sites...")
# ENCSR000EWY 2011
# ZNF274
ENCSR000EWY <- readBed("ENCFF637LOO.bed", narrowpeak.bed.header)
crv.dat$ENCSR000EWY <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR000EWY, chr, "q" )))
rm(ENCSR000EWY)
# ENCSR000EXG 2011
# YY1
#hg38
ENCSR000EXG <- readBed("ENCFF293PVG.bed", narrowpeak.bed.header)
crv.dat$ENCSR000EXG <- convertToBinInt(unlist(lapply( crv.dat$h38pos, attachSnpAnnot, ENCSR000EXG, chr, "q" )))
rm(ENCSR000EXG)
# ENCSR981CID 2017
# CTCF adult male 54
ENCSR981CID <- readBed("ENCFF885KKQ.bed", narrowpeak.bed.header)
crv.dat$ENCSR981CID <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR981CID, chr, "q" )))
rm(ENCSR981CID)
# ENCSR803FAP 2017
# POLR2A
ENCSR803FAP <- readBed("ENCFF940TNN.bed", narrowpeak.bed.header)
# convertToBinInt() keeps this column binary (presence/absence), consistent
# with every other annotation attached to crv.dat; the original stored the
# raw attachSnpAnnot() values here, which would corrupt the downstream 0/1
# annotation matrix written for PAINTOR.
crv.dat$ENCSR803FAP <- convertToBinInt(unlist(lapply( crv.dat$h19pos, attachSnpAnnot, ENCSR803FAP, chr, "q")))
rm(ENCSR803FAP)
print("done!")
# --------------------------------------------- #
# local data from Struan; use files with LD > .4
struan.dat1 <- read.table("struan/Testis_Cancer_novel_NTERA2_0.4_snpInOe_ann.csv", sep = ",", header = TRUE)
#struan.dat2 <- read.table("struan/Testis_Cancer_novel_NTERA2_snpInOe_ann_new.csv", sep = ",", header = TRUE)
struan.dat1.rep <- read.table("struan/Testis_Cancer_replicate_NTERA2_0.4_snpInOe_ann.csv", sep = ",", header = TRUE)
struan.dat <- rbind(struan.dat1, struan.dat1.rep)
# interaction fragments are encoded "chrN:start-end"; split into Chrom/Start/End
struan.dat$INT_FRAG <- as.character(struan.dat$INT_FRAG)
frag.pos <- unlist(lapply(strsplit(struan.dat$INT_FRAG, ":"), function(x) x[2]))
# NOTE: the original passed `Chrom <- ...` etc. into data.frame(), which also
# created stray global variables; named arguments (=) avoid that side effect
# and make the colnames() fix-up unnecessary.
struan <- data.frame(
    Chrom = unlist(lapply(strsplit(struan.dat$INT_FRAG, ":"), function(x) x[1])),
    Start = unlist(lapply(strsplit(frag.pos, "-"), function(x) x[1])),
    End = unlist(lapply(strsplit(frag.pos, "-"), function(x) x[2])))
# row 17 is a duplicate interaction fragment
struan <- struan[-17, ]
#write.table(struan, "struan.bed", col.names = F, row.names = F, quote = F)
crv.dat$HIC <- 0
crv.dat$HIC[snpAnnotSimple(crv.dat$h19pos, struan, chr)] <- 1
# local cell line data
EP2102 <- read.table("forChey/2102EP.bed", header = F)
crv.dat$EP2102 <- 0
crv.dat$EP2102[snpAnnotSimple(crv.dat$h19pos, EP2102, chr)] <- 1
TCAM2 <- read.table("forChey/TCAM2.bed", header = F)
crv.dat$TCAM2 <- 0
crv.dat$TCAM2[snpAnnotSimple(crv.dat$h19pos, TCAM2, chr)] <- 1
NTERA2 <- read.table("forChey/NTERA2.bed", header = F)
crv.dat$NTERA2 <- 0
crv.dat$NTERA2[snpAnnotSimple(crv.dat$h19pos, NTERA2, chr)] <- 1
NCCIT <- read.table("forChey/NCCIT.bed", header = F)
crv.dat$NCCIT <- 0
crv.dat$NCCIT[snpAnnotSimple(crv.dat$h19pos, NCCIT, chr)] <- 1
# ----- end annotations
# qc
k <- dim(crv.dat)[2]
print("annotation qc- remove empty columns and NA values...")
crv.dat[,11:k] <- apply(crv.dat[,11:k], 2, replaceNA)
# Returns TRUE when an annotation column contains no non-zero entries,
# i.e. the annotation never overlapped any SNP in the credible set.
is.emptyAnnot <- function(x) {
  all(x == 0)
}
# ---
# remove any annotations that had no matches in the credset
ind <- apply(crv.dat, 2, is.emptyAnnot)
crv.dat.bak <- crv.dat
crv.dat <- crv.dat[,!ind]
print("done!")
print(paste("crv.dat has ", dim(crv.dat)[2], " columns", sep = ""))
if(dim(crv.dat)[2] == 10)
{
print("no valid annotations! exiting")
stop()
}
# write the output for PAINTOR
print("writing output...")
ANNOTFILE <- paste(INFILENAME, "/", INFILENAME, ".annotations", sep = "")
annot <- crv.dat[,11:dim(crv.dat)[2]]
write.table(annot, ANNOTFILE,
col.names = T, row.names = F, quote = F)
# note number of snps and number of redundant snp
annotation.names <- paste(colnames(annot), collapse = ",")
ANNOTNAMESFILE <- paste(INFILENAME, "annotation.names", sep = "/")
write.table(annotation.names, ANNOTNAMESFILE, quote = F, col.names = F, row.names = F, append = F)
INPUTFILES <- paste(INFILENAME, "input.files", sep = "/")
write.table(INFILENAME, INPUTFILES, quote = F, col.names = F, row.names = F, append = F)
print("done!")
print("successfully completed")
# TNXB spans chr6:32,008,931-32,083,111 (hg19); the original upper bound
# 320083111 carried an extra digit and lies far beyond the end of chr6 (~171 Mb),
# so the subsequent bed filter would never trim anything on the right.
xrange <- c(32008931, 32083111)
bed <- readBed( "ENCFF082VKJ.bed", narrowpeak.bed.header)
#p1 <- ggplot(data = bed, aes(x = Start)) + geom_area(stat = "bin", binwidth = 50)
#p1
bed <- bed[ !is.na(bed$Chrom), ]
bed <- bed[ as.character(bed$Chrom) == "chr6" & bed$Start >= xrange[1] & bed$End <= xrange[2],]
# TNXB: 32008931 - 320083111
p1 <- ggplot(data = bed, aes( x = Start, y = -log10(p))) + geom_area(fill = "blue")
p1
p1 <- ggplot(data = hb) + geom_rect(aes(xmin = BP1, xmax = BP2, ymin = 0.975, ymax = 1.025), fill = "black") +
geom_hline(yintercept = 1, size = 2)
p1
# -- find eqtls
eqtl <- read.table("significantresults_TECAC.GTEx.csv", sep = ",", header = TRUE)
eqtl$Chr <- as.integer(gsub("chr", "", eqtl$Chr))
#eqtl <- eqtl[which(eqtl$Tissue == "Testis"),]
CHR <- unique(crv.dat$chr)
eqtl <- eqtl[eqtl$Chr == CHR,]
novel.genes <- c("PPP2R5A",
"BCL2L11",
"TERT",
"TNXB",
"BAK1",
"DEPTOR",
"DMRT1",
"PSMB7",
"ANAPC2",
"ITIH5",
"ARL14EP",
"RAD52",
"METTL7A",
"SP1",
"CYTH1",
"ENOSF1",
"CTC459",
"ZNF217",
"FAM48B1",
"AR",
"CENPI",
"TKTL1")
tmp.gene <- c("DEPTOR", "PSMB7",
"ITIH5", "METTL7A",
"CTC459", "ZNF217",
"FAM48B1")
tmp.ensg <- c("ENSG00000155792", "ENSG00000136930",
"ENSG00000123243", "ENSG00000185432",
"ENSG00000267575", "ENSG0000171940",
"ENSG00000223731")
nov.ensg <- gene.key[ gene.key$gene.id %in% novel.genes,]
ensg <- c(tmp.ensg, as.character(nov.ensg$ensembl))
eqtl2 <- eqtl[ eqtl$GeneName %in% c(tmp.gene, as.character(nov.ensg$gene.id)),]
genes <- unique(c(tmp.gene, as.character(nov.ensg$gene.id)))
fuma <- read.table("~/Desktop/gtex_v8_ts_avg_log2TPM_exp.txt", header = TRUE)
fuma.sml <- fuma[ , colnames(fuma) %in% c("ensg", "symbol", "Testis")]
fuma.sml <- fuma.sml[ fuma.sml$ensg %in% ensg,]
# Beta_GWAS is based off of the FIRST snp in ALlele_GWAS
# Beta_eQTL is based off of the SECOND snp in allele_eqtl
# ex: chr8:119914393, BetaGWAS = -0.1259, Allele_GWAS = g_c,
# BetaEqtl = -0.276993, Allele_eqtl = C_G
# these are both based off the C allele
# BCL2L11
# betaGWAS -0.1147, alleGWAS c_t
# betaEQTL -0.129164 alleleEQTL C_T
# eqtls from shweta are in hg38
ind = inEqtl( crv.dat$h38pos[32], eqtl)
sum( !is.na( unique(eqtl$Tissue[ind])))
c("Testis", "testis") %in% eqtl$Tissue[ind]
crv.dat <- crv.dat.bak
k = dim(crv.dat)[2]
# plot annotation matrix
mat <- as.matrix(crv.dat[,11:k])
colnames(mat) <- colnames(crv.dat)[11:k]
rownames(mat) <- crv.dat$SNP.ID.hg19
annot <- read.table("../annotation-table.csv", header = T, sep = ",")
annot$Annotation %in% colnames(mat)
# drop utr3', utr5', exons
k = dim(mat)[2]
mat <- mat[,4:k]
# aligns annotations to whats in the text database
# this is used for ordering the annotations eg by type
ind = match(as.character(annot$Annotation), colnames(mat))
mat <- mat[,ind]
u.stat <- read.table(paste(getwd(), INFILENAME, "univariate.stats", sep = "/"), header = F)
u.stat <- as.character(u.stat$V2)
u.stat <- data.frame( annot = unlist(lapply(strsplit(u.stat, ","), function(x) return(x[1]))),
p = unlist(lapply(strsplit(u.stat, ","), function(x) return(x[2]))) )
u.stat$p <- as.numeric(as.character(u.stat$p))
mat <- melt(mat)
ind <- match(mat$Var2, u.stat$annot )
ind2 <- mat$value != 0
mat$value[!is.na(ind) ] <- u.stat$p[ ind[!is.na(ind)]]
mat$value[!ind2] <- 0
mat$log.value <- -log10(mat$value)
mat$log.value[ mat$log.value == Inf ] <- NA
mat$Var2 <- as.character(mat$Var2)
write.table(mat, paste0(INFILENAME, "_annot_plot.txt"), col.names = TRUE, quote = F, row.names = F)
library(RColorBrewer)
mat$log.value[ mat$log.value == 0 ] <- NA
p1 <- ggplot(data = mat, aes(x = Var2, y = Var1, fill=log.value)) + geom_tile() +
geom_tile(data = mat[ mat$log.value != 0,], color = "black") +
xlab("Annotation") +
ylab("CRV") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 90, hjust = 0, vjust = 0.5),
# legend.position = "none",
panel.border = element_rect(color = "black", fill = NA)) +
# ggtitle(gsub("_", ":", INFILENAME)) +
scale_fill_gradient2( low = "red", mid = "white", high = "green",
midpoint = -log10(0.05),
breaks = c(-1, -log10(0.05), -log10(0.001), -log10(0.0001)),
labels = c(1, 0.05, 0.001, 0.0001),
name = "p-value",
limits = c(-1,4),
na.value = "white")
p1
# scale_fill_gradientn( colors = brewer.pal(n = 9, name = "Blues"),
# breaks = c(0, -log10(0.05), -log10(0.001), -log10(0.00001)),
# labels = c(1, 0.05, 0.001, 0.00001),
# name = "p-value",
# limits = c(0,5),
# na.value = "white")
#p1
# scale_fill_gradient2( guide = "colourbar",
# low = "white",
# high = "royalblue3",
# midpoint = -log10(1),
# limits = c(0,4),
# breaks = c(0, -log10(0.05), -log10(0.001), -log10(0.0001)),
# labels = c(1, 0.05, 0.001, 0.0001),
# name = "p-value")
png(paste0(INFILENAME, "_annot.png"), width = 600, height = dim(crv.dat)[1] * 15)
print(p1)
dev.off()
# PAINTOR debugging-
# make sure tabs instead of spaces
# ld matrix is invertable
# same number of snps in all files
# run paintor as follows:
#~/PAINTOR_V3.0/PAINTOR -input input.files -in . -out .
# -Zhead z -LDname ld -Gname MVTEST -Lname BF_MVTEST -enumerate 4 -annotations H3K36me3,H3K4me3.cisreg
# use getLRT.p to get the pvalue comparing annotation model to baseline
dat <- read.table("hic_snps.txt",header = F )
# empty cells
dat <- dat[-549,]
dat$bp <- unlist(lapply(strsplit(dat$V1, "--"), function(x) x[1]))
dat$refbp <- unlist(lapply(strsplit(dat$V1, "_"), function(x) x[2]))
dat$chr <- unlist(lapply(strsplit(dat$V1, "--"), function(x) x[2]))
dat$chr <- paste0("chr", unlist(lapply(strsplit(dat$chr, "_"), function(x) x[1])))
struan.dat <- rbind(struan.dat1, struan.dat1.rep)
# interaction fragments are encoded "chrN:start-end"; split into Chrom/Start/End
struan.dat$INT_FRAG <- as.character(struan.dat$INT_FRAG)
tmp <- unlist(lapply(strsplit(struan.dat$INT_FRAG, ":"), function(x) x[2]))
# named arguments instead of `<-` inside data.frame(), which leaked stray
# global variables and required a colnames() fix-up afterwards
struan <- data.frame(
    Chrom = unlist(lapply(strsplit(struan.dat$INT_FRAG, ":"), function(x) x[1])),
    Start = unlist(lapply(strsplit(tmp, "-"), function(x) x[1])),
    End = unlist(lapply(strsplit(tmp, "-"), function(x) x[2])))
# row 17 is a duplicate interaction fragment
struan <- struan[-17, ]
#write.table(struan, "struan.bed", col.names = F, row.names = F, quote = F)
dat$bp <- as.integer(dat$bp)
# Flag rows of dat1 whose SNP position overlaps an interval in dat2.
#
# dat1: data.frame with columns `bp` (integer position) and `chr`
#       (chromosome label), one row per SNP.
# dat2: data.frame of intervals (Chrom/Start/End) as consumed by
#       snpAnnotSimple().
# Returns a numeric 0/1 vector with one entry per row of dat1; 1 when the
# SNP falls inside at least one interval of dat2.
getOverlap <- function(dat1, dat2) {
  # seq_len() guards the zero-row case, where the original 1:dim(dat1)[1]
  # would iterate over c(1, 0) and fail
  vapply(
    seq_len(nrow(dat1)),
    function(i) {
      hits <- snpAnnotSimple(dat1$bp[i], dat2, dat1$chr[i])
      if (length(hits) > 0) 1 else 0
    },
    numeric(1)
  )
}
dat$struan <- getOverlap(dat, struan)
dat$EP2102 <- getOverlap(dat, EP2102)
dat$TCAM2 <- getOverlap(dat, TCAM2)
# column name fixed from "NNCIT" to match the NCCIT cell line it annotates
dat$NCCIT <- getOverlap(dat, NCCIT)
dat$NTERA2 <- getOverlap(dat, NTERA2)
write.table(dat, "hic_ATAC_seq.txt", row.names = F, col.names = T, quote = F)
|
# R code purled from the RJafroc ROI-paradigm vignette; the "## ----" markers
# are knitr chunk delimiters generated by knitr::purl(), not ordinary comments.
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
library(RJafroc)
## -----------------------------------------------------------------------------
# Figure of merit for the built-in ROI dataset, using the ROI paradigm
UtilFigureOfMerit(datasetROI, FOM = "ROI")
fom <- UtilFigureOfMerit(datasetROI, FOM = "ROI")
## -----------------------------------------------------------------------------
# Significance testing of the same dataset with the Wilcoxon figure of merit
ret <- StSignificanceTesting(datasetROI, FOM = "Wilcoxon")
str(ret)
## -----------------------------------------------------------------------------
# Variance components of the fitted model
ret$varComp
## -----------------------------------------------------------------------------
# F-statistic, numerator/denominator degrees of freedom and p-value for the
# RRRC analysis (presumably random-reader random-case -- confirm in RJafroc docs)
ret$FTestStatsRRRC$fRRRC
ret$FTestStatsRRRC$ndfRRRC
ret$FTestStatsRRRC$ddfRRRC
ret$FTestStatsRRRC$pRRRC
## -----------------------------------------------------------------------------
# Confidence interval for the treatment difference under the RRRC analysis
ret$ciDiffTrtRRRC
## -----------------------------------------------------------------------------
# Same statistics for the FRRC analysis
ret$FTestStatsFRRC$fFRRC
ret$FTestStatsFRRC$ndfFRRC
ret$FTestStatsFRRC$ddfFRRC
ret$FTestStatsFRRC$pFRRC
## -----------------------------------------------------------------------------
ret$ciDiffTrtFRRC
## -----------------------------------------------------------------------------
# Same statistics for the RRFC analysis
ret$FTestStatsRRFC$fRRFC
ret$FTestStatsRRFC$ndfRRFC
ret$FTestStatsRRFC$ddfRRFC
ret$FTestStatsRRFC$pRRFC
## -----------------------------------------------------------------------------
ret$ciDiffTrtRRFC
| /vignettes/Ch30Vig2RoiParadigmSt.R | no_license | pwep/RJafroc | R | false | false | 1,444 | r | ## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
library(RJafroc)
## -----------------------------------------------------------------------------
UtilFigureOfMerit(datasetROI, FOM = "ROI")
fom <- UtilFigureOfMerit(datasetROI, FOM = "ROI")
## -----------------------------------------------------------------------------
ret <- StSignificanceTesting(datasetROI, FOM = "Wilcoxon")
str(ret)
## -----------------------------------------------------------------------------
ret$varComp
## -----------------------------------------------------------------------------
ret$FTestStatsRRRC$fRRRC
ret$FTestStatsRRRC$ndfRRRC
ret$FTestStatsRRRC$ddfRRRC
ret$FTestStatsRRRC$pRRRC
## -----------------------------------------------------------------------------
ret$ciDiffTrtRRRC
## -----------------------------------------------------------------------------
ret$FTestStatsFRRC$fFRRC
ret$FTestStatsFRRC$ndfFRRC
ret$FTestStatsFRRC$ddfFRRC
ret$FTestStatsFRRC$pFRRC
## -----------------------------------------------------------------------------
ret$ciDiffTrtFRRC
## -----------------------------------------------------------------------------
ret$FTestStatsRRFC$fRRFC
ret$FTestStatsRRFC$ndfRRFC
ret$FTestStatsRRFC$ddfRRFC
ret$FTestStatsRRFC$pRRFC
## -----------------------------------------------------------------------------
ret$ciDiffTrtRRFC
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{make_filename}
\alias{make_filename}
\title{make_filename}
\usage{
make_filename(year)
}
\arguments{
\item{year}{A valid year}
}
\value{
This function returns a name for the file with the given year.
}
\description{
This function creates the data file name \code{"accident_<year>.csv.bz2"} for the given year.
}
\note{
use functions "as.integer" and "sprintf"
}
\examples{
make_filename(2010)
}
| /man/make_filename.Rd | no_license | fardl/framirez | R | false | true | 493 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{make_filename}
\alias{make_filename}
\title{make_filename}
\usage{
make_filename(year)
}
\arguments{
\item{year}{A valid year}
}
\value{
This function returns a name for the file with the given year.
}
\description{
This function creates a filename wrapping a year given as input and "accident_%d.csv.bz2"
}
\note{
use functions "as.integer" and "sprintf"
}
\examples{
make_filename(2010)
}
|
# Package load hook: register the package's default options without
# clobbering any values the user has already set.
.onLoad <- function(libname, pkgname) {
  defaults <- list(dtt.default_tz = "UTC")
  unset <- setdiff(names(defaults), names(options()))
  if (length(unset) > 0) {
    options(defaults[unset])
  }
  invisible()
}
| /R/zzz.R | permissive | poissonconsulting/dttr2 | R | false | false | 223 | r | .onLoad <- function(libname, pkgname) {
op <- options()
op.devtools <- list(
dtt.default_tz = "UTC"
)
toset <- !(names(op.devtools) %in% names(op))
if (any(toset)) options(op.devtools[toset])
invisible()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/labels.R
\name{order.hclust}
\alias{order.hclust}
\title{Ordering of the Leaves in a hclust Dendrogram}
\usage{
order.hclust(x, ...)
}
\arguments{
\item{x}{an \code{hclust} object, typically computed from a distance matrix.}
\item{...}{Ignored.}
}
\value{
A vector with length equal to the number of
leaves in the hclust dendrogram is returned.
For \code{r <- order.hclust(x)}, each element is
the index into the original data
(from which the clustering was computed).
}
\description{
Ordering of the Leaves in a hclust Dendrogram. Like \link{order.dendrogram}.
}
\examples{
set.seed(23235)
ss <- sample(1:150, 10 )
hc <- iris[ss,-5] \%>\% dist \%>\% hclust
# dend <- hc \%>\% as.dendrogram
order.hclust(hc)
}
\seealso{
\link{order.dendrogram}
}
| /man/order.hclust.Rd | no_license | JohnMCMa/dendextend | R | false | true | 789 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/labels.R
\name{order.hclust}
\alias{order.hclust}
\title{Ordering of the Leaves in a hclust Dendrogram}
\usage{
order.hclust(x, ...)
}
\arguments{
\item{x}{ab hclust object a distance matrix.}
\item{...}{Ignored.}
}
\value{
A vector with length equal to the number of
leaves in the hclust dendrogram is returned.
From r <- order.hclust(), each element is
the index into the original data
(from which the hclust was computed).
}
\description{
Ordering of the Leaves in a hclust Dendrogram. Like \link{order.dendrogram}.
}
\examples{
set.seed(23235)
ss <- sample(1:150, 10 )
hc <- iris[ss,-5] \%>\% dist \%>\% hclust
# dend <- hc \%>\% as.dendrogram
order.hclust(hc)
}
\seealso{
\link{order.dendrogram}
}
|
# Script to plot coverage values for each of the "18 Loci"
# Color palette, from https://www.r-bloggers.com/the-paul-tol-21-color-salute/
tol21rainbow <- c("#771155", "#AA4488", "#CC99BB", "#114477", "#4477AA", "#77AADD", "#117777", "#44AAAA", "#77CCCC", "#117744", "#44AA77", "#88CCAA", "#777711", "#AAAA44", "#DDDD77", "#774411", "#AA7744", "#DDAA77", "#771122", "#AA4455", "#DD7788")
# Read in the data table
coverage <- read.table("/Volumes/Data_Disk/Dropbox/GitHub/Deleterious_GP/Data/18Loci_Coverage.txt", header=T)
genes <- as.character(levels(coverage$Gene))
# Sample columns are hard-coded as columns 3-21 of the coverage table;
# this is loop-invariant, so compute it once instead of once per gene
samplenames <- colnames(coverage)[3:21]
# Make a plot for each gene, in a 5x4 grid of panels
pdf(file="18Loci_Coverage.pdf", width=20, height=20)
par(mfrow=c(5, 4))
# A plain for-loop replaces sapply(): the calls are made purely for their
# plotting side effects, so no return value is wanted
for (gene in genes) {
    genecov <- coverage[coverage$Gene == gene,]
    # Get the highest observed coverage for the gene at any point - this
    # is the y-axis boundary of the plot
    maxcov <- max(as.matrix(genecov[,3:21]))
    # And get the length of the gene, padded on the right so the legend
    # does not cover the coverage traces
    maxpos <- max(genecov$Pos) + 200
    # Open a blank plot; axis limits are hardcoded from the data above
    plot(c(0, 0), ylim=c(0, maxcov), xlim=c(0, maxpos), type="n",
         xlab="Gene Position (bp)", ylab="Number of Reads",
         main=paste("Coverage Over", gene))
    # One coverage trace per sample
    for (i in seq_along(samplenames)) {
        lines(genecov[,samplenames[i]] ~ genecov$Pos, col=tol21rainbow[i], lwd=1)
    }
    # Drop the "X" prefix from sample names for the legend (presumably added
    # by read.table's name mangling of numeric column names - TODO confirm)
    legend(
        "topright",
        gsub("X", "", samplenames),
        col=tol21rainbow,
        lwd=1,
        cex=0.55)
}
dev.off()
| /Analysis_Scripts/Plotting/Plot_18Loci_Coverage.R | no_license | MorrellLAB/Deleterious_GP | R | false | false | 1,723 | r | # Script to plot coverage values for each of the "18 Loci"
# Color palette, from https://www.r-bloggers.com/the-paul-tol-21-color-salute/
tol21rainbow <- c("#771155", "#AA4488", "#CC99BB", "#114477", "#4477AA", "#77AADD", "#117777", "#44AAAA", "#77CCCC", "#117744", "#44AA77", "#88CCAA", "#777711", "#AAAA44", "#DDDD77", "#774411", "#AA7744", "#DDAA77", "#771122", "#AA4455", "#DD7788")
# Read in the data table
coverage <- read.table("/Volumes/Data_Disk/Dropbox/GitHub/Deleterious_GP/Data/18Loci_Coverage.txt", header=T)
genes <- as.character(levels(coverage$Gene))
# Make a plot for each gene
pdf(file="18Loci_Coverage.pdf", width=20, height=20)
par(mfrow=c(5, 4))
sapply(
genes,
function(x) {
genecov <- coverage[coverage$Gene == x,]
samplenames <- colnames(coverage)[3:21]
# Get the highest observed coverage for the gene at any point - this
# is the boundary of our plot
maxcov <- max(as.matrix(genecov[,3:21]))
# And get the length of the gene, add for legend padding
maxpos <- max(genecov$Pos) + 200
# Open a blank plot. Again, this is hardcoded and should change, but
# it is easier for now
plot(c(0, 0), ylim=c(0, maxcov), xlim=c(0, maxpos), type="n", xlab="Gene Position (bp)", ylab="Number of Reads", main=paste("Coverage Over", x))
sapply(
seq_along(samplenames),
function(y) {
lines(genecov[,samplenames[y]] ~ genecov$Pos, col=tol21rainbow[y], lwd=1)
}
)
legend(
"topright",
gsub("X", "", samplenames),
col=tol21rainbow,
lwd=1,
cex=0.55)
}
)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{read.data}
\alias{read.data}
\alias{read.data.default}
\alias{read.data.matrix}
\title{Read geochronology data}
\usage{
read.data(x, ...)
\method{read.data}{default}(x, method = "U-Pb", format = 1, ...)
\method{read.data}{matrix}(x, method = "U-Pb", format = 1, ...)
}
\arguments{
\item{x}{a file name (\code{.csv} format) or matrix}
\item{...}{optional arguments to the \code{read.csv} function}
\item{method}{one of \code{'U-Pb'}, \code{'Ar-Ar'},
\code{'detritals'} \code{'U-Th-He'}, \code{'fissiontracks'} or
\code{'other'}}
\item{format}{formatting option, depends on the value of
\code{method}.
- if \code{method = 'Ar-Ar'}, then \code{format} is one of either:
\enumerate{
\item{\code{39/40, s[39/40], 36/40, s[36/40], 39/36, s[39/36]}}
\item{\code{39, 39/40, s[39/40], 36/40, s[36/40], 39/36, s[39/36]}}
}
- if \code{method = 'fissiontracks'}, then \code{format} is
one of either:
\enumerate{
\item{the External Detector Method (EDM), which requires a
\eqn{\zeta}-calibration constant and its uncertainty, the induced
track density in a dosimeter glass, and a table with the
spontaneous and induced track densities.}
\item{LA-ICP-MS-based fission track data using the
\eqn{\zeta}-calibration method, which requires a 'session
\eqn{\zeta}' and its uncertainty and a table with the number of
spontaneous tracks, the area over which these were counted and one
or more U/Ca- or U-concentration measurements and their analytical
uncertainties.}
\item{LA-ICP-MS-based fission track data using the 'absolute
dating' method, which only requires a table with the the number of
spontaneous tracks, the area over which these were counted and one
or more U/Ca- or U-concentration measurements and their analytical
uncertainties.}
}}
}
\value{
an object of class \code{UPb}, \code{ArAr}, \code{UThHe},
\code{detritals} \code{fissiontracks} or \code{other}
}
\description{
Cast a \code{.csv} file or a matrix into one of \code{IsoplotR}'s
data classes
}
\examples{
# load one of the built-in .csv files:
data(examples)
concordia(examples$UPb)
}
| /man/read.data.Rd | no_license | zimshady/IsoplotR | R | false | true | 2,147 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{read.data}
\alias{read.data}
\alias{read.data.default}
\alias{read.data.matrix}
\title{Read geochronology data}
\usage{
read.data(x, ...)
\method{read.data}{default}(x, method = "U-Pb", format = 1, ...)
\method{read.data}{matrix}(x, method = "U-Pb", format = 1, ...)
}
\arguments{
\item{x}{a file name (\code{.csv} format) or matrix}
\item{...}{optional arguments to the \code{read.csv} function}
\item{method}{one of \code{'U-Pb'}, \code{'Ar-Ar'},
\code{'detritals'}, \code{'U-Th-He'}, \code{'fissiontracks'} or
\code{'other'}}
\item{format}{formatting option, depends on the value of
\code{method}.
- if \code{method = 'Ar-Ar'}, then \code{format} is one of either:
\enumerate{
\item{\code{39/40, s[39/40], 36/40, s[36/40], 39/36, s[39/36]}}
\item{\code{39, 39/40, s[39/40], 36/40, s[36/40], 39/36, s[39/36]}}
}
- if \code{method = 'fissiontracks'}, then \code{format} is
one of either:
\enumerate{
\item{the External Detector Method (EDM), which requires a
\eqn{\zeta}-calibration constant and its uncertainty, the induced
track density in a dosimeter glass, and a table with the
spontaneous and induced track densities.}
\item{LA-ICP-MS-based fission track data using the
\eqn{\zeta}-calibration method, which requires a 'session
\eqn{\zeta}' and its uncertainty and a table with the number of
spontaneous tracks, the area over which these were counted and one
or more U/Ca- or U-concentration measurements and their analytical
uncertainties.}
\item{LA-ICP-MS-based fission track data using the 'absolute
dating' method, which only requires a table with the number of
spontaneous tracks, the area over which these were counted and one
or more U/Ca- or U-concentration measurements and their analytical
uncertainties.}
}}
}
\value{
an object of class \code{UPb}, \code{ArAr}, \code{UThHe},
\code{detritals}, \code{fissiontracks} or \code{other}
}
\description{
Cast a \code{.csv} file or a matrix into one of \code{IsoplotR}'s
data classes
}
\examples{
# load one of the built-in .csv files:
data(examples)
concordia(examples$UPb)
}
|
#@#require rss/1.0
#@#require rss/dublincore
= module RSS::ImageItemModel
== Instance Methods
--- image_item
--- image_item=
#@todo
= class RSS::ImageItemModel::ImageItem < RSS::Element
include DublinCoreModel
== Instance Methods
--- about
--- about=
#@todo
--- date
--- date=
#@todo
--- image_height
--- image_height=
--- height
--- height=
#@todo
--- image_width
--- image_width=
--- width
--- width=
#@todo
--- resource
--- resource=
#@todo
= module RSS::ImageFaviconModel
== Instance Methods
--- image_favicon
--- image_favicon=
#@todo
= class RSS::ImageFaviconModel::ImageFavicon < RSS::Element
include DublinCoreModel
== Instance Methods
--- about
--- about=
#@todo
--- date
--- date=
#@todo
--- image_size
--- size
--- image_size=
--- size=
#@todo
| /target/rubydoc/refm/api/src/rss/image.rd | no_license | nacyot/omegat-rurima-ruby | R | false | false | 772 | rd | #@#require rss/1.0
#@#require rss/dublincore
= module RSS::ImageItemModel
== Instance Methods
--- image_item
--- image_item=
#@todo
= class RSS::ImageItemModel::ImageItem < RSS::Element
include DublinCoreModel
== Instance Methods
--- about
--- about=
#@todo
--- date
--- date=
#@todo
--- image_height
--- image_height=
--- height
--- height=
#@todo
--- image_width
--- image_width=
--- width
--- width=
#@todo
--- resource
--- resource=
#@todo
= module RSS::ImageFaviconModel
== Instance Methods
--- image_favicon
--- image_favicon=
#@todo
= class RSS::ImageFaviconModel::ImageFavicon < RSS::Element
include DublinCoreModel
== Instance Methods
--- about
--- about=
#@todo
--- date
--- date=
#@todo
--- image_size
--- size
--- image_size=
--- size=
#@todo
|
# Master analysis script: attaches packages and helper functions, processes the
# raw data, then runs the statistical analyses and figure generation.
# NOTE(review): clearing the workspace with rm(list = ls()) inside a script is
# discouraged -- prefer running this file in a fresh R session.
rm(list = ls(all = TRUE))
source("R/packages.R")   # attach required packages
source("R/functions.R")  # project helper functions
## process data
source("R/r1_process_data.R")
summary(comm_biom) # community above/belowground biomass
summary(root_trait) # root traits (length, surface area etc.) with associated shoot and root mass
summary(diameter_classification) # diameter classification
summary(spp_biom) # shoot and root biomass by spp
## stat and figure
theme_set(theme_bw()) # set ggplot background
rain_cols <- c("green4", "dodgerblue3", "red", "palegreen", "plum3") # colors to be used to plot rainfall treatments
source("R/r1_1_analyse_community_biomass.R") # analysis on community biomass
source("R/r1_2_analyse_root_traits.R") # analysis on root traits
source("R/r1_2_1_create_smmry_tbl_root_traits.R") # create summary table (.csv) based on the result of analysis for root traits
## run the lines below if one wants to save the summary table as an excel. This may take
## some time. If Java error occurred, restart R session and try again
# get_excel_tble_comm_biom()
# get_excel_tble_root_trait()
# save all objects created above so downstream reports can load them directly
save.image("Output/Data/all_obj.RData")
| /R/analysis.R | no_license | ShunHasegawa/DRIGRASS_stem_root | R | false | false | 1,233 | r | rm(list = ls(all = TRUE))
# Analysis pipeline: attach packages/helpers, process data, run analyses,
# then snapshot all objects to disk.
source("R/packages.R")   # attach required packages
source("R/functions.R")  # project helper functions
## process data
source("R/r1_process_data.R")
summary(comm_biom) # community above/belowground biomass
summary(root_trait) # root traits (length, surface area etc.) with associated shoot and root mass
summary(diameter_classification) # diameter classification
summary(spp_biom) # shoot and root biomass by spp
## stat and figure
theme_set(theme_bw()) # set ggplot background
rain_cols <- c("green4", "dodgerblue3", "red", "palegreen", "plum3") # colors to be used to plot rainfall treatments
source("R/r1_1_analyse_community_biomass.R") # analysis on community biomass
source("R/r1_2_analyse_root_traits.R") # analysis on root traits
source("R/r1_2_1_create_smmry_tbl_root_traits.R") # create summary table (.csv) based on the result of analysis for root traits
## run the lines below if one wants to save the summary table as an excel. This may take
## some time. If Java error occurred, restart R session and try again
# get_excel_tble_comm_biom()
# get_excel_tble_root_trait()
# save all objects created above so downstream reports can load them directly
save.image("Output/Data/all_obj.RData")
|
# Loading Packages
# library() rather than require(): a missing package then fails fast with an
# error instead of returning FALSE and crashing later at first use.
library(RPostgreSQL)
library(dplyr)
library(tidyr)
library(readr)
# PostgreSQL Driver and Connection
# SECURITY(review): database credentials were hard-coded in source. The
# password is now read from the environment; the literal is kept only as a
# backward-compatible fallback and should be rotated and removed.
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv,
                 dbname = "sfhqxipq",
                 host = "drona.db.elephantsql.com",
                 user = "sfhqxipq",
                 password = Sys.getenv("ELEPHANTSQL_PASSWORD",
                                       "Bq8j2SGQXNAUQiZJ-nOXp7BjNey_a2N3"))
# Drop All Existing Tables
# Rebuild the schema from scratch. Children are dropped before parents, and
# CASCADE removes any dependent objects (foreign keys, views) with each table.
sql <- "
drop table if exists review cascade;
drop table if exists order_items cascade;
drop table if exists dma cascade;
drop table if exists orders cascade;
drop table if exists customer cascade;
drop table if exists acquire_source cascade;
drop table if exists zipcode cascade;
drop table if exists discount cascade;
drop table if exists loss_reason cascade;
drop table if exists loss_records cascade;
drop table if exists restocking_df cascade;
drop table if exists items_df cascade;
drop table if exists supplier_df cascade;
"
# NOTE(review): dbGetQuery() is used for DDL here; dbExecute() is the
# conventional DBI call for statements that return no result set.
dbGetQuery(con, sql)
# Loading Dataset for Each Tables
# One data frame per target table, read from the working directory.
# NOTE(review): these file names carry no .csv extension -- presumably
# extensionless exports; confirm they exist exactly as named.
dma<-read.csv("dma")
zipcode <- read.csv("zipcode")
acquire_source <- read.csv("acquire_source")
customer<-read.csv("customer")
review <- read.csv("review")
orders <- read.csv("orders")
discount <- read.csv("discount")
order_items <- read.csv("order_items")
# loss_reason: lookup table mapping a character id ("1".."6") to a
# human-readable cause of inventory loss.
reason_id <- as.character(seq_len(6))
description <- c(
  "employee carelessness",
  "transportation issue",
  "Unknown Causes",
  "Vendor Fraud",
  "Customer theft",
  "Employee Theft"
)
loss_reason <- data.frame(reason_id, description)
# restocking_df
restocking_df <- read.csv("restocking.csv")
# drop the first column (presumably an exported row index -- TODO confirm)
restocking_df <- subset(restocking_df, select = -1)
# surrogate primary key "s1".."sN", one per restocking row
restocking_df$sku <- sprintf('s%d',1:nrow(restocking_df))
#supplier_df
supplier_df <- read.csv("supplier.csv")
#items_df
items_df<-read.csv("items.csv")
#loss_records: 70 synthetic loss events, each tied to a randomly sampled item.
# BUG FIX: the original piped the sampled rows through distinct(), which can
# return fewer than 70 skus whenever the sample repeats a sku value; data.frame()
# then fails with "arguments imply differing number of rows". Keeping the sku of
# each sampled row positionally guarantees exactly 70 values.
# NOTE(review): sampling is unseeded, so the generated records differ on every
# run; call set.seed() first if reproducibility matters.
loss_id <- sprintf('%d', 1:70)
loss_time <- sample(seq(as.Date('2016/01/01'), as.Date('2018/10/05'), by="day"), 70)
RandomSelectRoW <- items_df[sample(nrow(items_df), 70), ]
sku <- RandomSelectRoW$sku
quantity <- floor(runif(70, min = 1, max = 200))
reasons <- sample(reason_id, size = 70, replace = TRUE)
loss_records <- data.frame(loss_id, loss_time, sku, quantity, reasons)
# category: reference table of the 21 product categories, keyed "CA1".."CA21".
cat_id <- paste0("CA", seq_len(21))
category_name <- c(
  "AlcoholicBeverages", "BabyBoutique", "Bakery", "Bread", "Catering",
  "Dairy", "DeliandCheese", "Floral", "Freezer", "Grocery",
  "HealthandBeautyAids", "HouseholdEssentials", "Kitchen", "MealKit",
  "Meat", "NonalcoholicBeverages", "PetShop", "Poultry", "Produce",
  "Seafood", "Snacks"
)
category_df <- data.frame(cat_id, category_name)
#warehouse_managers: static seed data for 21 managers. The id is a character
# key "1".."21"; the three vectors below are positional (element i describes
# manager i).
warehouse_manager_id <- sprintf('%d', 1:21)
warehouse_manager_name <- c('Gus Dobby','Leoine Lemmon','Karleen Meins','Herman Vonasek','Maryjane Baldin',
                            'Barbie Langfat','Cordelie Laflin','Bondy Suarez','Coral Hegge','Kiley Deppen',
                            'Gabi Matessian','Baudoin Feek','Kittie Board','Eulalie Knyvett','Klaus Grelak',
                            'Dorian Droghan','Ulrike Gerardet','Vilma Christofol','Gayleen Crallan','Sam Dixcee',
                            'Menard Shutler')
warehouse_manager_monthly_salary <- c(3000, 3500, 3100, 2900, 3000, 2900, 3000, 3100, 3200, 3300, 3000,
                                      2900, 3500, 3400, 2900, 2700, 2000, 3000, 3000, 3000, 2700)
warehouse_manager_phone <- c('778-679-6952','820-906-0462','705-867-6655','537-843-9114','674-937-4518','552-777-1100',
                             '734-419-5652','466-557-3824','182-272-6334','531-784-6145','124-491-1014','284-328-5445',
                             '150-311-1344','886-211-7766','595-957-1934','966-790-4688','125-763-3116','546-805-9081',
                             '641-572-3731','959-890-9051','887-129-5496')
warehouse_managers_df <- data.frame(warehouse_manager_id, warehouse_manager_name, warehouse_manager_monthly_salary, warehouse_manager_phone)
#warehouse: static seed data for 21 warehouses.
# NOTE(review): cat_id (built in the category block) is reused positionally
# here, pairing warehouse i with category i -- confirm each warehouse really
# maps 1:1 to a product category.
warehouse_id <- sprintf('%d', 1:21)
warehouse_location <- c('19 Mayflower Street Brooklyn, NY 11211','2 Howard St. Brooklyn, NY 11215','4 Smoky Hollow St. Manhattan, NY 10002','823 Myrtle St. Bronx, NY 10469',
                        '936 Crescent St. Levittown, NY 11756', '7982 Brown Street Yonkers, NY 10701', '9076 Mammoth Street Spring Valley, NY 10977', '11 Border Street Huntington, NY 11743',
                        '41 Spruce St. Staten Island, NY 10314', '13 Sycamore Rd. Bronx, NY 10465', '9910 NE. Locust Court Bronx, NY 10466', '88 Miller St. Jamestown, NY 14701',
                        '68 North Orchard Ave. Brooklyn, NY 11216', '143 Oak Meadow Ave. Jamaica, NY 11434', '239 Rockcrest St. Woodside, NY 11377', '695 Rockwell St. Jamaica, NY 11432',
                        '7829 Bay St. Manhattan, NY 10033', '32A North Wagon Drive Bronx, NY 10463','8349 Bellevue St. Manhattan, NY 10023','908 Cherry Dr. Manhattan, NY 10003',
                        '9853 Brandywine Drive Lockport, NY 14094')
warehouse_phone <- c('859-611-5195', '608-127-5448', '285-491-0611', '839-149-3369', '450-592-9482', '922-365-2041', '513-604-1717', '451-323-3503', '390-900-3767','625-914-4000',
                     '269-297-5386', '637-432-9560', '357-278-5136', '167-770-6781', '346-154-9897', '431-385-8925', '727-649-6911', '349-791-2321', '571-807-8625', '217-481-8053','331-426-1570')
# Assign managers by an *unseeded* random shuffle: assignments differ on every
# run; call set.seed() first if reproducibility matters.
warehouse_manager_id <- sample(warehouse_manager_id, size = 21)
warehouse_df <- data.frame(warehouse_id, cat_id, warehouse_location, warehouse_phone, warehouse_manager_id)
# Write Data into PostgreSQL server.
# The original repeated dbWriteTable()/print() sixteen times; a single loop over
# a named list is equivalent (same tables, same order -- parents before children
# -- and the same "1".."16" progress output) and far easier to extend.
upload_tables <- list(
  dma                = dma,
  zipcode            = zipcode,
  acquire_source     = acquire_source,
  customer           = customer,
  review             = review,
  orders             = orders,
  discount           = discount,
  order_items        = order_items,
  loss_reason        = loss_reason,
  restocking_df      = restocking_df,
  supplier_df        = supplier_df,
  items_df           = items_df,
  category           = category_df,
  warehouse_managers = warehouse_managers_df,
  warehouse          = warehouse_df,
  loss_records       = loss_records
)
for (i in seq_along(upload_tables)) {
  dbWriteTable(con, name = names(upload_tables)[i], value = upload_tables[[i]],
               row.names = FALSE, append = TRUE)
  print(as.character(i))  # progress marker, mirrors the original print('1')..print('16')
}
################# Server-side helper functions #################
# Create Functions
# Age_Level buckets an age into 'Young' (< 30), 'Older' (> 60) or
# 'Middle-Aged' (everything else, so exactly 30 and 60 fall here).
age_level_func <-
  "CREATE OR REPLACE FUNCTION Age_Level(age double precision)
RETURNS TEXT AS
$func$
DECLARE res text;
BEGIN
select case
when age < 30 then 'Young'
when age > 60 then 'Older'
ELSE 'Middle-Aged' end into res;
return res;
END;
$func$
LANGUAGE plpgsql;
"
# Income_Level buckets an income into 'Low' (< 50000), 'High' (> 150000) or
# 'Middle' (everything else, so exactly 50000 and 150000 fall here).
income_level_func <- "
CREATE OR REPLACE FUNCTION Income_Level (income double precision)
RETURNS TEXT AS
$income_level$
DECLARE res TEXT;
BEGIN
select (case
when income < 50000 then 'Low'
when income > 150000 then 'High'
ELSE 'Middle'
end ) into res;
RETURN res;
END;
$income_level$
LANGUAGE plpgsql;
"
# Upload Function
# NOTE(review): dbExecute() is the conventional DBI call for DDL statements.
dbGetQuery(con, age_level_func)
dbGetQuery(con, income_level_func)
| /Database/Data Prep & Upload.R | no_license | cptidiot/FreshDirect-Dashboard | R | false | false | 8,843 | r | # Loading Packages
require('RPostgreSQL')
require('dplyr')
require('tidyr')
require('readr')
# PostgresSQL Driver and Connection
drv <- dbDriver('PostgreSQL')
con <- dbConnect(drv,dbname = 'sfhqxipq',
host = 'drona.db.elephantsql.com',
user = 'sfhqxipq', password = 'Bq8j2SGQXNAUQiZJ-nOXp7BjNey_a2N3')
# Drop All Existing Tables
sql <- "
drop table if exists review cascade;
drop table if exists order_items cascade;
drop table if exists dma cascade;
drop table if exists orders cascade;
drop table if exists customer cascade;
drop table if exists acquire_source cascade;
drop table if exists zipcode cascade;
drop table if exists discount cascade;
drop table if exists loss_reason cascade;
drop table if exists loss_records cascade;
drop table if exists restocking_df cascade;
drop table if exists items_df cascade;
drop table if exists supplier_df cascade;
"
dbGetQuery(con, sql)
# Loading Dataset for Each Tables
dma<-read.csv("dma")
zipcode <- read.csv("zipcode")
acquire_source <- read.csv("acquire_source")
customer<-read.csv("customer")
review <- read.csv("review")
orders <- read.csv("orders")
discount <- read.csv("discount")
order_items <- read.csv("order_items")
# loss_reason
reason_id <- sprintf('%d', 1:6)
description <- c("employee carelessness", "transportation issue", "Unknown Causes", "Vendor Fraud", "Customer theft", "Employee Theft")
loss_reason <- data.frame(reason_id, description)
# restocking_df
restocking_df <- read.csv("restocking.csv")
restocking_df <- subset(restocking_df, select = -1)
restocking_df$sku <- sprintf('s%d',1:nrow(restocking_df))
#supplier_df
supplier_df <- read.csv("supplier.csv")
#items_df
items_df<-read.csv("items.csv")
#loss_records
loss_id <- sprintf('%d', 1:70)
loss_time <- sample(seq(as.Date('2016/01/01'), as.Date('2018/10/05'), by="day"), 70)
RandomSelectRoW <- items_df[sample(nrow(items_df), 70), ]
sku <- RandomSelectRoW %>% select(sku) %>%distinct()
quantity <- floor(runif(70, min=1, max=200))
reasons<-sample(reason_id, size = 70, replace=TRUE)
loss_records <- data.frame(loss_id, loss_time, sku, quantity,reasons)
#category
cat_id <- sprintf('CA%d', 1:21)
category_name <- c('AlcoholicBeverages', 'BabyBoutique','Bakery', 'Bread','Catering','Dairy','DeliandCheese',
'Floral','Freezer','Grocery','HealthandBeautyAids','HouseholdEssentials','Kitchen','MealKit',
'Meat','NonalcoholicBeverages','PetShop','Poultry','Produce','Seafood','Snacks')
category_df <- data.frame(cat_id, category_name)
#warehouse_managers
warehouse_manager_id <- sprintf('%d', 1:21)
warehouse_manager_name <- c('Gus Dobby','Leoine Lemmon','Karleen Meins','Herman Vonasek','Maryjane Baldin',
'Barbie Langfat','Cordelie Laflin','Bondy Suarez','Coral Hegge','Kiley Deppen',
'Gabi Matessian','Baudoin Feek','Kittie Board','Eulalie Knyvett','Klaus Grelak',
'Dorian Droghan','Ulrike Gerardet','Vilma Christofol','Gayleen Crallan','Sam Dixcee',
'Menard Shutler')
warehouse_manager_monthly_salary <- c(3000, 3500, 3100, 2900, 3000, 2900, 3000, 3100, 3200, 3300, 3000,
2900, 3500, 3400, 2900, 2700, 2000, 3000, 3000, 3000, 2700)
warehouse_manager_phone <- c('778-679-6952','820-906-0462','705-867-6655','537-843-9114','674-937-4518','552-777-1100',
'734-419-5652','466-557-3824','182-272-6334','531-784-6145','124-491-1014','284-328-5445',
'150-311-1344','886-211-7766','595-957-1934','966-790-4688','125-763-3116','546-805-9081',
'641-572-3731','959-890-9051','887-129-5496')
warehouse_managers_df <- data.frame(warehouse_manager_id, warehouse_manager_name, warehouse_manager_monthly_salary, warehouse_manager_phone)
#warehouse
warehouse_id <- sprintf('%d', 1:21)
warehouse_location <- c('19 Mayflower Street Brooklyn, NY 11211','2 Howard St. Brooklyn, NY 11215','4 Smoky Hollow St. Manhattan, NY 10002','823 Myrtle St. Bronx, NY 10469',
'936 Crescent St. Levittown, NY 11756', '7982 Brown Street Yonkers, NY 10701', '9076 Mammoth Street Spring Valley, NY 10977', '11 Border Street Huntington, NY 11743',
'41 Spruce St. Staten Island, NY 10314', '13 Sycamore Rd. Bronx, NY 10465', '9910 NE. Locust Court Bronx, NY 10466', '88 Miller St. Jamestown, NY 14701',
'68 North Orchard Ave. Brooklyn, NY 11216', '143 Oak Meadow Ave. Jamaica, NY 11434', '239 Rockcrest St. Woodside, NY 11377', '695 Rockwell St. Jamaica, NY 11432',
'7829 Bay St. Manhattan, NY 10033', '32A North Wagon Drive Bronx, NY 10463','8349 Bellevue St. Manhattan, NY 10023','908 Cherry Dr. Manhattan, NY 10003',
'9853 Brandywine Drive Lockport, NY 14094')
warehouse_phone <- c('859-611-5195', '608-127-5448', '285-491-0611', '839-149-3369', '450-592-9482', '922-365-2041', '513-604-1717', '451-323-3503', '390-900-3767','625-914-4000',
'269-297-5386', '637-432-9560', '357-278-5136', '167-770-6781', '346-154-9897', '431-385-8925', '727-649-6911', '349-791-2321', '571-807-8625', '217-481-8053','331-426-1570')
warehouse_manager_id <- sample(warehouse_manager_id, size = 21)
warehouse_df <- data.frame(warehouse_id, cat_id, warehouse_location, warehouse_phone, warehouse_manager_id)
# Write Data into PostgresSQL server
dbWriteTable(con, name = 'dma', value = dma, row.names = FALSE, append=TRUE)
print('1')
dbWriteTable(con, name = 'zipcode', value = zipcode, row.names = FALSE, append=TRUE)
print('2')
dbWriteTable(con, name = 'acquire_source', value = acquire_source, row.names = FALSE, append=TRUE)
print('3')
dbWriteTable(con, name = 'customer', value = customer, row.names = FALSE, append=TRUE)
print('4')
dbWriteTable(con, name = 'review', value = review, row.names = FALSE, append=TRUE)
print('5')
dbWriteTable(con, name = 'orders', value = orders, row.names = FALSE, append=TRUE)
print('6')
dbWriteTable(con, name = 'discount', value = discount, row.names = FALSE, append=TRUE)
print('7')
dbWriteTable(con, name = 'order_items', value = order_items, row.names = FALSE, append=TRUE)
print('8')
dbWriteTable(con, name='loss_reason', value=loss_reason, row.names=FALSE, append =TRUE)
print('9')
dbWriteTable(con, name = 'restocking_df', value = restocking_df, row.names = FALSE, append=TRUE)
print('10')
dbWriteTable(con, name = 'supplier_df', value = supplier_df, row.names = FALSE, append=TRUE)
print('11')
dbWriteTable(con, name = 'items_df', value = items_df, row.names = FALSE, append=TRUE)
print('12')
dbWriteTable(con, name="category", value=category_df, row.names=FALSE, append=TRUE)
print('13')
dbWriteTable(con, name="warehouse_managers", value=warehouse_managers_df, row.names=FALSE, append=TRUE)
print('14')
dbWriteTable(con, name="warehouse", value=warehouse_df, row.names=FALSE, append=TRUE)
print('15')
dbWriteTable(con, name='loss_records', value=loss_records, row.names=FALSE, append =TRUE)
print('16')
################# DEBUG #################
# Create Functions
age_level_func <-
"CREATE OR REPLACE FUNCTION Age_Level(age double precision)
RETURNS TEXT AS
$func$
DECLARE res text;
BEGIN
select case
when age < 30 then 'Young'
when age > 60 then 'Older'
ELSE 'Middle-Aged' end into res;
return res;
END;
$func$
LANGUAGE plpgsql;
"
income_level_func <- "
CREATE OR REPLACE FUNCTION Income_Level (income double precision)
RETURNS TEXT AS
$income_level$
DECLARE res TEXT;
BEGIN
select (case
when income < 50000 then 'Low'
when income > 150000 then 'High'
ELSE 'Middle'
end ) into res;
RETURN res;
END;
$income_level$
LANGUAGE plpgsql;
"
# Upload Function
dbGetQuery(con, age_level_func)
dbGetQuery(con, income_level_func)
|
context("test-get-weight")
test_that("misc", {
  # Helper used by every expectation below:
  #   1. build a decaying PPM model via new_ppm_decay(), forwarding any extra
  #      buffer/decay parameters supplied through `...`,
  #   2. train it on `seq`, observed at times `data_time` (training only,
  #      no prediction, zero-indexed symbols),
  #   3. return the weight of `n_gram` evaluated as if queried at position
  #      `pos` and time `time`, with update-exclusion disabled.
  f <- function(seq,
                n_gram,
                pos, time,
                data_time,
                alphabet_size = 100,
                noise = 0,
                order_bound = 3,
                ...) {
    mod <- new_ppm_decay(alphabet_size = alphabet_size, noise = noise,
                         order_bound = order_bound, ...)
    model_seq(mod, seq, time = data_time,
              train = TRUE,
              predict = FALSE,
              zero_indexed = TRUE)
    get_weight(mod, n_gram, pos = pos, time = time, update_excluded = FALSE,
               zero_indexed = TRUE)
  }
decay_exp <- function(time_elapsed, half_life, start, end) {
lambda <- log(2) / half_life
end + (start - end) * exp(- lambda * time_elapsed)
}
## Item buffers
# Buffer = 10 - everything at full stm_rate
f(seq = rep(1, times = 9),
n_gram = 1,
pos = 10, time = 10,
data_time = 1:9,
buffer_length_time = 999999,
buffer_length_items = 10,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 0.000000001,
noise = 0) %>% expect_equal(9)
# No more than 10 cases can be counted
f(seq = rep(1, times = 15),
n_gram = 1,
pos = 16, time = 16,
data_time = 1:15,
buffer_length_time = 999999,
buffer_length_items = 10,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 0.000000001,
noise = 0) %>% expect_equal(10)
# Now set a non-zero ltm_weight
f(seq = rep(1, times = 15),
n_gram = 1,
pos = 16, time = 16,
data_time = 1:15,
buffer_length_time = 999999,
buffer_length_items = 10,
buffer_weight = 1,
stm_duration = 0,
ltm_weight = 0.1,
ltm_half_life = 1e60,
noise = 0) %>% expect_equal(10 + 0.5)
# Now to distinguish time from position,
# we need to set a non-zero half-life.
# Nothing within the buffer decays
f(seq = rep(1, times = 10),
n_gram = 1,
pos = 10, time = 10,
data_time = 1:10,
buffer_length_time = 999999,
buffer_length_items = 10,
buffer_weight = 1,
stm_duration = 0,
ltm_weight = 1,
ltm_half_life = 1,
# ltm_weight = 0,
noise = 0) %>% expect_equal(10)
# Past the buffer, we decay with a half-life of 1
f(seq = rep(1, times = 11),
n_gram = 1,
pos = 12, time = 12,
data_time = 1:11,
buffer_length_time = 999999,
buffer_length_items = 10,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 1,
ltm_weight = 1,
noise = 0) %>% expect_equal(10 + 0.5)
f(seq = rep(1, times = 11),
n_gram = 1,
pos = 12, time = 13,
data_time = 1:11,
buffer_length_time = 999999,
buffer_length_items = 10,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 1,
noise = 0) %>% expect_equal(10 + 0.25)
## Time buffers
f(seq = rep(1, times = 10),
n_gram = 1,
pos = 10, time = 10,
data_time = seq(from = 1, by = 0.5, length.out = 10),
buffer_length_time = 7,
buffer_length_items = 1000,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 1,
noise = 0) %>%
expect_equal({
decay_exp(2, 1, 1, 0) +
decay_exp(1.5, 1, 1, 0) +
decay_exp(1, 1, 1, 0) +
decay_exp(0.5, 1, 1, 0) +
6
})
## Buffers with longer n-grams
# With a buffer of length 4,
# an n-gram of length 2 with its final symbol at pos = 1
# should still be in the buffer two symbols later (pos = 3)
# and quit it at pos = 4.
f(seq = 1:4,
n_gram = c(1, 2),
pos = 3, time = 3,
data_time = 0:3,
buffer_length_time = 999999,
buffer_length_items = 4,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 1,
ltm_weight = 0.1,
noise = 0) %>% expect_equal(1)
f(seq = 1:5, # <------
n_gram = c(1, 2),
pos = 4, time = 4, # <------
data_time = 0:4, # <------
buffer_length_time = 999999,
buffer_length_items = 4,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 1,
ltm_weight = 0.1,
noise = 0) %>% expect_equal(0.1)
# With a buffer of time length 4,
# an n-gram of length 2 with its first symbol at pos/time = 1
# should still be in the buffer at time = 4
# and quit it at time = 5
f(seq = 1:6,
n_gram = c(2, 3),
pos = 4, time = 4,
data_time = 0:5,
buffer_length_time = 4,
buffer_length_items = 1000,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 1,
ltm_weight = 0.1,
noise = 0) %>% expect_equal(1)
f(seq = 1:6,
n_gram = c(2, 3),
pos = 5, time = 5,
data_time = 0:5,
buffer_length_time = 4,
buffer_length_items = 1000,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 1,
ltm_weight = 0.1,
noise = 0) %>% expect_equal(0.1)
## Buffer rate
f(seq = rep(1, times = 10),
n_gram = 1,
pos = 10, time = 10,
data_time = seq(from = 1, by = 0.5, length.out = 10),
buffer_length_time = 7,
buffer_length_items = 1000,
buffer_weight = 0.5,
stm_duration = 0,
ltm_half_life = 1,
ltm_weight = 1,
noise = 0) %>%
expect_equal({
decay_exp(2, 1, 1, 0) +
decay_exp(1.5, 1, 1, 0) +
decay_exp(1, 1, 1, 0) +
decay_exp(0.5, 1, 1, 0) +
1 + 5 * 0.5
})
})
| /tests/testthat/test-get-weight.R | permissive | Computational-Cognitive-Musicology-Lab/ppm | R | false | false | 5,309 | r | context("test-get-weight")
test_that("misc", {
f <- function(seq,
n_gram,
pos, time,
data_time,
alphabet_size = 100,
noise = 0,
order_bound = 3,
...) {
mod <- new_ppm_decay(alphabet_size = alphabet_size, noise = noise,
order_bound = order_bound, ...)
model_seq(mod, seq, time = data_time,
train = TRUE,
predict = FALSE,
zero_indexed = TRUE)
get_weight(mod, n_gram, pos = pos, time = time, update_excluded = FALSE,
zero_indexed = TRUE)
}
decay_exp <- function(time_elapsed, half_life, start, end) {
lambda <- log(2) / half_life
end + (start - end) * exp(- lambda * time_elapsed)
}
## Item buffers
# Buffer = 10 - everything at full stm_rate
f(seq = rep(1, times = 9),
n_gram = 1,
pos = 10, time = 10,
data_time = 1:9,
buffer_length_time = 999999,
buffer_length_items = 10,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 0.000000001,
noise = 0) %>% expect_equal(9)
# No more than 10 cases can be counted
f(seq = rep(1, times = 15),
n_gram = 1,
pos = 16, time = 16,
data_time = 1:15,
buffer_length_time = 999999,
buffer_length_items = 10,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 0.000000001,
noise = 0) %>% expect_equal(10)
# Now set a non-zero ltm_weight
f(seq = rep(1, times = 15),
n_gram = 1,
pos = 16, time = 16,
data_time = 1:15,
buffer_length_time = 999999,
buffer_length_items = 10,
buffer_weight = 1,
stm_duration = 0,
ltm_weight = 0.1,
ltm_half_life = 1e60,
noise = 0) %>% expect_equal(10 + 0.5)
# Now to distinguish time from position,
# we need to set a non-zero half-life.
# Nothing within the buffer decays
f(seq = rep(1, times = 10),
n_gram = 1,
pos = 10, time = 10,
data_time = 1:10,
buffer_length_time = 999999,
buffer_length_items = 10,
buffer_weight = 1,
stm_duration = 0,
ltm_weight = 1,
ltm_half_life = 1,
# ltm_weight = 0,
noise = 0) %>% expect_equal(10)
# Past the buffer, we decay with a half-life of 1
f(seq = rep(1, times = 11),
n_gram = 1,
pos = 12, time = 12,
data_time = 1:11,
buffer_length_time = 999999,
buffer_length_items = 10,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 1,
ltm_weight = 1,
noise = 0) %>% expect_equal(10 + 0.5)
f(seq = rep(1, times = 11),
n_gram = 1,
pos = 12, time = 13,
data_time = 1:11,
buffer_length_time = 999999,
buffer_length_items = 10,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 1,
noise = 0) %>% expect_equal(10 + 0.25)
## Time buffers
f(seq = rep(1, times = 10),
n_gram = 1,
pos = 10, time = 10,
data_time = seq(from = 1, by = 0.5, length.out = 10),
buffer_length_time = 7,
buffer_length_items = 1000,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 1,
noise = 0) %>%
expect_equal({
decay_exp(2, 1, 1, 0) +
decay_exp(1.5, 1, 1, 0) +
decay_exp(1, 1, 1, 0) +
decay_exp(0.5, 1, 1, 0) +
6
})
## Buffers with longer n-grams
# With a buffer of length 4,
# an n-gram of length 2 with its final symbol at pos = 1
# should still be in the buffer two symbols later (pos = 3)
# and quit it at pos = 4.
f(seq = 1:4,
n_gram = c(1, 2),
pos = 3, time = 3,
data_time = 0:3,
buffer_length_time = 999999,
buffer_length_items = 4,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 1,
ltm_weight = 0.1,
noise = 0) %>% expect_equal(1)
f(seq = 1:5, # <------
n_gram = c(1, 2),
pos = 4, time = 4, # <------
data_time = 0:4, # <------
buffer_length_time = 999999,
buffer_length_items = 4,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 1,
ltm_weight = 0.1,
noise = 0) %>% expect_equal(0.1)
# With a buffer of time length 4,
# an n-gram of length 2 with its first symbol at pos/time = 1
# should still be in the buffer at time = 4
# and quit it at time = 5
f(seq = 1:6,
n_gram = c(2, 3),
pos = 4, time = 4,
data_time = 0:5,
buffer_length_time = 4,
buffer_length_items = 1000,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 1,
ltm_weight = 0.1,
noise = 0) %>% expect_equal(1)
f(seq = 1:6,
n_gram = c(2, 3),
pos = 5, time = 5,
data_time = 0:5,
buffer_length_time = 4,
buffer_length_items = 1000,
buffer_weight = 1,
stm_duration = 0,
ltm_half_life = 1,
ltm_weight = 0.1,
noise = 0) %>% expect_equal(0.1)
## Buffer rate
f(seq = rep(1, times = 10),
n_gram = 1,
pos = 10, time = 10,
data_time = seq(from = 1, by = 0.5, length.out = 10),
buffer_length_time = 7,
buffer_length_items = 1000,
buffer_weight = 0.5,
stm_duration = 0,
ltm_half_life = 1,
ltm_weight = 1,
noise = 0) %>%
expect_equal({
decay_exp(2, 1, 1, 0) +
decay_exp(1.5, 1, 1, 0) +
decay_exp(1, 1, 1, 0) +
decay_exp(0.5, 1, 1, 0) +
1 + 5 * 0.5
})
})
|
# Read in Files:
# Adapted From:
# https://blog.exploratory.io/how-to-read-multiple-excel-or-csv-files-together-42af5d314a10
# Since all files share the same data structure, combine the datasets into one
# data frame (one row set per season, tagged by source file name in `id`).
# BUG FIX: list.files(pattern=) takes a *regular expression*, not a glob;
# "*.csv" accidentally matched any name containing "csv" anywhere. Anchor on
# the ".csv" extension instead.
datset <- list.files(pattern = "\\.csv$")
# sapply(..., simplify = FALSE) keeps a named list, so bind_rows(.id = "id")
# records each row's source file.
ATPSeasons <- sapply(datset, read_csv, simplify = FALSE) %>%
  bind_rows(.id = "id")
ATPSeasons %>% group_by(tourney_name) %>%
  count(tourney_name)
# 827 tournaments
#################################################################################
# Manipulation:
# Cleaning:
# Inspect (result not assigned): how many rows carry a bare "RET"/"W/O" score?
ATPSeasons %>%
  filter(score %in% c("RET", "W/O"))
# 171
# Delete rows with incomplete matches
# NOTE(review): this removes only rows whose *entire* score field equals
# "RET" or "W/O"; partial scores such as "6-4 2-1 RET" (if present) survive.
# Confirm this matches the intended definition of "incomplete".
ATPSeasons<-ATPSeasons[!(ATPSeasons$score=="RET" | ATPSeasons$score=="W/O"),]
# drop rows that have NA statistics:
ATPSeasons<-ATPSeasons[!(is.na(ATPSeasons$w_ace)),] # when ace is NA, mostly, the rest of the statistics are NA
# Exclude Davis Cup:
ATPSeasons <- ATPSeasons %>%
  filter(tourney_level != "D")
dim(ATPSeasons)
# 50 * 26507
# id as a year for easy analysis by year:
# Each id is the source file name "atp_matches_YYYY.csv"; extract YYYY with a
# regex instead of the original ten-deep nested ifelse. Unrecognised ids become
# NA, matching the original fall-through, and any season file (not just
# 2009-2018) is now handled automatically.
ATPSeasons$id <- ifelse(
  grepl("^atp_matches_[0-9]{4}\\.csv$", ATPSeasons$id),
  sub("^atp_matches_([0-9]{4})\\.csv$", "\\1", ATPSeasons$id),
  NA
)
####################################################################################################################
# EDA
# number of matches per level:
table(ATPSeasons$tourney_level)
# percentage of each level
# reorder the level according to tier:
# NOTE(review): level "D" (Davis Cup) is kept in the factor even though those
# rows were filtered out above, so it appears as an empty level.
ATPSeasons <- arrange(transform(ATPSeasons,
                                tourney_level=factor(tourney_level,levels=c("G", "M", "A","F","D"))),tourney_level)
ATPSeasons <- arrange(transform(ATPSeasons,
                                round=factor(round,levels=c("F", "SF", "QF", "R16", "R32", "R64", "R128", "PR"))),round)
# Per-year share of matches by tourney level (perc sums to 100 within a year).
ATPSeasonsLp <- ATPSeasons %>%
  group_by(id) %>%
  count(tourney_level) %>%
  mutate(perc = n*100 / sum(n))
# Bar chart of level shares, one facet per year; labels rounded to whole percent.
ATPSeasonsLp %>%
  group_by(id) %>%
  ggplot(aes(x = tourney_level, y = perc)) +
  geom_bar(stat = "identity", fill='steelblue', color='black') +
  theme(text = element_text(size = 20)) +
  geom_text(aes(label = percent(round(perc)/100), vjust = 0.5),size = 7) +
  facet_wrap(id ~.) +
  xlab("Tourney Level") + ylab("Percentage %")
# ..................
# Surface
# percentage of each surface
# Same per-year percentage breakdown, now by playing surface.
ATPSeasonsLp <- ATPSeasons %>%
  group_by(id) %>%
  count(surface) %>%
  mutate(perc = n*100 / sum(n))
ATPSeasonsLp %>%
  group_by(id) %>%
  ggplot(aes(x = surface, y = perc)) +
  geom_bar(stat = "identity", fill='steelblue', color='black') +
  theme(text = element_text(size = 20)) +
  geom_text(aes(label = percent(round(perc)/100), vjust = 1),size = 7) +
  facet_wrap(id ~.) +
  xlab("Surface") + ylab("Percentage %")
# ..................
# winner_hand
table(ATPSeasons$winner_hand)
ATPSeasons$winner_hand[is.na(ATPSeasons$winner_hand)] <- "U"
sum(is.na(ATPSeasons$winner_hand))
# loser_hand
table(ATPSeasons$loser_hand)
ATPSeasons$loser_hand[is.na(ATPSeasons$loser_hand)] <- "U"
sum(is.na(ATPSeasons$loser_hand))
# ..................
# Age difference distribution: winner age minus loser age, per match
ATPSeasons$agedif <- ATPSeasons$winner_age - ATPSeasons$loser_age
# Adapted From:
# https://www.kaggle.com/ambarish/omnibus-womens-and-mens-tennis-matches-analysis
ATPSeasons %>%
ggplot(aes(x = agedif)) +
geom_density(fill = "steelblue") +
labs(x= 'Age Difference',y = 'Count', title = paste("Distribution of Age Difference")) +
theme_bw()
sd(ATPSeasons$agedif, na.rm = TRUE)
# 5.365677
# mean age of winners vs losers, per season
plot1 <- ATPSeasons %>%
group_by(id) %>%
summarise(Winners = mean(winner_age, na.rm = TRUE), Losers = mean(loser_age, na.rm = TRUE) ) %>%
pivot_longer(c(Winners, Losers), names_to = "Type", values_to = "Age") %>%
ggplot(aes(id, Age, group = Type, color = Type)) + geom_line(size = 1) +
labs(title = paste(" (a) Distribution of Age Difference")) +
theme(text = element_text(size = 20), legend.position = "none") +
labs(x= "Year", y="Mean Age")
# ..................
# Height difference distribution: winner height minus loser height (cm)
ATPSeasons$htdif <- ATPSeasons$winner_ht - ATPSeasons$loser_ht
# Adapted From:
# https://www.kaggle.com/ambarish/omnibus-womens-and-mens-tennis-matches-analysis
ATPSeasons %>%
ggplot(aes(x = htdif)) +
geom_density(fill = "steelblue") +
labs(x= 'Height Difference',y = 'Count', title = paste("Distribution of Height Difference")) +
theme_bw()
sd(ATPSeasons$htdif, na.rm = TRUE)
# 10.11299
# mean height of winners vs losers, per season (comment corrected; was "age mean")
plot2 <- ATPSeasons %>%
group_by(id) %>%
summarise(Winners = mean(winner_ht, na.rm = TRUE), Losers = mean(loser_ht, na.rm = TRUE) ) %>%
pivot_longer(c(Winners, Losers), names_to = "Type", values_to = "Hight") %>%
ggplot(aes(id, Hight, group = Type, color = Type)) + geom_line(size = 1) +
labs(title = paste(" (b) Distribution of Height Difference")) +
theme(text = element_text(size = 20)) +
labs(x= "Year", y="Mean Height")
# side-by-side panels (a) and (b); requires gridExtra
grid.arrange(plot1, plot2, ncol=2)
# ..................
# best of
table(ATPSeasons$best_of)
# 3 5
# 21445 5062
# ..................
# who won the most matches each season in the last 10 years before 2019?
# one row per match, with wins = the winner's total wins in that season
winnersT<- ATPSeasons %>%
group_by(id, winner_name)%>%
mutate(wins = n())%>%
arrange(id)
summary(winnersT)
# Investigate correlation between Tournaments wins and Wins number / win ratio each season:
# wins number: per-season win count attached to every winning row
ATPSeasons <- ATPSeasons %>%
  group_by(id, winner_name) %>%
  mutate(winers_wins = n()) %>%
  ungroup()
# Win ratio:
# one row per (season, player) with that player's number of losses.
# Reducing to the distinct keys keeps the join below one-to-many; joining the
# full loss table (as originally written) would both explode every winning row
# by each losing row of the same player AND duplicate shared column names
# (e.g. winers_wins -> winers_wins.x/.y), breaking the mutate() below.
Lossdf <- ATPSeasons %>%
  group_by(id, loser_name) %>%
  mutate(winers_loses = n()) %>%
  ungroup() %>%
  distinct(id, loser_name, winers_loses)
# attach each winner's season loss count and compute the win ratio (%);
# inner join drops players with no losses in that season, as before
ATPSeasons <- ATPSeasons %>%
  inner_join(Lossdf, by = c("winner_name" = "loser_name", "id" = "id")) %>%
  mutate(winner_wr = winers_wins * 100 / (winers_wins + winers_loses))
# Tournaments wins:
# winning a final == winning the tournament; Twins = finals won per season
TwinnersT <- ATPSeasons %>% filter(round == "F") %>%
group_by(id, winner_name)%>%
mutate(Twins = n())
# correlation per season: tournament wins vs total wins / win ratio
Win_Cor_Season <- TwinnersT %>% group_by(id)%>%
summarise(Tour_Wnum = cor(Twins,winers_wins), Tour_WRatio = cor(Twins,winner_wr))
# plot: both correlation series over the ten seasons
Win_Cor_Season %>%
pivot_longer(c(Tour_Wnum, Tour_WRatio), names_to = "Type", values_to = "Corr") %>%
ggplot(aes(x = id, y = Corr, group = Type, color = Type)) +
geom_line(size =1) + theme(axis.text.x = element_text(hjust = 1)) +
theme(text = element_text(size = 20)) +
labs(x= "Year", y="Correlation Ratio")
cor.test(TwinnersT$Twins,TwinnersT$winers_wins) # 2.2e-16
cor.test(TwinnersT$Twins,TwinnersT$winner_wr) # 2.2e-16
# inspect 2014 season: tournament wins per player, most successful first
Twinners2014 <- TwinnersT %>% filter(id == "2014") %>%
group_by(winner_name)%>%
summarize(Twins = n())%>%
arrange(desc(Twins))
# simple linear models: tournament wins explained by wins / win ratio
model1 <- lm(Twins ~ winers_wins, data = TwinnersT)
summary(model1)
model2 <- lm(Twins ~ winner_wr, data = TwinnersT)
summary(model2)
# .................
# Investigate winners performance per surface in the Final rounds:
# the winner of the final round will be the tournament winner.
## Tournament winners by surface: Players win ratio per surface for the tournament winners
# TWW: per tournament winner, number of 2014 wins on each surface
Twinners2014 %>%
inner_join(ATPSeasons %>% filter(id == "2014"), by = "winner_name", suffix = c("_19", "_TW") ) %>%
group_by(winner_name) %>%
count(surface) -> TWW
# TWL: per tournament winner, number of 2014 losses on each surface
Twinners2014 %>%
inner_join(ATPSeasons %>% filter(id == "2014"), by = c("winner_name" = "loser_name")) %>%
group_by(winner_name) %>%
count(surface) -> TWL
TWW %>%
inner_join(TWL, by = "winner_name", suffix = c("_W", "_L") ) -> df1
df1$surface <- NA
df1$Sprec <- NA
# widen/lengthen so every player has a (possibly zero) win AND loss count for
# each of the three surfaces; values_fill = 0 inserts the missing zeros
df1 <- df1 %>% pivot_wider(names_from = surface_W, values_from = n_W, values_fill = 0) %>%
pivot_longer(c(Clay, Grass, Hard), names_to = "surface_W", values_to = "n_W") %>%
pivot_wider(names_from = surface_L, values_from = n_L, values_fill = 0) %>%
pivot_longer(c(Clay, Grass, Hard), names_to = "surface_L", values_to = "n_L")
df1$surface <- unlist(df1$surface)
# keep rows where the win-surface equals the loss-surface and compute win %
# NOTE(review): columns 4 and 6 are assumed to be surface_W / surface_L after
# the pivots above - verify if the pivot output column order ever changes
for (i in 1:nrow(df1)) {
if (df1[i,4] == df1[i,6]){
df1$surface[i] <- df1[i,4]
df1$Sprec[i] <- df1$n_W[i]*100/(df1$n_W[i]+ df1$n_L[i])
}
}
# drop the mismatched-surface rows (their surface/Sprec stayed NA)
df1 <- na.omit(df1)
df1$surface <- unlist(df1$surface)
# stacked bar: win percentage per surface for each tournament winner
df1 %>%
ggplot(aes(winner_name, y=Sprec, fill = surface)) +
geom_col() + theme(text = element_text(size = 20), axis.text.x = element_text(angle = 90, hjust = 1)) +
geom_text(aes(label = round(Sprec,2)), position = position_stack(vjust = 0.5), fontface = "bold")+
xlab("Winner Name") + ylab("Percentage of Wins %")
df1 %>% group_by(winner_name)
# ..................
# Players' Ranks:
Season2014 <- ATPSeasons %>% filter(id == "2014")
TSeason2014 <- TwinnersT %>% filter(id == "2014")
summary(Season2014$winner_rank)
sum(is.na(Season2014$winner_rank))
# (a) density of winners' ranks over the whole 2014 season
g1 <- Season2014 %>%
ggplot(aes(x = winner_rank)) +
geom_density(aes( y=..scaled..), fill='steelblue', color='black') +
theme(text = element_text(size = 20), axis.text.x = element_text(hjust = 0.5)) +
xlim(1,503) + xlab("Winner Rank") + ylab("Scale") +
ggtitle(" (a) The Density Estimate of Winners Ranks") +
theme(plot.title = element_text(size = 20, face = "bold"))
# (b) win counts for ranks up to 73 (the 75th percentile cutoff)
g2 <- Season2014 %>%
filter(winner_rank <=73)%>%
count(winner_rank) %>%
ggplot(aes(x = winner_rank, y = n)) +
geom_col(fill='steelblue', color='black') +
theme(text = element_text(size = 20), axis.text.x = element_text(hjust = 0.5)) +
xlim(1,76) + xlab("Winner Rank") + ylab("Count") +
ggtitle(" (b) The 75th percentile of Winners Ranks") +
theme(plot.title = element_text(size = 20, face = "bold"))
# The distribution is sort of uniform especially form rank 10 to 76
summary(TSeason2014$winner_rank)
# (c) tournament winners' ranks (top 30) by level and surface
g3 <- ggplot(TSeason2014 %>% filter(winner_rank <=30), aes(x=winner_rank, fill = surface)) +
geom_bar() +
theme(text = element_text(size = 20), axis.text.x = element_text(hjust = 0.5)) +
facet_wrap(tourney_level~.) +
xlab("Winner Rank") + ylab("Count") +
ggtitle(" (c) Ranks Distribution of Tournament Winners by Level and Surface ") +
theme(plot.title = element_text(size = 20, face = "bold"))
# composite layout: (a) on the left, (b) stacked over (c) on the right
grid.arrange(arrangeGrob(g1),
arrangeGrob(g2,g3, ncol=1),
ncol=2, widths=c(1,1))
# ..................
# Player seeds
Season2014 <- ATPSeasons %>% filter(id == "2014")
Season2014 %>%
arrange(winner_rank) %>%
group_by(winner_rank) %>%
count()
# histogram of 2014 winners' ranks (axis labelled "Winner Seed")
Season2014 %>%
count(winner_rank) %>%
ggplot(aes(x = winner_rank)) +
geom_histogram(fill='steelblue') +
theme(axis.text.x = element_text(angle = 90, hjust = 0.5)) +
scale_x_continuous("Winner Seed", labels = as.character(Season2014$winner_rank), breaks = Season2014$winner_rank) +
ylab("Count")
table(Season2014$winner_rank, Season2014$tourney_level)
# winners' ranks up to 100, faceted per season
# NOTE(review): breaks/labels are taken from Season2014 even though the data
# covers all seasons - presumably intentional reuse, but verify
ATPSeasons %>%
filter(winner_rank <=100) %>%
count(winner_rank) %>%
ggplot(aes(x = winner_rank)) +
geom_histogram(fill='steelblue') +
theme(axis.text.x = element_text(angle = 90, hjust = 0.5)) +
scale_x_continuous("Winner Rank", labels = as.character(Season2014$winner_rank), breaks = Season2014$winner_rank) +
theme(text = element_text(size = 20)) +
xlab("Winner Rank") + ylab("Count") + xlim(0, 100) + facet_wrap(id~.)
# * higher seeded players win more matches than lower ranked seed players.(para)
# Are seeds more likely to win tournaments?
# Seeds
TwinnersT %>%
group_by(winner_seed) %>%
count() %>%
arrange(desc(n))
ggplot(TwinnersT, aes(x=winner_seed, fill = tourney_level)) +
geom_histogram() +
theme(text = element_text(size = 20)) +
xlab("Winner Seed") + ylab("Count") +
facet_wrap(id~.)
# Ranks
TwinnersT %>%
group_by(winner_rank) %>%
count() %>%
arrange(desc(n))
ggplot(TwinnersT, aes(x=winner_rank, fill = tourney_level)) +
geom_histogram() +
theme(text = element_text(size = 20)) +
xlab("Winner Seed") + ylab("Count") + xlim(0, 300) +
facet_wrap(id~.)
#################################################################################################################
# Match Statistics: Winners and Losers
# Aces: number of aces as a predictor
Yearsdf <- c(2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018)
ace_per <- vector()
n <- 1
for (y in Yearsdf) {
  df <- ATPSeasons %>% filter(id == y)
  # Subtract first, THEN drop NA rows. Calling na.omit() on each column
  # separately (as originally written) misaligns winner/loser pairs whenever
  # the NA pattern differs between the two columns (and can even recycle
  # vectors of unequal length).
  ace_dif <- na.omit(df$w_ace - df$l_ace)
  ace_dif <- ifelse(ace_dif > 0, 1, 0)
  # what percentage of time did winner hit more aces than the loser
  ace_per[n] <- sum(ace_dif, na.rm = TRUE) * 100 / length(ace_dif)
  n <- n + 1
}
# statistics:
summary(Season2014$w_ace)
summary(Season2014$l_ace)
# .......................
# Adapted From:
# https://medium.com/swlh/predicting-atp-tennis-match-outcomes-using-serving-statistics-cde03d99f410
# Double faults as a predictor:
# difference between number of double faults hit by the loser and the winner
df_per <- vector()
n <- 1
for (y in Yearsdf) {
  df <- ATPSeasons %>% filter(id == y)
  # aligned subtraction, then drop incomplete pairs (same fix as above)
  df_dif <- na.omit(df$l_df - df$w_df)
  df_dif <- ifelse(df_dif > 0, 1, 0)
  # what percentage of time did losers make more double faults than the winners
  df_per[n] <- sum(df_dif, na.rm = TRUE) * 100 / length(df_dif)
  n <- n + 1
}
# statistics (double faults; the original mistakenly repeated the ace summaries here):
summary(Season2014$w_df)
summary(Season2014$l_df)
# .......................
# The first serve percentage: share of serve points where the 1st serve landed
fsp_per <- vector()
n <- 1
for (y in Yearsdf){
df <- ATPSeasons %>% filter(id == y)
p1spw <- df$w_1stIn/df$w_svpt # winner
p1spl <- df$l_1stIn/df$l_svpt # loser
p1sd <- ifelse((p1spw - p1spl) > 0 , 1 , 0)
#percentage of time winner had a better 1st serve percentage
fsp_per[n] <- sum(p1sd, na.rm = TRUE)*100/length(p1sd)
n <- n +1
}
# .......................
#First and second serve winning percentages
# First: points won on 1st serve / 1st serves made
fswp_per <- vector()
n <- 1
for (y in Yearsdf){
df <- ATPSeasons %>% filter(id == y)
w1swp <- df$w_1stWon/ df$w_1stIn
w1slp <- df$l_1stWon/ df$l_1stIn
f1swpd <- ifelse((w1swp - w1slp) > 0 , 1 , 0)
#percentage of time the winning player had a higher winning percentage on 1st serve
fswp_per[n] <- sum(f1swpd, na.rm = TRUE)*100/length(f1swpd)
n <- n +1
}
# Second: points won on 2nd serve / 2nd serves (serve points minus 1st serves in)
S2swp_per <- vector()
n <- 1
for (y in Yearsdf){
df <- ATPSeasons %>% filter(id == y)
w2swp <- df$w_2ndWon/ (df$w_svpt- df$w_1stIn)
w2slp <- df$l_2ndWon/ (df$l_svpt- df$l_1stIn)
s2swpd <- ifelse((w2swp - w2slp) > 0 , 1 , 0)
#percentage of time the winning player had a higher winning percentage on 2nd serve
S2swp_per[n] <- sum(s2swpd, na.rm = TRUE)*100/length(s2swpd)
n <- n +1
}
# .......................
# Break Points
# Break points faced: 1 when the eventual winner faced MORE break points
bpf_per <- vector()
n <- 1
for (y in Yearsdf){
df <- ATPSeasons %>% filter(id == y)
bpfd <- ifelse((df$w_bpFaced - df$l_bpFaced) > 0 , 1 , 0)
#percentage
bpf_per[n] <- sum(bpfd, na.rm = TRUE)*100/length(bpfd)
n <- n +1
}
# break points saved (%); players facing zero break points count as 0% saved
bps_per <- vector()
n <- 1
for (y in Yearsdf){
df <- ATPSeasons %>% filter(id == y)
df$w_bpSavedp <- ifelse(df$w_bpFaced == 0, 0, df$w_bpSaved*100/df$w_bpFaced)
df$l_bpSavedp <- ifelse(df$l_bpFaced == 0, 0, df$l_bpSaved*100/df$l_bpFaced)
bpsd <- ifelse((df$w_bpSavedp - df$l_bpSavedp) > 0 , 1 , 0)
#percentage
bps_per[n] <- sum(bpsd, na.rm = TRUE)*100/length(bpsd)
n <- n +1
}
# Return Games Won %: opponent's serve points NOT won by the opponent
Ret_per <- vector()
n <- 1
for (y in Yearsdf){
df <- ATPSeasons %>% filter(id == y)
df$w_Retpwp <- (df$l_svpt-(df$l_1stWon+df$l_2ndWon))*100/df$l_svpt
df$l_Retpwp <- (df$w_svpt-(df$w_1stWon+df$w_2ndWon))*100/df$w_svpt
Retd <- ifelse((df$w_Retpwp - df$l_Retpwp) > 0 , 1 , 0)
#percentage
Ret_per[n] <- sum(Retd, na.rm = TRUE)*100/length(Retd)
n <- n +1
}
# data frame of percentage difference:
# 8 statistics (rows) x 10 seasons (columns 2:11)
per_diff_stat <- data.frame(matrix(ncol = 11, nrow = 8))
x <- c("Stat", "2009", "2010", "2011", "2012", "2013", "2014", "2015", "2016", "2017", "2018")
colnames(per_diff_stat) <- x
# stats of interest:
per_diff_stat$Stat <- c("Aces", "Double Faults", "First Serve Percentage",
                        "1st Serve Win %", "2nd Serve Win %",
                        "Break Points Saved", "Break Points Faced", "Return Games Won %")
# one column per statistic, one row per season (length-10 vectors built above)
mStat <- data.frame(ace_per, df_per, fsp_per, fswp_per, S2swp_per, bps_per, bpf_per, Ret_per)
# transpose so each statistic becomes a row across the ten season columns;
# replaces the original index loop (whose manual `n <- n + 1` inside the
# `for` was a no-op, since `for` reassigns n each iteration)
per_diff_stat[, 2:11] <- t(mStat)
################################################################################
################################################################################
################################################################################
## Upset matches:
# Define:
# rank difference > 15
# seeds were inspected and they are not very accurate
# will build on ranks
## How many times occur in the last 10 years?
# Create upset column : (1 means the match was an upset)
# lower numeric rank = better player, so winner_rank - loser_rank > 15 means
# the winner was ranked at least 16 places below (worse than) the loser
ATPSeasons$Upset15 <- ifelse(ATPSeasons$winner_rank - ATPSeasons$loser_rank > 15, 1, 0)
# upsets per season over time
ATPSeasons %>%
group_by(id) %>%
summarise(upsets_num = sum(Upset15, na.rm = TRUE)) %>%
ggplot(aes(id, upsets_num, group = 1)) + geom_line(size = 1) +
theme(text = element_text(size = 20))+
labs(x= "Year", y="Number of Upset Matches")
# Investigate upset matches: any patterns?
# remove NA: drops matches where either player's rank was missing
ATPSeasons <- ATPSeasons[!(is.na(ATPSeasons$Upset15)),]
# Share of matches per season broken down by `var`, restricted to matches
# where Upset15 == flag (1 = upset, 0 = regular). Each call draws one
# faceted bar chart, identical to the six hand-written chains it replaces.
upset_share_plot <- function(var, flag) {
  ATPSeasons %>%
    filter(Upset15 == flag) %>%
    group_by(id) %>%
    count({{ var }}) %>%
    mutate(perc = n * 100 / sum(n)) %>%
    ggplot(aes(x = {{ var }}, y = perc)) +
    geom_bar(stat = "identity", fill = 'steelblue') +
    geom_text(aes(label = percent(round(perc) / 100), vjust = -0.5)) +
    facet_wrap(id ~ .)
}
# Surface mix: upsets vs regular matches
upset_share_plot(surface, 1)
upset_share_plot(surface, 0)
# .................................
# Tournament level mix: upsets vs regular matches
upset_share_plot(tourney_level, 1)
upset_share_plot(tourney_level, 0)
# .................................
# Round mix: upsets vs regular matches
upset_share_plot(round, 1)
upset_share_plot(round, 0)
# .................................
# minutes: match duration in upsets (m1) vs regular matches (m0)
ATPSeasons %>%
filter(Upset15 == 1) %>%
group_by(id )-> m1
summary(m1$minutes)
ATPSeasons %>%
filter(Upset15 == 0) %>%
group_by(id )-> m0
summary(m0$minutes)
# percentage difference of the two means relative to their average
mperavg <- (mean(m1$minutes, na.rm = TRUE)-mean(m0$minutes, na.rm = TRUE))*200/(mean(m1$minutes, na.rm = TRUE)+mean(m0$minutes, na.rm = TRUE))
# 3.17%
# 2014 only
ATPSeasons %>%
filter(id == "2014", Upset15 == 1) -> m1
summary(m1$minutes)
ATPSeasons %>%
filter(id == "2014", Upset15 == 0) -> m0
summary(m0$minutes)
mperavg <- (mean(m1$minutes, na.rm = TRUE)-mean(m0$minutes, na.rm = TRUE))*200/(mean(m1$minutes, na.rm = TRUE)+mean(m0$minutes, na.rm = TRUE))
# 0.17%
# match statistics 2014:
# Aces: number of aces as a predictor
# upsets:
U <- ATPSeasons %>%
  filter(id == "2014", Upset15 == 1)
# Subtract first, THEN drop NA rows: calling na.omit() on each column
# separately (as originally written) misaligns winner/loser pairs whenever
# the NA pattern differs between the two columns.
ace_dif <- na.omit(U$w_ace - U$l_ace)
ace_dif <- ifelse(ace_dif > 0, 1, 0)
# what percentage of time did winner hit more aces than the loser
sum(ace_dif, na.rm = TRUE) * 100 / length(ace_dif)
# 55.33662
# statistics:
summary(U$w_ace)
summary(U$l_ace)
# Regular:
R <- ATPSeasons %>%
  filter(id == "2014", Upset15 == 0)
ace_dif <- na.omit(R$w_ace - R$l_ace)
ace_dif <- ifelse(ace_dif > 0, 1, 0)
# what percentage of time did winner hit more aces than the loser
sum(ace_dif, na.rm = TRUE) * 100 / length(ace_dif)
# 61.97%
# statistics:
summary(R$w_ace)
summary(R$l_ace)
#-------------------
# Double faults: number of double faults as a predictor
# difference between number of double faults hit by the loser and the winner
# Upsets:
df_dif <- U$l_df - U$w_df
# number of times loser hit more double faults in a match
df_dif <- ifelse(df_dif > 0, 1, 0)
# what percentage of time did loser hit more double faults than the winner
sum(df_dif, na.rm = TRUE) * 100 / length(df_dif)
# 53.69 %
summary(U$w_df)
summary(U$l_df)
# Regular:
df_dif <- R$l_df - R$w_df
# number of times loser hit more double faults in a match
df_dif <- ifelse(df_dif > 0, 1, 0)
# what percentage of time did loser hit more double faults than the winner
sum(df_dif, na.rm = TRUE) * 100 / length(df_dif)
# 51.12 %
summary(R$w_df)
summary(R$l_df)
# .................................
# #first serve percentage = number of first serves made / number of serve points
#upsets:
p1spw <- U$w_1stIn/U$w_svpt # winner
p1spl <- U$l_1stIn/U$l_svpt # loser
summary(p1spw)
summary(p1spl)
#First serve percentage as a predictor
#percentage of time winner had a better 1st serve percentage
p1sd <- ifelse((p1spw - p1spl) > 0 , 1 , 0)
sum(p1sd, na.rm = TRUE)*100/length(p1sd)
# 51.4 %
# Regular:
p1spw <- R$w_1stIn/R$w_svpt # winner
p1spl <- R$l_1stIn/R$l_svpt # loser
summary(p1spw)
summary(p1spl)
#First serve percentage as a predictor
#percentage of time winner had a better 1st serve percentage
p1sd <- ifelse((p1spw - p1spl) > 0 , 1 , 0)
sum(p1sd, na.rm = TRUE)*100/length(p1sd)
# 56.31 %
#First and second serve winning percentages
# #first serve winning percentage = number of first-serve points won / number of first serves made
# Upsets:
w1swp <- U$w_1stWon/ U$w_1stIn
w1slp <- U$l_1stWon/ U$l_1stIn
#percentage of time the winning player had a higher winning percentage on 1st serve
f1swpd <- ifelse((w1swp - w1slp) > 0 , 1 , 0)
sum(f1swpd, na.rm = TRUE)*100/length(f1swpd)
# 77.18%
summary(w1swp)
summary(w1slp)
# Regular:
w1swp <- R$w_1stWon/R$w_1stIn
w1slp <- R$l_1stWon/R$l_1stIn
#percentage of time the winning player had a higher winning percentage on 1st serve
f1swpd <- ifelse((w1swp - w1slp) > 0 , 1 , 0)
sum(f1swpd, na.rm = TRUE)*100/length(f1swpd)
# 85.23%
summary(w1swp)
summary(w1slp)
#second serve winning percentage = 2nd-serve points won / 2nd serves played
# Upsets:
w2swp <- U$w_2ndWon/ (U$w_svpt- U$w_1stIn)
w2slp <- U$l_2ndWon/ (U$l_svpt- U$l_1stIn)
# #percentage of time the winning player had a higher winning percentage on 2nd serve
s2swpd <- ifelse((w2swp - w2slp) > 0 , 1 , 0)
sum(s2swpd, na.rm = TRUE)*100/length(s2swpd)
# 75.53%
summary(w2swp)
summary(w2slp)
# Regular:
w2swp <- R$w_2ndWon/ (R$w_svpt - R$w_1stIn)
w2slp <- R$l_2ndWon/ (R$l_svpt - R$l_1stIn)
# #percentage of time the winning player had a higher winning percentage on 2nd serve
s2swpd <- ifelse((w2swp - w2slp) > 0 , 1 , 0)
sum(s2swpd, na.rm = TRUE)*100/length(s2swpd)
# 76.63%
summary(w2swp)
summary(w2slp)
# .................................
# Break points faced
# Build aligned winner/loser pairs first, then drop incomplete rows.
# na.omit() on each column separately (as originally written) can misalign
# pairs — or even produce unequal column lengths and a data.frame() error.
# Upset
bpf <- na.omit(data.frame(winner = U$w_bpFaced, loser = U$l_bpFaced))
bpfd <- ifelse((bpf$loser - bpf$winner) > 0, 1, 0)
sum(bpfd, na.rm = TRUE) * 100 / length(bpfd)
# 71.43%
summary(bpf)
# Regular:
bpf <- na.omit(data.frame(winner = R$w_bpFaced, loser = R$l_bpFaced))
summary(bpf)
bpfd <- ifelse((bpf$loser - bpf$winner) > 0, 1, 0)
sum(bpfd, na.rm = TRUE) * 100 / length(bpfd)
# 78.72%
# .................................
# break points saved
# upset:
bps <- na.omit(data.frame(winner = U$w_bpSaved, loser = U$l_bpSaved))
bpsd <- ifelse((bps$winner - bps$loser) > 0, 1, 0)
sum(bpsd, na.rm = TRUE) * 100 / length(bpsd)
# 32.18%
summary(bps)
# Regular:
bps <- na.omit(data.frame(winner = R$w_bpSaved, loser = R$l_bpSaved))
bpsd <- ifelse((bps$winner - bps$loser) > 0, 1, 0)
sum(bpsd, na.rm = TRUE) * 100 / length(bpsd)
# 29.07%
summary(bps)
###############################################################################
# Upsets vs Reg for all years:
# upsets:
U <- ATPSeasons %>%
filter(Upset15 == 1)
# Regular:
R <- ATPSeasons %>%
filter(Upset15 == 0)
# another script
##############################################################################
# Adapted From: https://tennismash.com/2016/01/21/302/
# Compare the performance of the upset winner to their average performance on the same surface
upsets <- ATPSeasons %>%
filter(id == "2014", Upset15 == 1) # 609
regular <- ATPSeasons %>%
filter(id == "2014", Upset15 == 0) # 1964
## Find the statistics of the upsets winners in regular matches: df1 (as winners in R) & df2 (as losers in R)
## NOTE: df1 is reused here, overwriting the surface-win-ratio table built earlier
## 1- upsets winners as winners of regular:
df1 <- upsets %>%
select(winner_id) %>%
distinct(winner_id) %>%
inner_join(regular, by = c("winner_id" = "winner_id"))
## 2- upsets winners as losers of regular:
df2 <- upsets %>%
select(winner_id) %>%
distinct(winner_id) %>%
inner_join(regular, by = c("winner_id" = "loser_id"))
## Find the statistics of the upsets losers in regular matches: df3 (as winners in R) & df4 (as losers in R)
## 3- upsets losers as winners of regular:
df3 <- upsets %>%
select(loser_id) %>%
distinct(loser_id) %>%
inner_join(regular, by = c("loser_id" = "winner_id"))
## 4- upsets losers as losers of regular:
df4 <- upsets %>%
select(loser_id) %>%
distinct(loser_id) %>%
inner_join(regular, by = c("loser_id" = "loser_id"))
# ...................
## statistics of the upsets winners in regular matches (df1 & df2)
## Then compare these statistics in upset matches in the same surface: (upsets)
data_list <- list(df1, df2, df3, df4)
# 13 columns: Stat + one column per (data frame, surface) combination
Tstat <- data.frame(matrix(ncol = 13, nrow = 8))
x <- c("Stat", "df1C", "df1G","df1H", "df2C", "df2G","df2H","df3C", "df3G","df3H","df4C", "df4G","df4H")
colnames(Tstat) <- x
# stats of interest:
Tstat$Stat <- c("Ace", "DF", "First Serve %",
"1st Srv Won %", "2nd Srv Won %",
"BP Saved %", "BP Faced", "Return %")
# NOTE: this creates a global `surface` vector; in filter() below the
# data-frame column `surface` still takes precedence via data masking
surface <- c("Clay", "Grass", "Hard")
n <- 1
dn <- 0
for (d in data_list){
dn <- dn + 1
# df1 & df3 hold the players as *winners* (w_ columns); df2 & df4 as *losers* (l_ columns)
if(dn == 1 | dn == 3){
# NOTE(review): the result of this ifelse is discarded, so the line has no
# effect; the column index n simply keeps incrementing 1..13 across the
# four data frames, which happens to fill the intended columns anyway
ifelse(n <= 4, n , 7)
for (s in surface){
df <- d %>% filter(surface == s)
n <- n +1
Tstat[1, n] <- mean(df$w_ace) ## Aces
Tstat[2, n] <- mean(df$w_df) ## double faults
Tstat[3, n] <- mean(df$w_1stIn/df$w_svpt, na.rm = TRUE) ## First serve percentage
Tstat[4, n] <- mean(df$w_1stWon/df$w_1stIn, na.rm = TRUE) ## First serve winning percentages
Tstat[5, n] <- mean(df$w_2ndWon/(df$w_svpt- df$w_1stIn), na.rm = TRUE) ## Second serve winning percentages
Tstat[6, n] <- mean(ifelse(df$w_bpFaced == 0, 0, df$w_bpSaved/df$w_bpFaced)) ## break points saved (%)
Tstat[7, n] <- mean(df$w_bpFaced) ## break points faced (n)
Tstat[8, n] <- mean((df$l_svpt-(df$l_1stWon+df$l_2ndWon))/df$l_svpt, na.rm = TRUE) ## Return Games Won Percentage
}
} else {
# NOTE(review): discarded result again - no effect (see note above)
ifelse(n <= 7, n , 10)
for (s in surface){
df <- d %>% filter(surface == s)
n <- n +1
Tstat[1, n] <- mean(df$l_ace) ## Aces
Tstat[2, n] <- mean(df$l_df) ## double faults
Tstat[3, n] <- mean(df$l_1stIn/df$l_svpt) ## First serve percentage
Tstat[4, n] <- mean(df$l_1stWon/df$l_1stIn) ## First serve winning percentages
Tstat[5, n] <- mean(df$l_2ndWon/(df$l_svpt- df$l_1stIn)) ## Second serve winning percentages
Tstat[6, n] <- mean(ifelse(df$l_bpFaced == 0, 0, df$l_bpSaved/df$l_bpFaced)) ## break points saved (%)
Tstat[7, n] <- mean(df$l_bpFaced) ## break points faced (n)
Tstat[8, n] <- mean((df$w_svpt-(df$w_1stWon+df$w_2ndWon))/df$w_svpt, na.rm = TRUE) ## Return Games Won Percentage
}
}
}
# mean in regular matches: upset winners df1 & df2 ; upset losers: df3 & df4
Tstat2 <- data.frame(matrix(ncol = 7, nrow = 8))
x <- c("Stat", "dUC", "dUG", "dUH", "dRC", "dRG", "dRH") # U: upset winners ; R: upset losers
colnames(Tstat2) <- x
# stats of interest:
Tstat2$Stat <- c("Ace", "DF", "First Serve %",
                 "1st Srv Won %", "2nd Srv Won %",
                 "BP Saved %", "BP Faced", "Return %")
n <- 1
h <- 1
for (c in 1:2) {
  for (s in surface) {
    # Jump from the df1/df2 column range (2:7) to the df3/df4 range (8:13)
    # when the second pass starts. The original discarded the result of
    # `ifelse(n == 4, 7, n)`, so the upset-loser columns (dR*) were averaged
    # from the wrong Tstat columns (df2 & df3 instead of df3 & df4).
    n <- ifelse(n == 4, 7, n)
    n <- n + 1
    h <- h + 1
    # average each statistic over the player's wins (col n) and losses (col n+3)
    for (i in 1:8) {
      Tstat2[i, h] <- (Tstat[i, n] + Tstat[i, n + 3]) / 2
    }
  }
}
# ........
# mean of winners in upset matches based on surfaces:
Tstat_up <- data.frame(matrix(ncol = 4, nrow = 8))
x <- c("Stat", "C", "G","H")
colnames(Tstat_up) <- x
# stats of interest:
Tstat_up$Stat <- c("Ace", "DF", "First Serve %",
"1st Srv Won %", "2nd Srv Won %",
"BP Saved %", "BP Faced", "Return %")
n <- 1
for (s in surface){
df <- upsets %>% filter(surface == s)
n <- n +1
Tstat_up[1, n] <- mean(df$w_ace) ## Aces
Tstat_up[2, n] <- mean(df$w_df) ## double faults
Tstat_up[3, n] <- mean(df$w_1stIn/df$w_svpt, na.rm = TRUE) ## First serve percentage
Tstat_up[4, n] <- mean(df$w_1stWon/df$w_1stIn, na.rm = TRUE) ## First serve winning percentages
Tstat_up[5, n] <- mean(df$w_2ndWon/(df$w_svpt- df$w_1stIn), na.rm = TRUE) ## Second serve winning percentages
Tstat_up[6, n] <- mean(ifelse(df$w_bpFaced == 0, 0, df$w_bpSaved/df$w_bpFaced)) ## break points saved (%)
Tstat_up[7, n] <- mean(df$w_bpFaced) ## break points faced (n)
Tstat_up[8, n] <- mean((df$l_svpt-(df$l_1stWon+df$l_2ndWon))/df$l_svpt, na.rm = TRUE) ## Return Games Won Percentage
}
# find the improvement % in upsets winners performance from regular to upset matches :
Tstat_up_per <- data.frame(matrix(ncol = 5, nrow = 8))
x <- c("Stat", "Clay", "Grass","Hard", "Avg%")
colnames(Tstat_up_per) <- x
# stats of interest:
Tstat_up_per$Stat <- c("Ace", "DF", "First Serve %",
"1st Srv Won %", "2nd Srv Won %",
"BP Saved %", "BP Faced", "Return %")
# relative change (%) vs the same players' regular-match averages (Tstat2 dU*)
Tstat_up_per[,2:4] <- ( Tstat_up[,2:4] - Tstat2[, 2:4] )*100/Tstat2[,2:4]
Tstat_up_per[,5] <- rowMeans(Tstat_up_per[,2:4])
# ......................
# mean of losers in upset matches based on surfaces:
Tstat_Ra <- data.frame(matrix(ncol = 4, nrow = 8))
x <- c("Stat", "C", "G","H")
colnames(Tstat_Ra) <- x
# stats of interest:
Tstat_Ra$Stat <- c("Ace", "DF", "First Serve %",
"1st Srv Won %", "2nd Srv Won %",
"BP Saved %", "BP Faced", "Return %")
n <- 1
for (s in surface){
df <- upsets %>% filter(surface == s)
n <- n +1
Tstat_Ra[1, n] <- mean(df$l_ace) ## Aces
Tstat_Ra[2, n] <- mean(df$l_df) ## double faults
Tstat_Ra[3, n] <- mean(df$l_1stIn/df$l_svpt,na.rm = TRUE) ## First serve percentage
Tstat_Ra[4, n] <- mean(df$l_1stWon/df$l_1stIn, na.rm = TRUE) ## First serve winning percentages
Tstat_Ra[5, n] <- mean(df$l_2ndWon/(df$l_svpt- df$l_1stIn), na.rm = TRUE) ## Second serve winning percentages
Tstat_Ra[6, n] <- mean(ifelse(df$l_bpFaced == 0, 0, df$l_bpSaved/df$l_bpFaced)) ## break points saved (%)
Tstat_Ra[7, n] <- mean(df$l_bpFaced) ## break points faced (n)
Tstat_Ra[8, n] <- mean((df$w_svpt-(df$w_1stWon+df$w_2ndWon))/df$w_svpt, na.rm = TRUE) ## Return Games Won Percentage
}
# find the change % in upsets LOSERS performance from regular to upset matches
# (comment corrected; this measures the losers' drop, not the winners' improvement)
Tstat_Ra_per <- data.frame(matrix(ncol = 5, nrow = 8))
x <- c("Stat", "Clay", "Grass","Hard", "Avg%")
colnames(Tstat_Ra_per) <- x
# stats of interest:
Tstat_Ra_per$Stat <- c("Ace", "DF", "First Serve %",
"1st Srv Won %", "2nd Srv Won %",
"BP Saved %", "BP Faced", "Return %")
# relative change (%) vs the upset losers' regular-match averages (Tstat2 dR*)
Tstat_Ra_per[,2:4] <- ( Tstat_Ra[,2:4] - Tstat2[, 5:7] )*100/Tstat2[,5:7]
Tstat_Ra_per[,5] <- rowMeans(Tstat_Ra_per[,2:4])
# .........
# plots of percentage: back-to-back horizontal bar chart (population-pyramid
# style) of winners' improvement vs losers' drop, shared middle label column
# make a joint:
Plot1 <- Tstat_up_per %>%
full_join(Tstat_Ra_per, by = c("Stat"), suffix = c("_upset", "_regular"))
# rename the "Avg%" columns to syntactic names usable in aes()
Plot1 <- Plot1 %>%
rename(
Avg_upset = "Avg%_upset",
Avg_regular = "Avg%_regular"
)
#Plot1 <- Plot1[-8, ]
library(grid)
# from https://stackoverflow.com/questions/18265941/two-horizontal-bar-charts-with-shared-axis-in-ggplot2-similar-to-population-pyr
# middle strip: statistic names with small tick segments on both sides
g.mid<-ggplot(Plot1,aes(x=1,y=Stat))+geom_text(aes(label=Stat), size=7)+
geom_segment(aes(x=0.94,xend=0.96,yend=Stat))+
geom_segment(aes(x=1.04,xend=1.065,yend=Stat))+
ggtitle(" ")+
ylab(NULL)+
scale_x_continuous(expand=c(0,0),limits=c(0.94,1.065))+
theme(axis.title=element_blank(),
panel.grid=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
panel.background=element_blank(),
axis.text.x=element_text(color=NA),
axis.ticks.x=element_line(color=NA),
plot.margin = unit(c(1,-1,1,-1), "mm"))
# left panel: winners' improvement, axis reversed so bars grow leftwards
g1 <- ggplot(data = Plot1, aes(x = Stat, y = Avg_upset)) +
geom_bar(stat = "identity", fill = "#3CB371") + ggtitle("Upsets Winners' Average Performance Improvment") +
geom_text(aes(label = percent(Avg_upset/100), vjust = 0.5, hjust = 0.4), size=7) +
theme(axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
plot.title = element_text(size = 20, face = "bold"),
plot.margin = unit(c(1,-1,1,0), "mm")) +
scale_y_reverse() + coord_flip()
# right panel: losers' performance drop, bars grow rightwards
g2 <- ggplot(data = Plot1, aes(x = Stat, y = Avg_regular)) +xlab(NULL)+
geom_bar(stat = "identity", fill = "#DC143C") + ggtitle("Upsets Losers' Average Performance Drop") +
geom_text(aes(label = percent(Avg_regular/100), vjust = 0.5, hjust = 0.4), size=7) +
theme(axis.title.x = element_blank(), axis.title.y = element_blank(),
axis.text.y = element_blank(), axis.ticks.y = element_blank(),
plot.title = element_text(size = 20, face = "bold"),
plot.margin = unit(c(1,0,1,-1), "mm")) +
coord_flip()
library(gridExtra)
# convert to gtables and arrange left | labels | right
gg1 <- ggplot_gtable(ggplot_build(g1))
gg2 <- ggplot_gtable(ggplot_build(g2))
gg.mid <- ggplot_gtable(ggplot_build(g.mid))
grid.arrange(gg1,gg.mid,gg2,ncol=3,widths=c(4/9,1/9,4/9))
################################################################################
# Read in Files:
# Adapted From:
# https://blog.exploratory.io/how-to-read-multiple-excel-or-csv-files-together-42af5d314a10
# Since all file share the same data structure, Combine the dataaset into one data frame for easy manipulation
datset <-list.files(pattern="*.csv")
ATPSeasons <- sapply(datset, read_csv, simplify=FALSE) %>%
bind_rows(.id = "id")
ATPSeasons %>% group_by(tourney_name) %>%
count(tourney_name)
# 827 tournments
#################################################################################
# Manipulation:
# Cleaning:
ATPSeasons %>%
filter(score %in% c("RET", "W/O"))
# 171
# Delete rows with incomplete matches
ATPSeasons<-ATPSeasons[!(ATPSeasons$score=="RET" | ATPSeasons$score=="W/O"),]
# drop rows that have NA statistics:
ATPSeasons<-ATPSeasons[!(is.na(ATPSeasons$w_ace)),] # when ace is NA, mostly, the rest of the statistics are NA
# Exclude Davis Cup:
ATPSeasons <- ATPSeasons %>%
filter(tourney_level != "D")
dim(ATPSeasons)
# 50 * 26507
# id as a year for easy analysis by year:
# Recode `id` from the source filename (e.g. "atp_matches_2014.csv") to the
# bare season year ("2014") so matches can be grouped and analysed by year.
# The original hand-enumerated ten nested ifelse() calls; extracting the year
# with a regex is equivalent for 2009-2018 and generalises to any file that
# follows the "atp_matches_YYYY.csv" naming convention. Names that do not
# match the pattern become NA, matching the original's fall-through behaviour.
ATPSeasons$id <- ifelse(
  grepl("^atp_matches_[0-9]{4}\\.csv$", ATPSeasons$id),
  sub("^atp_matches_([0-9]{4})\\.csv$", "\\1", ATPSeasons$id),
  NA_character_
)
####################################################################################################################
# EDA
# number of matches per level:
table(ATPSeasons$tourney_level)
# percentage of each level
# reorder the level according to tier:
ATPSeasons <- arrange(transform(ATPSeasons,
tourney_level=factor(tourney_level,levels=c("G", "M", "A","F","D"))),tourney_level)
ATPSeasons <- arrange(transform(ATPSeasons,
round=factor(round,levels=c("F", "SF", "QF", "R16", "R32", "R64", "R128", "PR"))),round)
ATPSeasonsLp <- ATPSeasons %>%
group_by(id) %>%
count(tourney_level) %>%
mutate(perc = n*100 / sum(n))
ATPSeasonsLp %>%
group_by(id) %>%
ggplot(aes(x = tourney_level, y = perc)) +
geom_bar(stat = "identity", fill='steelblue', color='black') +
theme(text = element_text(size = 20)) +
geom_text(aes(label = percent(round(perc)/100), vjust = 0.5),size = 7) +
facet_wrap(id ~.) +
xlab("Tourney Level") + ylab("Percentage %")
# ..................
# Surface
# percentage of each surface
ATPSeasonsLp <- ATPSeasons %>%
group_by(id) %>%
count(surface) %>%
mutate(perc = n*100 / sum(n))
ATPSeasonsLp %>%
group_by(id) %>%
ggplot(aes(x = surface, y = perc)) +
geom_bar(stat = "identity", fill='steelblue', color='black') +
theme(text = element_text(size = 20)) +
geom_text(aes(label = percent(round(perc)/100), vjust = 1),size = 7) +
facet_wrap(id ~.) +
xlab("Surface") + ylab("Percentage %")
# ..................
# winner_hand
table(ATPSeasons$winner_hand)
ATPSeasons$winner_hand[is.na(ATPSeasons$winner_hand)] <- "U"
sum(is.na(ATPSeasons$winner_hand))
# loser_hand
table(ATPSeasons$loser_hand)
ATPSeasons$loser_hand[is.na(ATPSeasons$loser_hand)] <- "U"
sum(is.na(ATPSeasons$loser_hand))
# ..................
# Age difference distribution:
ATPSeasons$agedif <- ATPSeasons$winner_age - ATPSeasons$loser_age
# Adapted From:
# https://www.kaggle.com/ambarish/omnibus-womens-and-mens-tennis-matches-analysis
ATPSeasons %>%
ggplot(aes(x = agedif)) +
geom_density(fill = "steelblue") +
labs(x= 'Age Difference',y = 'Count', title = paste("Distribution of Age Difference")) +
theme_bw()
sd(ATPSeasons$agedif, na.rm = TRUE)
# 5.365677
# age mean
plot1 <- ATPSeasons %>%
group_by(id) %>%
summarise(Winners = mean(winner_age, na.rm = TRUE), Losers = mean(loser_age, na.rm = TRUE) ) %>%
pivot_longer(c(Winners, Losers), names_to = "Type", values_to = "Age") %>%
ggplot(aes(id, Age, group = Type, color = Type)) + geom_line(size = 1) +
labs(title = paste(" (a) Distribution of Age Difference")) +
theme(text = element_text(size = 20), legend.position = "none") +
labs(x= "Year", y="Mean Age")
# ..................
# Hight difference distribution:
ATPSeasons$htdif <- ATPSeasons$winner_ht - ATPSeasons$loser_ht
# Adapted From:
# https://www.kaggle.com/ambarish/omnibus-womens-and-mens-tennis-matches-analysis
ATPSeasons %>%
ggplot(aes(x = htdif)) +
geom_density(fill = "steelblue") +
labs(x= 'Height Difference',y = 'Count', title = paste("Distribution of Height Difference")) +
theme_bw()
sd(ATPSeasons$htdif, na.rm = TRUE)
# 10.11299
# age mean
plot2 <- ATPSeasons %>%
group_by(id) %>%
summarise(Winners = mean(winner_ht, na.rm = TRUE), Losers = mean(loser_ht, na.rm = TRUE) ) %>%
pivot_longer(c(Winners, Losers), names_to = "Type", values_to = "Hight") %>%
ggplot(aes(id, Hight, group = Type, color = Type)) + geom_line(size = 1) +
labs(title = paste(" (b) Distribution of Height Difference")) +
theme(text = element_text(size = 20)) +
labs(x= "Year", y="Mean Height")
grid.arrange(plot1, plot2, ncol=2)
# ..................
# best of
table(ATPSeasons$best_of)
# 3 5
# 21445 5062
# ..................
# who won the most matches each season in the last 10 years before 2019?
winnersT<- ATPSeasons %>%
group_by(id, winner_name)%>%
mutate(wins = n())%>%
arrange(id)
summary(winnersT)
# Investigate correlation between Tournaments wins and Wins number / win ratio each season:
# wins number:
ATPSeasons <- ATPSeasons %>%
group_by(id, winner_name)%>%
mutate(winers_wins = n())
# Win ratio:
# loses of winners:
Lossdf <- ATPSeasons %>% group_by(id, loser_name)%>%
mutate(winers_loses = n())
# join 2 df:
ATPSeasons <- ATPSeasons %>%
inner_join(Lossdf, by = c("winner_name" = "loser_name", "id" = "id")) %>%
mutate(winner_wr = winers_wins*100/(winers_wins + winers_loses))
# Tournaments wins:
TwinnersT <- ATPSeasons %>% filter(round == "F") %>%
group_by(id, winner_name)%>%
mutate(Twins = n())
# correlation:
Win_Cor_Season <- TwinnersT %>% group_by(id)%>%
summarise(Tour_Wnum = cor(Twins,winers_wins), Tour_WRatio = cor(Twins,winner_wr))
# plot:
Win_Cor_Season %>%
pivot_longer(c(Tour_Wnum, Tour_WRatio), names_to = "Type", values_to = "Corr") %>%
ggplot(aes(x = id, y = Corr, group = Type, color = Type)) +
geom_line(size =1) + theme(axis.text.x = element_text(hjust = 1)) +
theme(text = element_text(size = 20)) +
labs(x= "Year", y="Correlation Ratio")
cor.test(TwinnersT$Twins,TwinnersT$winers_wins) # 2.2e-16
cor.test(TwinnersT$Twins,TwinnersT$winner_wr) # 2.2e-16
# inspect 2014 season:
Twinners2014 <- TwinnersT %>% filter(id == "2014") %>%
group_by(winner_name)%>%
summarize(Twins = n())%>%
arrange(desc(Twins))
model1 <- lm(Twins ~ winers_wins, data = TwinnersT)
summary(model1)
model2 <- lm(Twins ~ winner_wr, data = TwinnersT)
summary(model2)
# .................
# Investigate winners performance per surface in the Final rounds:
# the winner of the final round will be the tournament winner.
## Tournament winners by surface: Players win ratio per surface for the tournament winners
Twinners2014 %>%
inner_join(ATPSeasons %>% filter(id == "2014"), by = "winner_name", suffix = c("_19", "_TW") ) %>%
group_by(winner_name) %>%
count(surface) -> TWW
Twinners2014 %>%
inner_join(ATPSeasons %>% filter(id == "2014"), by = c("winner_name" = "loser_name")) %>%
group_by(winner_name) %>%
count(surface) -> TWL
TWW %>%
inner_join(TWL, by = "winner_name", suffix = c("_W", "_L") ) -> df1
df1$surface <- NA
df1$Sprec <- NA
df1 <- df1 %>% pivot_wider(names_from = surface_W, values_from = n_W, values_fill = 0) %>%
pivot_longer(c(Clay, Grass, Hard), names_to = "surface_W", values_to = "n_W") %>%
pivot_wider(names_from = surface_L, values_from = n_L, values_fill = 0) %>%
pivot_longer(c(Clay, Grass, Hard), names_to = "surface_L", values_to = "n_L")
df1$surface <- unlist(df1$surface)
for (i in 1:nrow(df1)) {
if (df1[i,4] == df1[i,6]){
df1$surface[i] <- df1[i,4]
df1$Sprec[i] <- df1$n_W[i]*100/(df1$n_W[i]+ df1$n_L[i])
}
}
df1 <- na.omit(df1)
df1$surface <- unlist(df1$surface)
df1 %>%
ggplot(aes(winner_name, y=Sprec, fill = surface)) +
geom_col() + theme(text = element_text(size = 20), axis.text.x = element_text(angle = 90, hjust = 1)) +
geom_text(aes(label = round(Sprec,2)), position = position_stack(vjust = 0.5), fontface = "bold")+
xlab("Winner Name") + ylab("Percentage of Wins %")
df1 %>% group_by(winner_name)
# ..................
# Players' Ranks:
Season2014 <- ATPSeasons %>% filter(id == "2014")
TSeason2014 <- TwinnersT %>% filter(id == "2014")
summary(Season2014$winner_rank)
sum(is.na(Season2014$winner_rank))
g1 <- Season2014 %>%
ggplot(aes(x = winner_rank)) +
geom_density(aes( y=..scaled..), fill='steelblue', color='black') +
theme(text = element_text(size = 20), axis.text.x = element_text(hjust = 0.5)) +
xlim(1,503) + xlab("Winner Rank") + ylab("Scale") +
ggtitle(" (a) The Density Estimate of Winners Ranks") +
theme(plot.title = element_text(size = 20, face = "bold"))
g2 <- Season2014 %>%
filter(winner_rank <=73)%>%
count(winner_rank) %>%
ggplot(aes(x = winner_rank, y = n)) +
geom_col(fill='steelblue', color='black') +
theme(text = element_text(size = 20), axis.text.x = element_text(hjust = 0.5)) +
xlim(1,76) + xlab("Winner Rank") + ylab("Count") +
ggtitle(" (b) The 75th percentile of Winners Ranks") +
theme(plot.title = element_text(size = 20, face = "bold"))
# The distribution is sort of uniform especially form rank 10 to 76
summary(TSeason2014$winner_rank)
g3 <- ggplot(TSeason2014 %>% filter(winner_rank <=30), aes(x=winner_rank, fill = surface)) +
geom_bar() +
theme(text = element_text(size = 20), axis.text.x = element_text(hjust = 0.5)) +
facet_wrap(tourney_level~.) +
xlab("Winner Rank") + ylab("Count") +
ggtitle(" (c) Ranks Distribution of Tournament Winners by Level and Surface ") +
theme(plot.title = element_text(size = 20, face = "bold"))
grid.arrange(arrangeGrob(g1),
arrangeGrob(g2,g3, ncol=1),
ncol=2, widths=c(1,1))
# ..................
# Player seeds
Season2014 <- ATPSeasons %>% filter(id == "2014")
Season2014 %>%
arrange(winner_rank) %>%
group_by(winner_rank) %>%
count()
Season2014 %>%
count(winner_rank) %>%
ggplot(aes(x = winner_rank)) +
geom_histogram(fill='steelblue') +
theme(axis.text.x = element_text(angle = 90, hjust = 0.5)) +
scale_x_continuous("Winner Seed", labels = as.character(Season2014$winner_rank), breaks = Season2014$winner_rank) +
ylab("Count")
table(Season2014$winner_rank, Season2014$tourney_level)
ATPSeasons %>%
filter(winner_rank <=100) %>%
count(winner_rank) %>%
ggplot(aes(x = winner_rank)) +
geom_histogram(fill='steelblue') +
theme(axis.text.x = element_text(angle = 90, hjust = 0.5)) +
scale_x_continuous("Winner Rank", labels = as.character(Season2014$winner_rank), breaks = Season2014$winner_rank) +
theme(text = element_text(size = 20)) +
xlab("Winner Rank") + ylab("Count") + xlim(0, 100) + facet_wrap(id~.)
# * higher seeded players win more matches than lower ranked seed players.(para)
# Are seeds more likely to win tournaments?
# Seeds
TwinnersT %>%
group_by(winner_seed) %>%
count() %>%
arrange(desc(n))
ggplot(TwinnersT, aes(x=winner_seed, fill = tourney_level)) +
geom_histogram() +
theme(text = element_text(size = 20)) +
xlab("Winner Seed") + ylab("Count") +
facet_wrap(id~.)
# Ranks
TwinnersT %>%
group_by(winner_rank) %>%
count() %>%
arrange(desc(n))
ggplot(TwinnersT, aes(x=winner_rank, fill = tourney_level)) +
geom_histogram() +
theme(text = element_text(size = 20)) +
xlab("Winner Seed") + ylab("Count") + xlim(0, 300) +
facet_wrap(id~.)
#################################################################################################################
# Match Statistics: Winners and Losers
# Aces
# #Number of aces as a predictor
# Number of aces as a predictor: for each season, the percentage of matches
# in which the winner hit more aces than the loser.
Yearsdf <- c(2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018)
# Preallocate instead of growing the vector inside the loop.
ace_per <- numeric(length(Yearsdf))
for (i in seq_along(Yearsdf)) {
  df <- ATPSeasons %>% filter(id == Yearsdf[i])
  # Subtract the paired columns directly on aligned rows. The original called
  # na.omit() on each column separately, which misaligns winner/loser pairs
  # (and silently recycles) whenever the two columns have NAs in different
  # rows. NAs are instead dropped from the comparison itself.
  ace_dif <- df$w_ace - df$l_ace
  # Share (%) of matches where the winner out-aced the loser.
  ace_per[i] <- 100 * mean(ace_dif > 0, na.rm = TRUE)
}
# statistics:
summary(Season2014$w_ace)
summary(Season2014$l_ace)
# .......................
# Adapted From:
# https://medium.com/swlh/predicting-atp-tennis-match-outcomes-using-serving-statistics-cde03d99f410
# Double faults faults as a predictor
# difference between number of double faults hit by the loser and the winner
# Double faults as a predictor: for each season, the percentage of matches
# in which the loser hit more double faults than the winner.
df_per <- numeric(length(Yearsdf))  # preallocated
for (i in seq_along(Yearsdf)) {
  df <- ATPSeasons %>% filter(id == Yearsdf[i])
  # Paired difference on aligned rows; the original na.omit()'ed each column
  # separately, which misaligns loser/winner pairs when NA positions differ.
  df_dif <- df$l_df - df$w_df
  df_per[i] <- 100 * mean(df_dif > 0, na.rm = TRUE)
}
# statistics:
# Fixed copy-paste defect: the original re-summarised the *ace* columns
# (w_ace / l_ace) here, but this section is about double faults.
summary(Season2014$w_df)
summary(Season2014$l_df)
# .......................
# The first serve percentage
fsp_per <- vector()
n <- 1
for (y in Yearsdf){
df <- ATPSeasons %>% filter(id == y)
p1spw <- df$w_1stIn/df$w_svpt # winner
p1spl <- df$l_1stIn/df$l_svpt # loser
p1sd <- ifelse((p1spw - p1spl) > 0 , 1 , 0)
#percentage of time winner had a better 1st serve percentage
fsp_per[n] <- sum(p1sd, na.rm = TRUE)*100/length(p1sd)
n <- n +1
}
# .......................
#First and second serve winning percentages
# First
fswp_per <- vector()
n <- 1
for (y in Yearsdf){
df <- ATPSeasons %>% filter(id == y)
w1swp <- df$w_1stWon/ df$w_1stIn
w1slp <- df$l_1stWon/ df$l_1stIn
f1swpd <- ifelse((w1swp - w1slp) > 0 , 1 , 0)
#percentage of time the winning player had a higher winning percentage on 1st serve
fswp_per[n] <- sum(f1swpd, na.rm = TRUE)*100/length(f1swpd)
n <- n +1
}
# Second
S2swp_per <- vector()
n <- 1
for (y in Yearsdf){
df <- ATPSeasons %>% filter(id == y)
w2swp <- df$w_2ndWon/ (df$w_svpt- df$w_1stIn)
w2slp <- df$l_2ndWon/ (df$l_svpt- df$l_1stIn)
s2swpd <- ifelse((w2swp - w2slp) > 0 , 1 , 0)
#percentage of time the winning player had a higher winning percentage on 2nd serve
S2swp_per[n] <- sum(s2swpd, na.rm = TRUE)*100/length(s2swpd)
n <- n +1
}
# .......................
# Break Points
# Break points faced
bpf_per <- vector()
n <- 1
for (y in Yearsdf){
df <- ATPSeasons %>% filter(id == y)
bpfd <- ifelse((df$w_bpFaced - df$l_bpFaced) > 0 , 1 , 0)
#percentage
bpf_per[n] <- sum(bpfd, na.rm = TRUE)*100/length(bpfd)
n <- n +1
}
# break points saved
bps_per <- vector()
n <- 1
for (y in Yearsdf){
df <- ATPSeasons %>% filter(id == y)
df$w_bpSavedp <- ifelse(df$w_bpFaced == 0, 0, df$w_bpSaved*100/df$w_bpFaced)
df$l_bpSavedp <- ifelse(df$l_bpFaced == 0, 0, df$l_bpSaved*100/df$l_bpFaced)
bpsd <- ifelse((df$w_bpSavedp - df$l_bpSavedp) > 0 , 1 , 0)
#percentage
bps_per[n] <- sum(bpsd, na.rm = TRUE)*100/length(bpsd)
n <- n +1
}
# Return Games Won %
Ret_per <- vector()
n <- 1
for (y in Yearsdf){
df <- ATPSeasons %>% filter(id == y)
df$w_Retpwp <- (df$l_svpt-(df$l_1stWon+df$l_2ndWon))*100/df$l_svpt
df$l_Retpwp <- (df$w_svpt-(df$w_1stWon+df$w_2ndWon))*100/df$w_svpt
Retd <- ifelse((df$w_Retpwp - df$l_Retpwp) > 0 , 1 , 0)
#percentage
Ret_per[n] <- sum(Retd, na.rm = TRUE)*100/length(Retd)
n <- n +1
}
# data frame of percentage difference:
per_diff_stat <- data.frame(matrix(ncol = 11, nrow = 8))
x <- c("Stat", "2009", "2010", "2011", "2012", "2013", "2014", "2015", "2016", "2017", "2018")
colnames(per_diff_stat) <- x
# stats of interest:
per_diff_stat$Stat <- c("Aces", "Double Faults", "First Serve Percentage",
"1st Serve Win %", "2nd Serve Win %",
"Break Points Saved", "Break Points Faced", "Return Games Won %")
mStat <- data.frame(ace_per, df_per, fsp_per, fswp_per, S2swp_per, bps_per, bpf_per, Ret_per)
n <- 1
for(n in 1:length(mStat)){
per_diff_stat[n,2:11] <- mStat[,n]
n <- n+1
}
################################################################################
################################################################################
################################################################################
## Upset matches:
# Define:
# rank difference > 15
# seeds were inspected and they are not very accurate
# will build on ranks
## How many times occur in the last 10 years?
# Create upset column : (1 means the match was an upset)
ATPSeasons$Upset15 <- ifelse(ATPSeasons$winner_rank - ATPSeasons$loser_rank > 15, 1, 0)
ATPSeasons %>%
group_by(id) %>%
summarise(upsets_num = sum(Upset15, na.rm = TRUE)) %>%
ggplot(aes(id, upsets_num, group = 1)) + geom_line(size = 1) +
theme(text = element_text(size = 20))+
labs(x= "Year", y="Number of Upset Matches")
# Investigate upset matches: any patterns?
# remove NA:
ATPSeasons <- ATPSeasons[!(is.na(ATPSeasons$Upset15)),]
# Surface
# percentage of each surface
# upset:
ATPSeasons %>%
filter(Upset15 == 1) %>%
group_by(id) %>%
count(surface) %>%
mutate(perc = n*100 / sum(n)) %>%
ggplot(aes(x = surface, y = perc)) +
geom_bar(stat = "identity",fill='steelblue') +
geom_text(aes(label = percent(round(perc)/100), vjust = -0.5)) +
facet_wrap(id ~.)
# regular:
ATPSeasons %>%
filter(Upset15 == 0) %>%
group_by(id) %>%
count(surface) %>%
mutate(perc = n*100 / sum(n)) %>%
ggplot(aes(x = surface, y = perc)) +
geom_bar(stat = "identity",fill='steelblue') +
geom_text(aes(label = percent(round(perc)/100), vjust = -0.5)) +
facet_wrap(id ~.)
# .................................
# tourney_level
# upset:
ATPSeasons %>%
filter(Upset15 == 1) %>%
group_by(id) %>%
count(tourney_level) %>%
mutate(perc = n*100 / sum(n)) %>%
ggplot(aes(x = tourney_level, y = perc)) +
geom_bar(stat = "identity",fill='steelblue') +
geom_text(aes(label = percent(round(perc)/100), vjust = -0.5)) +
facet_wrap(id ~.)
# regular:
ATPSeasons %>%
filter(Upset15 == 0) %>%
group_by(id) %>%
count(tourney_level) %>%
mutate(perc = n*100 / sum(n)) %>%
ggplot(aes(x = tourney_level, y = perc)) +
geom_bar(stat = "identity",fill='steelblue') +
geom_text(aes(label = percent(round(perc)/100), vjust = -0.5)) +
facet_wrap(id ~.)
# .................................
# round:
# upset:
ATPSeasons %>%
filter(Upset15 == 1) %>%
group_by(id) %>%
count(round) %>%
mutate(perc = n*100 / sum(n)) %>%
ggplot(aes(x = round, y = perc)) +
geom_bar(stat = "identity",fill='steelblue') +
geom_text(aes(label = percent(round(perc)/100), vjust = -0.5)) +
facet_wrap(id ~.)
# regular:
ATPSeasons %>%
filter(Upset15 == 0) %>%
group_by(id) %>%
count(round) %>%
mutate(perc = n*100 / sum(n)) %>%
ggplot(aes(x = round, y = perc)) +
geom_bar(stat = "identity",fill='steelblue') +
geom_text(aes(label = percent(round(perc)/100), vjust = -0.5)) +
facet_wrap(id ~.)
# .................................
# minutes:
ATPSeasons %>%
filter(Upset15 == 1) %>%
group_by(id )-> m1
summary(m1$minutes)
ATPSeasons %>%
filter(Upset15 == 0) %>%
group_by(id )-> m0
summary(m0$minutes)
mperavg <- (mean(m1$minutes, na.rm = TRUE)-mean(m0$minutes, na.rm = TRUE))*200/(mean(m1$minutes, na.rm = TRUE)+mean(m0$minutes, na.rm = TRUE))
# 3.17%
# 2014
ATPSeasons %>%
filter(id == "2014", Upset15 == 1) -> m1
summary(m1$minutes)
ATPSeasons %>%
filter(id == "2014", Upset15 == 0) -> m0
summary(m0$minutes)
mperavg <- (mean(m1$minutes, na.rm = TRUE)-mean(m0$minutes, na.rm = TRUE))*200/(mean(m1$minutes, na.rm = TRUE)+mean(m0$minutes, na.rm = TRUE))
# 0.17%
# .................................
# match statistics 2014:
# Aces
# #Number of aces as a predictor
# upsets:
# Upset matches of 2014. `U` is reused by all the comparison sections below.
U <- ATPSeasons %>%
  filter(id == "2014", Upset15 == 1)
# Paired winner-loser ace difference on aligned rows. The original applied
# na.omit() to each column separately, which misaligns winner/loser pairs
# (and recycles) whenever the NA positions differ between the columns.
ace_dif <- U$w_ace - U$l_ace
ace_dif <- ifelse(ace_dif > 0, 1, 0)
# What percentage of the time did the winner hit more aces than the loser?
# Denominator counts only comparable (non-NA) pairs.
sum(ace_dif, na.rm = TRUE) * 100 / sum(!is.na(ace_dif))
# 55.33662
# statistics:
summary(U$w_ace)
summary(U$l_ace)
# Regular (non-upset) 2014 matches. `R` is reused by the sections below.
R <- ATPSeasons %>%
  filter(id == "2014", Upset15 == 0)
ace_dif <- R$w_ace - R$l_ace
ace_dif <- ifelse(ace_dif > 0, 1, 0)
# What percentage of the time did the winner hit more aces than the loser?
sum(ace_dif, na.rm = TRUE) * 100 / sum(!is.na(ace_dif))
# 61.97%
# statistics:
summary(R$w_ace)
summary(R$l_ace)
#-------------------
# Double faults
# Number of double faults as a predictor
# difference between number of double faults hit by the loser and the winner
# Upsets:
df_dif <- U$l_df - U$w_df
#number of times loser hit more double faults in a match
df_dif <- ifelse(df_dif > 0 , 1 , 0)
#what percentage of time did loser hit more double faults than the winner
sum(df_dif, na.rm = TRUE)*100/length(df_dif)
# 53.69 %
summary(U$w_df)
summary(U$l_df)
# Regular:
df_dif <- R$l_df - R$w_df
#number of times loser hit more double faults in a match
df_dif <- ifelse(df_dif > 0 , 1 , 0)
#what percentage of time did loser hit more double faults than the winner
sum(df_dif, na.rm = TRUE)*100/length(df_dif)
# 51.12 %
summary(R$w_df)
summary(R$l_df)
# .................................
# #first serve percentage = number of first serves made / number of serve points
#upsets:
p1spw <- U$w_1stIn/U$w_svpt # winner
p1spl <- U$l_1stIn/U$l_svpt # loser
summary(p1spw)
summary(p1spl)
#First serve percentage as a predictor
#percentage of time winner had a better 1st serve percentage
p1sd <- ifelse((p1spw - p1spl) > 0 , 1 , 0)
sum(p1sd, na.rm = TRUE)*100/length(p1sd)
# 51.4 %
# Regular:
p1spw <- R$w_1stIn/R$w_svpt # winner
p1spl <- R$l_1stIn/R$l_svpt # loser
summary(p1spw)
summary(p1spl)
#First serve percentage as a predictor
#percentage of time winner had a better 1st serve percentage
p1sd <- ifelse((p1spw - p1spl) > 0 , 1 , 0)
sum(p1sd, na.rm = TRUE)*100/length(p1sd)
# 56.31 %
#First and second serve winning percentages
# #first serve winning percentage = number of first-serve points won / number of first serves made
# Upsets:
w1swp <- U$w_1stWon/ U$w_1stIn
w1slp <- U$l_1stWon/ U$l_1stIn
#percentage of time the winning player had a higher winning percentage on 1st serve
f1swpd <- ifelse((w1swp - w1slp) > 0 , 1 , 0)
sum(f1swpd, na.rm = TRUE)*100/length(f1swpd)
# 77.18%
summary(w1swp)
summary(w1slp)
# Regular:
w1swp <- R$w_1stWon/R$w_1stIn
w1slp <- R$l_1stWon/R$l_1stIn
#percentage of time the winning player had a higher winning percentage on 1st serve
f1swpd <- ifelse((w1swp - w1slp) > 0 , 1 , 0)
sum(f1swpd, na.rm = TRUE)*100/length(f1swpd)
# 85.23%
summary(w1swp)
summary(w1slp)
#second serve winning percentage
# Upsets:
w2swp <- U$w_2ndWon/ (U$w_svpt- U$w_1stIn)
w2slp <- U$l_2ndWon/ (U$l_svpt- U$l_1stIn)
# #percentage of time the winning player had a higher winning percentage on 2nd serve
s2swpd <- ifelse((w2swp - w2slp) > 0 , 1 , 0)
sum(s2swpd, na.rm = TRUE)*100/length(s2swpd)
# 75.53%
summary(w2swp)
summary(w2slp)
# Regular:
w2swp <- R$w_2ndWon/ (R$w_svpt - R$w_1stIn)
w2slp <- R$l_2ndWon/ (R$l_svpt - R$l_1stIn)
# #percentage of time the winning player had a higher winning percentage on 2nd serve
s2swpd <- ifelse((w2swp - w2slp) > 0 , 1 , 0)
sum(s2swpd, na.rm = TRUE)*100/length(s2swpd)
# 76.63%
summary(w2swp)
summary(w2slp)
# .................................
# Break points faced
# Build the winner/loser pairs from aligned rows first, then drop incomplete
# rows pairwise with na.omit() on the data frame. The original na.omit()'ed
# each column *before* building the frame, which misaligns pairs and can
# error (or silently recycle) when the two columns have different NA counts.
# Upset:
bpf <- na.omit(data.frame(winner = U$w_bpFaced, loser = U$l_bpFaced))
bpfd <- ifelse((bpf$loser - bpf$winner) > 0, 1, 0)
sum(bpfd, na.rm = TRUE) * 100 / length(bpfd)
# 71.43%
summary(bpf)
# Regular:
bpf <- na.omit(data.frame(winner = R$w_bpFaced, loser = R$l_bpFaced))
summary(bpf)
bpfd <- ifelse((bpf$loser - bpf$winner) > 0, 1, 0)
sum(bpfd, na.rm = TRUE) * 100 / length(bpfd)
# 78.72%
# .................................
# break points saved
# upset:
bps <- na.omit(data.frame(winner = U$w_bpSaved, loser = U$l_bpSaved))
bpsd <- ifelse((bps$winner - bps$loser) > 0, 1, 0)
sum(bpsd, na.rm = TRUE) * 100 / length(bpsd)
# 32.18%
summary(bps)
# Regular:
bps <- na.omit(data.frame(winner = R$w_bpSaved, loser = R$l_bpSaved))
bpsd <- ifelse((bps$winner - bps$loser) > 0, 1, 0)
sum(bpsd, na.rm = TRUE) * 100 / length(bpsd)
# 29.07%
summary(bps)
###############################################################################
# Upsets vs Reg for all years:
# upsets:
# Split the pooled seasons into upset matches (U) and regular matches (R).
U <- dplyr::filter(ATPSeasons, Upset15 == 1)
# Regular:
R <- dplyr::filter(ATPSeasons, Upset15 == 0)
# another script
##############################################################################
# Adapted From: https://tennismash.com/2016/01/21/302/
# Compare the performance of the upset winner to their average performance on the same surface
upsets <- ATPSeasons %>%
filter(id == "2014", Upset15 == 1) # 609
regular <- ATPSeasons %>%
filter(id == "2014", Upset15 == 0) # 1964
## Find the statistics of the upsets winners in regular matches: df1 (as winners in R) & df2 (as losers in R)
## 1- upsets winners as winners of regular:
df1 <- upsets %>%
select(winner_id) %>%
distinct(winner_id) %>%
inner_join(regular, by = c("winner_id" = "winner_id"))
## 2- upsets winners as losers of regular:
df2 <- upsets %>%
select(winner_id) %>%
distinct(winner_id) %>%
inner_join(regular, by = c("winner_id" = "loser_id"))
## Find the statistics of the upsets losers in regular matches: df3 (as winners in R) & df4 (as losers in R)
## 3- upsets losers as winners of regular:
df3 <- upsets %>%
select(loser_id) %>%
distinct(loser_id) %>%
inner_join(regular, by = c("loser_id" = "winner_id"))
## 4- upsets losers as losers of regular:
df4 <- upsets %>%
select(loser_id) %>%
distinct(loser_id) %>%
inner_join(regular, by = c("loser_id" = "loser_id"))
# ...................
## statistics of the upsets winners in regular matches (df1 & df2)
## Then compare these statistics in upset matches in the same surface: (upsets)
data_list <- list(df1, df2, df3, df4)
Tstat <- data.frame(matrix(ncol = 13, nrow = 8))
x <- c("Stat", "df1C", "df1G","df1H", "df2C", "df2G","df2H","df3C", "df3G","df3H","df4C", "df4G","df4H")
colnames(Tstat) <- x
# stats of interest:
Tstat$Stat <- c("Ace", "DF", "First Serve %",
"1st Srv Won %", "2nd Srv Won %",
"BP Saved %", "BP Faced", "Return %")
surface <- c("Clay", "Grass", "Hard")
# Fill Tstat columns 2-13 with per-surface mean statistics from the four
# regular-match data frames: df1/df3 hold the players of interest as
# *winners* (w_ prefix), df2/df4 hold them as *losers* (l_ prefix).
# `n` advances one column per (data frame, surface) combination, so the
# 4 x 3 = 12 combinations land in columns 2..13 in order.
n <- 1
dn <- 0
for (d in data_list) {
  dn <- dn + 1
  # dn is scalar, so the short-circuit || is used for the branch test.
  # (The original also had no-op `ifelse(n <= 4, n, 7)` expressions whose
  # results were discarded; `n` already advances correctly, so they were
  # removed as dead code.)
  if (dn == 1 || dn == 3) {
    # Players appear as winners in these frames: use the w_ columns.
    for (s in surface) {
      df <- d %>% filter(surface == s)
      n <- n + 1
      Tstat[1, n] <- mean(df$w_ace)  ## Aces
      Tstat[2, n] <- mean(df$w_df)   ## double faults
      Tstat[3, n] <- mean(df$w_1stIn / df$w_svpt, na.rm = TRUE)   ## First serve percentage
      Tstat[4, n] <- mean(df$w_1stWon / df$w_1stIn, na.rm = TRUE) ## First serve winning percentage
      Tstat[5, n] <- mean(df$w_2ndWon / (df$w_svpt - df$w_1stIn), na.rm = TRUE) ## Second serve winning percentage
      Tstat[6, n] <- mean(ifelse(df$w_bpFaced == 0, 0, df$w_bpSaved / df$w_bpFaced)) ## break points saved (%)
      Tstat[7, n] <- mean(df$w_bpFaced) ## break points faced (n)
      Tstat[8, n] <- mean((df$l_svpt - (df$l_1stWon + df$l_2ndWon)) / df$l_svpt, na.rm = TRUE) ## Return points won %
    }
  } else {
    # Players appear as losers in these frames: use the l_ columns.
    for (s in surface) {
      df <- d %>% filter(surface == s)
      n <- n + 1
      Tstat[1, n] <- mean(df$l_ace)  ## Aces
      Tstat[2, n] <- mean(df$l_df)   ## double faults
      # na.rm = TRUE added on the ratio means for consistency with the
      # winner branch above; the original omitted it here, so a single NA
      # (e.g. a zero-serve-point row) poisoned the whole mean.
      Tstat[3, n] <- mean(df$l_1stIn / df$l_svpt, na.rm = TRUE)   ## First serve percentage
      Tstat[4, n] <- mean(df$l_1stWon / df$l_1stIn, na.rm = TRUE) ## First serve winning percentage
      Tstat[5, n] <- mean(df$l_2ndWon / (df$l_svpt - df$l_1stIn), na.rm = TRUE) ## Second serve winning percentage
      Tstat[6, n] <- mean(ifelse(df$l_bpFaced == 0, 0, df$l_bpSaved / df$l_bpFaced)) ## break points saved (%)
      Tstat[7, n] <- mean(df$l_bpFaced) ## break points faced (n)
      Tstat[8, n] <- mean((df$w_svpt - (df$w_1stWon + df$w_2ndWon)) / df$w_svpt, na.rm = TRUE) ## Return points won %
    }
  }
}
# mean in regular matches: upset winners df1 & df2 ; upset losers: df3 & df4
Tstat2 <- data.frame(matrix(ncol = 7, nrow = 8))
x <- c("Stat", "dUC", "dUG","dUH", "dRC", "dRG","dRH") # U: upset winners ; R: upset losers
colnames(Tstat2) <- x
# stats of interest:
Tstat2$Stat <- c("Ace", "DF", "First Serve %",
"1st Srv Won %", "2nd Srv Won %",
"BP Saved %", "BP Faced", "Return %")
# Average each statistic over the two regular-match frames per group:
# upset winners  = df1 & df2  -> Tstat columns 2..4 and 5..7,
# upset losers   = df3 & df4  -> Tstat columns 8..10 and 11..13.
n <- 1
h <- 1
for (c in 1:2) {
  for (s in surface) {
    # BUG FIX: the original computed `ifelse(n == 4, 7, n)` but discarded
    # the result, so for the second group it averaged df2 with df3
    # (columns 5..7 paired with 8..10) instead of df3 with df4. Assigning
    # the jump makes n run 8..10 in the second pass, pairing columns
    # 8..10 with 11..13 as intended.
    if (n == 4) n <- 7
    n <- n + 1
    h <- h + 1
    for (i in 1:8) {
      Tstat2[i, h] <- (Tstat[i, n] + Tstat[i, n + 3]) / 2
    }
  }
}
# ........
# mean of winners in upset matches based on surfaces:
Tstat_up <- data.frame(matrix(ncol = 4, nrow = 8))
x <- c("Stat", "C", "G","H")
colnames(Tstat_up) <- x
# stats of interest:
Tstat_up$Stat <- c("Ace", "DF", "First Serve %",
"1st Srv Won %", "2nd Srv Won %",
"BP Saved %", "BP Faced", "Return %")
n <- 1
for (s in surface){
df <- upsets %>% filter(surface == s)
n <- n +1
Tstat_up[1, n] <- mean(df$w_ace) ## Aces
Tstat_up[2, n] <- mean(df$w_df) ## double faults
Tstat_up[3, n] <- mean(df$w_1stIn/df$w_svpt, na.rm = TRUE) ## First serve percentage
Tstat_up[4, n] <- mean(df$w_1stWon/df$w_1stIn, na.rm = TRUE) ## First serve winning percentages
Tstat_up[5, n] <- mean(df$w_2ndWon/(df$w_svpt- df$w_1stIn), na.rm = TRUE) ## Second serve winning percentages
Tstat_up[6, n] <- mean(ifelse(df$w_bpFaced == 0, 0, df$w_bpSaved/df$w_bpFaced)) ## break points saved (%)
Tstat_up[7, n] <- mean(df$w_bpFaced) ## break points faced (n)
Tstat_up[8, n] <- mean((df$l_svpt-(df$l_1stWon+df$l_2ndWon))/df$l_svpt, na.rm = TRUE) ## Return Games Won Percentage
}
# find the improvement % in upsets winners performance from regular to upset matches :
Tstat_up_per <- data.frame(matrix(ncol = 5, nrow = 8))
x <- c("Stat", "Clay", "Grass","Hard", "Avg%")
colnames(Tstat_up_per) <- x
# stats of interest:
Tstat_up_per$Stat <- c("Ace", "DF", "First Serve %",
"1st Srv Won %", "2nd Srv Won %",
"BP Saved %", "BP Faced", "Return %")
Tstat_up_per[,2:4] <- ( Tstat_up[,2:4] - Tstat2[, 2:4] )*100/Tstat2[,2:4]
Tstat_up_per[,5] <- rowMeans(Tstat_up_per[,2:4])
# ......................
# mean of losers in upset matches based on surfaces:
Tstat_Ra <- data.frame(matrix(ncol = 4, nrow = 8))
x <- c("Stat", "C", "G","H")
colnames(Tstat_Ra) <- x
# stats of interest:
Tstat_Ra$Stat <- c("Ace", "DF", "First Serve %",
"1st Srv Won %", "2nd Srv Won %",
"BP Saved %", "BP Faced", "Return %")
n <- 1
for (s in surface){
df <- upsets %>% filter(surface == s)
n <- n +1
Tstat_Ra[1, n] <- mean(df$l_ace) ## Aces
Tstat_Ra[2, n] <- mean(df$l_df) ## double faults
Tstat_Ra[3, n] <- mean(df$l_1stIn/df$l_svpt,na.rm = TRUE) ## First serve percentage
Tstat_Ra[4, n] <- mean(df$l_1stWon/df$l_1stIn, na.rm = TRUE) ## First serve winning percentages
Tstat_Ra[5, n] <- mean(df$l_2ndWon/(df$l_svpt- df$l_1stIn), na.rm = TRUE) ## Second serve winning percentages
Tstat_Ra[6, n] <- mean(ifelse(df$l_bpFaced == 0, 0, df$l_bpSaved/df$l_bpFaced)) ## break points saved (%)
Tstat_Ra[7, n] <- mean(df$l_bpFaced) ## break points faced (n)
Tstat_Ra[8, n] <- mean((df$w_svpt-(df$w_1stWon+df$w_2ndWon))/df$w_svpt, na.rm = TRUE) ## Return Games Won Percentage
}
# find the improvement % in upsets winners performance from regular to upset matches :
Tstat_Ra_per <- data.frame(matrix(ncol = 5, nrow = 8))
x <- c("Stat", "Clay", "Grass","Hard", "Avg%")
colnames(Tstat_Ra_per) <- x
# stats of interest:
Tstat_Ra_per$Stat <- c("Ace", "DF", "First Serve %",
"1st Srv Won %", "2nd Srv Won %",
"BP Saved %", "BP Faced", "Return %")
Tstat_Ra_per[,2:4] <- ( Tstat_Ra[,2:4] - Tstat2[, 5:7] )*100/Tstat2[,5:7]
Tstat_Ra_per[,5] <- rowMeans(Tstat_Ra_per[,2:4])
# .........
# plots of percentage:
# make a joint:
Plot1 <- Tstat_up_per %>%
full_join(Tstat_Ra_per, by = c("Stat"), suffix = c("_upset", "_regular"))
Plot1 <- Plot1 %>%
rename(
Avg_upset = "Avg%_upset",
Avg_regular = "Avg%_regular"
)
#Plot1 <- Plot1[-8, ]
library(grid)
# from https://stackoverflow.com/questions/18265941/two-horizontal-bar-charts-with-shared-axis-in-ggplot2-similar-to-population-pyr
g.mid<-ggplot(Plot1,aes(x=1,y=Stat))+geom_text(aes(label=Stat), size=7)+
geom_segment(aes(x=0.94,xend=0.96,yend=Stat))+
geom_segment(aes(x=1.04,xend=1.065,yend=Stat))+
ggtitle(" ")+
ylab(NULL)+
scale_x_continuous(expand=c(0,0),limits=c(0.94,1.065))+
theme(axis.title=element_blank(),
panel.grid=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
panel.background=element_blank(),
axis.text.x=element_text(color=NA),
axis.ticks.x=element_line(color=NA),
plot.margin = unit(c(1,-1,1,-1), "mm"))
g1 <- ggplot(data = Plot1, aes(x = Stat, y = Avg_upset)) +
geom_bar(stat = "identity", fill = "#3CB371") + ggtitle("Upsets Winners' Average Performance Improvment") +
geom_text(aes(label = percent(Avg_upset/100), vjust = 0.5, hjust = 0.4), size=7) +
theme(axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
plot.title = element_text(size = 20, face = "bold"),
plot.margin = unit(c(1,-1,1,0), "mm")) +
scale_y_reverse() + coord_flip()
g2 <- ggplot(data = Plot1, aes(x = Stat, y = Avg_regular)) +xlab(NULL)+
geom_bar(stat = "identity", fill = "#DC143C") + ggtitle("Upsets Losers' Average Performance Drop") +
geom_text(aes(label = percent(Avg_regular/100), vjust = 0.5, hjust = 0.4), size=7) +
theme(axis.title.x = element_blank(), axis.title.y = element_blank(),
axis.text.y = element_blank(), axis.ticks.y = element_blank(),
plot.title = element_text(size = 20, face = "bold"),
plot.margin = unit(c(1,0,1,-1), "mm")) +
coord_flip()
library(gridExtra)
gg1 <- ggplot_gtable(ggplot_build(g1))
gg2 <- ggplot_gtable(ggplot_build(g2))
gg.mid <- ggplot_gtable(ggplot_build(g.mid))
grid.arrange(gg1,gg.mid,gg2,ncol=3,widths=c(4/9,1/9,4/9))
################################################################################ |
#
# VANTDET:
# output_rulein.R
# N Green
#
# output table of key cost-effectiveness statistics
# for rule-in test
#
# Builds one decision-tree model per candidate rule-in test, runs a
# probabilistic cost-effectiveness comparison against the status quo via
# BCEA, and writes the resulting ICER table to output/.
# Relies on objects created upstream in the analysis pipeline:
# data, costs, performance, time_res, drug, QALYloss and the
# terminal_cost_* / terminal_health_* inputs.
##TODO: DRY duplication in input arguments
# one dectree() model per candidate test, keyed by test name
dat <- list()
dat[['transcriptomic']] <- dectree(
  data = data,
  name.newtest = "transcriptomic",
  costDistns = costs,
  performance = list(performance$transcriptomic_rulein),
  time_res = list(time_res$transcriptomic),
  drug = drug,
  QALYloss = QALYloss,
  terminal_cost = terminal_cost_rulein,
  terminal_health = terminal_health_rulein)
dat[['proteomic_SELDI']] <- dectree(
  data = data,
  name.newtest = "proteomic_SELDI",
  costDistns = costs,
  performance = list(performance$proteomic_SELDI_rulein),
  time_res = list(time_res$proteomic_SELDI),
  drug = drug,
  QALYloss = QALYloss,
  terminal_cost = terminal_cost_rulein,
  terminal_health = terminal_health_rulein)
# dual test: IGRA combined with flow cytometry, so performance and
# timing inputs are supplied for both components
dat[['IGRA_flowcytometry']] <- dectree(
  data = data,
  name.newtest = c("IGRA", "flow_cytometry"),
  costDistns = costs,
  performance = performance[c('IGRA', 'flow_cytometry_HIVneg')],
  time_res = time_res[c('IGRA', 'flow_cytometry')],
  drug = drug,
  QALYloss = QALYloss,
  terminal_cost = terminal_cost_dual_rulein,
  terminal_health = terminal_health_dual_rulein)
# column-bind the per-simulation health (e) and cost (c) draws from
# every model; each model contributes a pair of columns
e_df <- do.call(cbind,
                purrr::map(dat, 'e'))
c_df <- do.call(cbind,
                purrr::map(dat, 'c'))
# split each model's column pair into its two arms
# NOTE(review): assumes odd columns hold one arm and even columns the
# other for every dectree() output -- confirm the column order returned
evens <- seq(from = 2, to = 2*length(dat), 2)
odds <- evens - 1
# incremental QALYs gained and costs incurred per test, with a leading
# zero column acting as the status-quo reference intervention
QALYgain <- as.matrix(data.frame(0, e_df[ ,odds] - e_df[ ,evens]))
cost_incur <- as.matrix(data.frame(0, c_df[ ,evens] - c_df[ ,odds]))
# signs flipped so bcea() receives the inputs on the scale it expects,
# with status-quo listed as the first intervention
res_bcea <- bcea(e = -QALYgain,
                 c = -cost_incur,
                 interventions = c("status-quo", names(dat)))
##########
# output #
##########
# cost-effectiveness plane contour plots (full, 5% level, and faceted)
contour2(res_bcea, graph = "ggplot2")
my_contour2(res_bcea, graph = "ggplot2", CONTOUR_PC = '5%') +
  coord_cartesian(xlim = c(-0.01, 0.002)) +
  theme(legend.position = "none")
my_contour2_facet(dat)
cost_effectiveness_table(dat)
# headline ICER table, printed and saved to disk
(result_tab <- cost_effectiveness_table(res_bcea))
write.csv(x = result_tab,
          file = "output/ICERtable_rulein.csv")
| /scripts/output_rulein.R | no_license | n8thangreen/VANTDET | R | false | false | 2,071 | r | #
# VANTDET:
# output_rulein.R
# N Green
#
# output table of key cost-effectiveness statistics
# for rule-in test
#
# Builds one decision-tree model per candidate rule-in test, runs a
# probabilistic cost-effectiveness comparison against the status quo via
# BCEA, and writes the resulting ICER table to output/.
# Relies on objects created upstream in the analysis pipeline:
# data, costs, performance, time_res, drug, QALYloss and the
# terminal_cost_* / terminal_health_* inputs.
##TODO: DRY duplication in input arguments
# one dectree() model per candidate test, keyed by test name
dat <- list()
dat[['transcriptomic']] <- dectree(
  data = data,
  name.newtest = "transcriptomic",
  costDistns = costs,
  performance = list(performance$transcriptomic_rulein),
  time_res = list(time_res$transcriptomic),
  drug = drug,
  QALYloss = QALYloss,
  terminal_cost = terminal_cost_rulein,
  terminal_health = terminal_health_rulein)
dat[['proteomic_SELDI']] <- dectree(
  data = data,
  name.newtest = "proteomic_SELDI",
  costDistns = costs,
  performance = list(performance$proteomic_SELDI_rulein),
  time_res = list(time_res$proteomic_SELDI),
  drug = drug,
  QALYloss = QALYloss,
  terminal_cost = terminal_cost_rulein,
  terminal_health = terminal_health_rulein)
# dual test: IGRA combined with flow cytometry, so performance and
# timing inputs are supplied for both components
dat[['IGRA_flowcytometry']] <- dectree(
  data = data,
  name.newtest = c("IGRA", "flow_cytometry"),
  costDistns = costs,
  performance = performance[c('IGRA', 'flow_cytometry_HIVneg')],
  time_res = time_res[c('IGRA', 'flow_cytometry')],
  drug = drug,
  QALYloss = QALYloss,
  terminal_cost = terminal_cost_dual_rulein,
  terminal_health = terminal_health_dual_rulein)
# column-bind the per-simulation health (e) and cost (c) draws from
# every model; each model contributes a pair of columns
e_df <- do.call(cbind,
                purrr::map(dat, 'e'))
c_df <- do.call(cbind,
                purrr::map(dat, 'c'))
# split each model's column pair into its two arms
# NOTE(review): assumes odd columns hold one arm and even columns the
# other for every dectree() output -- confirm the column order returned
evens <- seq(from = 2, to = 2*length(dat), 2)
odds <- evens - 1
# incremental QALYs gained and costs incurred per test, with a leading
# zero column acting as the status-quo reference intervention
QALYgain <- as.matrix(data.frame(0, e_df[ ,odds] - e_df[ ,evens]))
cost_incur <- as.matrix(data.frame(0, c_df[ ,evens] - c_df[ ,odds]))
# signs flipped so bcea() receives the inputs on the scale it expects,
# with status-quo listed as the first intervention
res_bcea <- bcea(e = -QALYgain,
                 c = -cost_incur,
                 interventions = c("status-quo", names(dat)))
##########
# output #
##########
# cost-effectiveness plane contour plots (full, 5% level, and faceted)
contour2(res_bcea, graph = "ggplot2")
my_contour2(res_bcea, graph = "ggplot2", CONTOUR_PC = '5%') +
  coord_cartesian(xlim = c(-0.01, 0.002)) +
  theme(legend.position = "none")
my_contour2_facet(dat)
cost_effectiveness_table(dat)
# headline ICER table, printed and saved to disk
(result_tab <- cost_effectiveness_table(res_bcea))
write.csv(x = result_tab,
          file = "output/ICERtable_rulein.csv")
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interface.R
\name{cluster_add_tasks}
\alias{cluster_add_tasks}
\title{Add Tasks to a Cluster}
\usage{
cluster_add_tasks(cl, x, f)
}
\arguments{
\item{cl}{a cluster object}
\item{x}{the data}
\item{f}{function to be applied to the data}
}
\description{
Add a batch of tasks to a cluster
}
| /man/cluster_add_tasks.Rd | no_license | KirosG/threadpool | R | false | true | 370 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interface.R
\name{cluster_add_tasks}
\alias{cluster_add_tasks}
\title{Add a Tasks to a Cluster}
\usage{
cluster_add_tasks(cl, x, f)
}
\arguments{
\item{cl}{a cluster object}
\item{x}{the data}
\item{f}{function to be applied to the data}
}
\description{
Add a batch of tasks to a cluster
}
|
#' Get the ad accounts assigned to Facebook business users.
#'
#' Calls the Facebook Graph API. When \code{business_users_id} is NULL the
#' business users of \code{business_id} are fetched first and their ids used.
#'
#' @param business_users_id Business user id (when NULL it is resolved
#'   from \code{business_id}).
#' @param business_id Facebook business id.
#' @param api_version Graph API version string, e.g. "v8.0".
#' @param access_token Facebook API access token.
#'
#' @return A data.frame with columns \code{id}, \code{task} and
#'   \code{permitted_tasks} (comma-separated task lists); zero rows if the
#'   API reports an error.
fbGetBusinessUserAdAccounts <- function(business_users_id = NULL,
                                        business_id = getOption("rfacebookstat.business_id"),
                                        api_version = getOption("rfacebookstat.api_version"),
                                        access_token = getOption("rfacebookstat.access_token"))
{
  if ( is.null(access_token) || is.null(business_id) ) {
    stop("Arguments access_token and business_id are required.")
  }
  # Temporarily disable stringsAsFactors for the data.frame() call below and
  # restore it on exit -- the previous version changed the global option
  # permanently. isTRUE() also copes with R >= 4.0 where the option is unset.
  if (isTRUE(getOption("stringsAsFactors"))) {
    options(stringsAsFactors = FALSE)
    on.exit(options(stringsAsFactors = TRUE), add = TRUE)
  }
  # No user id given: list the business users and take their ids.
  # NOTE(review): assumes a single business user id per request; a vector
  # here would produce several URLs in paste0() -- confirm intended usage.
  if ( is.null(business_users_id) ) {
    QueryString <- paste0("https://graph.facebook.com/",
                          api_version, "/", business_id, "/business_users?fields=id,email,business,first_name,last_name,name,role,title", "&limit=150&access_token=", access_token)
    answer <- httr::GET(QueryString)
    raw <- fromJSON(httr::content(answer, "text", "application/json",
                                  encoding = "UTF-8"))
    message("user = ", raw$data$name, "\nid = ", raw$data$id)
    business_users_id <- raw$data$id
  }
  # Request the ad accounts assigned to the user.
  QueryString <- paste0("https://graph.facebook.com/",
                        api_version, "/", business_users_id, "/assigned_ad_accounts?fields=tasks,permitted_tasks",
                        "&limit=5000&access_token=", access_token)
  answer <- httr::GET(QueryString)
  raw <- fromJSON(httr::content(answer, "text", "application/json",
                                encoding = "UTF-8"))
  # Surface API errors as a proper warning (not packageStartupMessage) and
  # return an empty result instead of failing on the missing payload below.
  if (length(raw$error) > 0) {
    warning(raw$error$code, " - ", raw$error$message, call. = FALSE)
    return(data.frame(id = character(0),
                      task = character(0),
                      permitted_tasks = character(0)))
  }
  # One row per ad account; task vectors are collapsed to "a,b,c" strings.
  result <- data.frame(id = raw$data$id,
                       task = sapply(raw$data$tasks, str_c, collapse = ","),
                       permitted_tasks = sapply(raw$data$permitted_tasks, str_c, collapse = ","))
  return(result)
}
| /rfacebookstat/R/fbGetBusinessUserAdAccounts.R | no_license | selesnow/testspace | R | false | false | 2,204 | r | fbGetBusinessUserAdAccounts <- function(business_users_id = NULL,
business_id = getOption("rfacebookstat.business_id"),
api_version = getOption("rfacebookstat.api_version"),
access_token = getOption("rfacebookstat.access_token"))
{
if ( is.null(access_token) || is.null(business_id) ) {
stop("Arguments access_token and business_id is require.")
}
factor_change <- FALSE
if (getOption("stringsAsFactors")) {
options(stringsAsFactors = F)
factor_change <- TRUE
}
# check user id
if ( is.null(business_users_id) ) {
QueryString <- paste0("https://graph.facebook.com/",
api_version, "/", business_id, "/business_users?fields=id,email,business,first_name,last_name,name,role,title", "&limit=150&access_token=", access_token)
answer <- httr::GET(QueryString)
raw <- fromJSON(httr::content(answer, "text", "application/json",
encoding = "UTF-8"))
message("user = ", raw$data$name, "\nid = ", raw$data$id)
business_users_id <- raw$data$id
}
# load accounts
QueryString <- paste0("https://graph.facebook.com/",
api_version, "/", business_users_id, "/assigned_ad_accounts?fields=tasks,permitted_tasks",
"&limit=5000&access_token=", access_token)
# get answer
answer <- httr::GET(QueryString)
# pars answer
raw <- fromJSON(httr::content(answer, "text", "application/json",
encoding = "UTF-8"))
# check for error
if (length(raw$error) > 0) {
packageStartupMessage(paste0(" - ", raw$error$code,
" - ", raw$error$message), appendLF = T)
}
# create data frame
result <- data.frame(id = raw$data$id,
task = sapply(raw$data$tasks, str_c, collapse = ","),
permitted_tasks = sapply(raw$data$permitted_tasks, str_c, collapse = ","))
return(result)
}
|
# sink() redirects console output from this script to a text file.
# Without split, sink will only send output to the text file -
# you won't get output in the console.
# Using split=TRUE means output is "split" between the console
# and the text file, so you get output in both.
sink("test.txt",split=TRUE)
x<-c(1,2,3,4,5)
y<-c(6,7,8,9,10)
z<-x*y  # elementwise product of x and y
print(z)
sink() | /Week03/week3.R | no_license | telliott27/R-Tutorial | R | false | false | 364 | r | #sink will write the output of the source file to a text file
# Without split, sink will only send output to the text file -
# you won't get output in the console.
# Using split=TRUE means output is "split" between the console
# and the text file, so you get output in both.
sink("test.txt",split=TRUE)
x<-c(1,2,3,4,5)
y<-c(6,7,8,9,10)
z<-x*y  # elementwise product of x and y
print(z)
sink()
#' @title Recreate Match Data
#'
#' @description Rebuild and date-order all basic match information for a
#' competition season from the hashes stored in redis.
#'
#' @details Redis Keys used;
#' \itemize{
#'  \item{\strong{[HASH]} :: \code{csm:{comp_id}:{season}:{match_id}}}
#' }
#'
#' @param KEYS A list containing options such as testing / prediction /
#'  important variables and information. Also contains API information.
#'
#' @return A data frame containing all the matches in a particular season
#'  (empty when no matches are stored in redis).
#'
#' @export
recreate_matchdata <- function(KEYS) {
  # Every redis key holding a match for this competition + season
  matchKeys <- KEYS$RED$KEYS(paste0('csm:', KEYS$COMP, ':', KEYS$SEASON, '*'))
  if (length(matchKeys) == 0) {
    print(paste0(Sys.time(), ' : No match data found for the providing input parameters.'))
    return(data.frame())
  }
  # Pull every match hash in a single pipelined round trip
  matchHashes <- KEYS$RED$pipeline(
    .commands = lapply(
      X = purrr::flatten_chr(matchKeys),
      FUN = function(k) KEYS$PIPE$HGETALL(k)
    )
  )
  matchHashes <- lapply(matchHashes, footballstats::create_hash)
  # One single-row data frame per match, stacked then ordered by date
  matchRows <- lapply(
    X = seq_along(matchHashes),
    FUN = function(i) data.frame(matchHashes[[i]], stringsAsFactors = FALSE)
  )
  matchData <- purrr::reduce(matchRows, rbind)
  return(footballstats::order_matchdata(matchData))
}
#' @title Order Match Dataset
#'
#' @description Sort a data frame of played matches by date (ascending),
#' breaking ties on the match identifier. The \code{zzz.date} column is
#' converted from character to \code{Date} in the process.
#'
#' @param matchData A data frame that contains rows of single matches
#'  that have been played between two teams.
#' @param formatter A string that defines how the dates coming in should
#'  look, i.e. allow for any kind of standard date formats.
#'
#' @return A data frame that has been ordered by date.
#'
#' @export
order_matchdata <- function(matchData, formatter = '%d.%m.%Y') {
  matchData$zzz.date <- as.Date(matchData$zzz.date, format = formatter)
  rowOrder <- order(matchData$zzz.date, matchData$zzz.matchID)
  matchData[rowOrder, ]
}
#' @title Commentary From Redis
#'
#' @description A function that retrieves the basic commentary
#' information from redis.
#'
#' @param KEYS A list holding the redis connection; only
#'  \code{KEYS$RED$HMGET} is used here to read hash fields.
#' @param keyName A character string that defines the redis key
#'  where the commentary is stored.
#' @param returnItems A character vector defining the names of the
#'  fields in the commentary key in redis to be retrieved.
#'
#' @return A numeric vector with one value per requested item, or
#'  \code{NULL} if any requested field is missing from the hash.
#'
#' @export
commentary_from_redis <- function(KEYS, keyName, returnItems) {
  # Read all requested fields of the hash in one HMGET call
  results <- KEYS$RED$HMGET(keyName, field = returnItems)
  names(results) <- returnItems
  # Possession is stored as e.g. "55%"; strip the percent sign
  if ("possesiontime" %in% returnItems) {
    results$possesiontime <- gsub(
      pattern = "%",
      replacement = "",
      x = results$possesiontime
    )
  }
  # Fields absent from the hash come back zero-length
  nonNull <- vapply(results, length, integer(1)) > 0
  # Coerce each value to a double; empty strings count as 0 and missing
  # fields become NA. vapply keeps the output type stable (the previous
  # sapply could silently change type), and seq_along handles an empty
  # request without the 1:length() trap.
  vec <- vapply(
    X = seq_along(results),
    FUN = function(i) {
      it <- results[[i]]
      if (!nonNull[i]) {
        NA_real_
      } else if (it == "") {
        0
      } else {
        as.double(it)
      }
    },
    FUN.VALUE = numeric(1)
  )
  # All-or-nothing: any missing field invalidates the whole record
  if (anyNA(vec)) NULL else vec
}
#' @title Scale Data
#'
#' @description Min-max scale the first \code{dataScales$cols} columns of a
#' model data set using pre-computed bounds, carrying the \code{res}
#' (result) column through untouched when present.
#'
#' @param mDat A data frame that defines the data set used to build the model
#'  which is currently UNSCALED.
#' @param dataScales A list which contains multiple information, including \code{sMax}
#'  and \code{sMin} which define the bounds of the \code{dataScale$commentaries} values.
#'
#' @return A data frame the same size as \code{mDat}, which has now been scaled.
#'
#' @export
scale_data <- function(mDat, dataScales) {
  featureCols <- mDat[, 1:dataScales$cols]
  scaledFrame <- as.data.frame(
    scale(featureCols,
          center = dataScales$sMin,
          scale = dataScales$sMax - dataScales$sMin)
  )
  if ('res' %in% colnames(mDat)) {
    scaledFrame <- cbind(scaledFrame, res = mDat$res)
  }
  return(scaledFrame)
}
| /R/classify_utils.R | no_license | O1sims/FootballStats | R | false | false | 4,332 | r | #' @title Recreate Match Data
#'
#' @description A function that can auto generate and order by date all
#' the basic match information if it exists in redis.
#'
#' @details Redis Keys used;
#' \itemize{
#' \item{\strong{[HASH]} :: \code{csm:{comp_id}:{season}:{match_id}}}
#' }
#'
#' @param KEYS A list containing options such as testing / prediction /
#' important variables and information. Also contains API information.
#'
#' @return matchData. A data frame containing all the matches in a particular season.
#'
#' @export
recreate_matchdata <- function(KEYS) {
  # Every redis key holding a match for this competition + season
  matchKeys <- KEYS$RED$KEYS(paste0('csm:', KEYS$COMP, ':', KEYS$SEASON, '*'))
  if (length(matchKeys) == 0) {
    print(paste0(Sys.time(), ' : No match data found for the providing input parameters.'))
    return(data.frame())
  }
  # Pull every match hash in a single pipelined round trip
  matchHashes <- KEYS$RED$pipeline(
    .commands = lapply(
      X = purrr::flatten_chr(matchKeys),
      FUN = function(k) KEYS$PIPE$HGETALL(k)
    )
  )
  matchHashes <- lapply(matchHashes, footballstats::create_hash)
  # One single-row data frame per match, stacked then ordered by date
  matchRows <- lapply(
    X = seq_along(matchHashes),
    FUN = function(i) data.frame(matchHashes[[i]], stringsAsFactors = FALSE)
  )
  matchData <- purrr::reduce(matchRows, rbind)
  return(footballstats::order_matchdata(matchData))
}
#' @title Order Match Dataset
#'
#' @description Sort a data frame of played matches by date (ascending),
#' breaking ties on the match identifier. The \code{zzz.date} column is
#' converted from character to \code{Date} in the process.
#'
#' @param matchData A data frame that contains rows of single matches
#'  that have been played between two teams.
#' @param formatter A string that defines how the dates coming in should
#'  look, i.e. allow for any kind of standard date formats.
#'
#' @return A data frame that has been ordered by date.
#'
#' @export
order_matchdata <- function(matchData, formatter = '%d.%m.%Y') {
  matchData$zzz.date <- as.Date(matchData$zzz.date, format = formatter)
  rowOrder <- order(matchData$zzz.date, matchData$zzz.matchID)
  matchData[rowOrder, ]
}
#' @title Commentary From Redis
#'
#' @description A function that retrieves the basic commentary
#' information from redis.
#'
#' @param KEYS A list holding the redis connection; only
#'  \code{KEYS$RED$HMGET} is used here to read hash fields.
#' @param keyName A character string that defines the redis key
#'  where the commentary is stored.
#' @param returnItems A character vector defining the names of the
#'  fields in the commentary key in redis to be retrieved.
#'
#' @return A numeric vector with one value per requested item, or
#'  \code{NULL} if any requested field is missing from the hash.
#'
#' @export
commentary_from_redis <- function(KEYS, keyName, returnItems) {
  # Read all requested fields of the hash in one HMGET call
  results <- KEYS$RED$HMGET(keyName, field = returnItems)
  names(results) <- returnItems
  # Possession is stored as e.g. "55%"; strip the percent sign
  if ("possesiontime" %in% returnItems) {
    results$possesiontime <- gsub(
      pattern = "%",
      replacement = "",
      x = results$possesiontime
    )
  }
  # Fields absent from the hash come back zero-length
  nonNull <- vapply(results, length, integer(1)) > 0
  # Coerce each value to a double; empty strings count as 0 and missing
  # fields become NA. vapply keeps the output type stable (the previous
  # sapply could silently change type), and seq_along handles an empty
  # request without the 1:length() trap.
  vec <- vapply(
    X = seq_along(results),
    FUN = function(i) {
      it <- results[[i]]
      if (!nonNull[i]) {
        NA_real_
      } else if (it == "") {
        0
      } else {
        as.double(it)
      }
    },
    FUN.VALUE = numeric(1)
  )
  # All-or-nothing: any missing field invalidates the whole record
  if (anyNA(vec)) NULL else vec
}
#' @title Scale Data
#'
#' @description Min-max scale the first \code{dataScales$cols} columns of a
#' model data set using pre-computed bounds, carrying the \code{res}
#' (result) column through untouched when present.
#'
#' @param mDat A data frame that defines the data set used to build the model
#'  which is currently UNSCALED.
#' @param dataScales A list which contains multiple information, including \code{sMax}
#'  and \code{sMin} which define the bounds of the \code{dataScale$commentaries} values.
#'
#' @return A data frame the same size as \code{mDat}, which has now been scaled.
#'
#' @export
scale_data <- function(mDat, dataScales) {
  featureCols <- mDat[, 1:dataScales$cols]
  scaledFrame <- as.data.frame(
    scale(featureCols,
          center = dataScales$sMin,
          scale = dataScales$sMax - dataScales$sMin)
  )
  if ('res' %in% colnames(mDat)) {
    scaledFrame <- cbind(scaledFrame, res = mDat$res)
  }
  return(scaledFrame)
}
|
# Connect to Oracle over JDBC and read from the Consolidado_ICA table.
# SECURITY NOTE: credentials are hard-coded in plain text; move them to
# environment variables or a secrets store before sharing this script.
library(RJDBC)
drv <- JDBC("oracle.jdbc.OracleDriver", classPath = "C:/Users/Rafael/.DataGrip2017.2/config/jdbc-drivers/Oracle/12.1.0.2/ojdbc6-12.1.0.2.jar")
con <- dbConnect(drv, "jdbc:oracle:thin:@//localhost:1521/pdbrocampo", "rocampo", "rocampo")
# Sanity check: current database time
d <- dbGetQuery(con, "Select SysDate From Dual")
consolidado <- dbGetQuery(con, "Select * From Consolidado_ICA where rownum < 20")
head(consolidado, 3)
nrow(consolidado)
# Fetching the data in chunks of 100 rows
res <- dbSendQuery(con, "Select * From Consolidado_Ica Where rownum < 300")
chunk <- NULL
while (!dbHasCompleted(res)) {
  chunk <- dbFetch(res, 100)
  # cat(), not print(): print() does not accept an extra message string
  # (the original passed it as the `digits` argument and failed)
  cat(nrow(chunk), " records fetched\n")
}
dbClearResult(res)
dbDisconnect(con)
# Only the last fetched chunk survives the loop
chunk
| /R/Database.R | no_license | rocampoa/EjemplosR | R | false | false | 688 | r | library(RJDBC)
drv <- JDBC("oracle.jdbc.OracleDriver", classPath = "C:/Users/Rafael/.DataGrip2017.2/config/jdbc-drivers/Oracle/12.1.0.2/ojdbc6-12.1.0.2.jar")
con <- dbConnect(drv, "jdbc:oracle:thin:@//localhost:1521/pdbrocampo", "rocampo", "rocampo")
d <- dbGetQuery(con, "Select SysDate From Dual")
consolidado <- dbGetQuery(con, "Select * From Consolidado_ICA where rownum < 20")
head(consolidado, 3)
nrow(consolidado)
#Trayendo datos por pedazos
res <- dbSendQuery(con, "Select * From Consolidado_Ica Where rownum < 300")
chunk <- NULL
while (!dbHasCompleted(res)) {
chunk <- dbFetch(res, 100)
print(nrow(chunk), " records fetched\n")
}
dbClearResult(res)
dbDisconnect(con)
chunk
|
# Before running please set the UCI HAR Dataset folder as the working directory
# Please remember to have all your packages up-to-date
library(data.table)
# Part 0: Read all data
## 1: Complementary data
features <- fread("./features.txt", sep=" ")
activity_labels <- fread("./activity_labels.txt", sep=" ")
### Set variable names for features and activity_labels
setnames(features,c("feat_id","feat_name"))
setnames(activity_labels,c("activity","act_name"))
## 2: Train set
### 2a: Data
subject_train <- fread("./train/subject_train.txt")
X_train <- fread("./train/X_train.txt")
y_train <- fread("./train/y_train.txt")
#### Set variable names for subject_train, X_train and y_train
setnames(subject_train,"subject")
setnames(X_train,features$feat_name)
setnames(y_train,"activity")
### 2b: Inertial Signals (optional, not used by the assignment below)
body_acc_x_train <- fread("./train/Inertial Signals/body_acc_x_train.txt")
body_acc_y_train <- fread("./train/Inertial Signals/body_acc_y_train.txt")
body_acc_z_train <- fread("./train/Inertial Signals/body_acc_z_train.txt")
body_gyro_x_train <- fread("./train/Inertial Signals/body_gyro_x_train.txt")
body_gyro_y_train <- fread("./train/Inertial Signals/body_gyro_y_train.txt")
body_gyro_z_train <- fread("./train/Inertial Signals/body_gyro_z_train.txt")
total_acc_x_train <- fread("./train/Inertial Signals/total_acc_x_train.txt")
total_acc_y_train <- fread("./train/Inertial Signals/total_acc_y_train.txt")
total_acc_z_train <- fread("./train/Inertial Signals/total_acc_z_train.txt")
## 3: Test set
### 3a: Data
subject_test <- fread("./test/subject_test.txt")
X_test <- fread("./test/X_test.txt")
y_test <- fread("./test/y_test.txt")
#### Set variable names for subject_test, X_test and y_test
setnames(subject_test,"subject")
setnames(X_test,features$feat_name)
setnames(y_test,"activity")
### 3b: Inertial Signals (optional, not used by the assignment below)
body_acc_x_test <- fread("./test/Inertial Signals/body_acc_x_test.txt")
body_acc_y_test <- fread("./test/Inertial Signals/body_acc_y_test.txt")
body_acc_z_test <- fread("./test/Inertial Signals/body_acc_z_test.txt")
body_gyro_x_test <- fread("./test/Inertial Signals/body_gyro_x_test.txt")
body_gyro_y_test <- fread("./test/Inertial Signals/body_gyro_y_test.txt")
body_gyro_z_test <- fread("./test/Inertial Signals/body_gyro_z_test.txt")
total_acc_x_test <- fread("./test/Inertial Signals/total_acc_x_test.txt")
total_acc_y_test <- fread("./test/Inertial Signals/total_acc_y_test.txt")
total_acc_z_test <- fread("./test/Inertial Signals/total_acc_z_test.txt")
### END OF DATA READING ###
### START OF ASSIGNMENT ###
# Part 1: Merge train and test set
data <- rbind(cbind(X_train,y_train,subject_train),cbind(X_test,y_test,subject_test))
# Part 2: Extract measurements on the mean and standard deviation
## In file features_info.txt is specified how to identify variables related to mean and standard deviation (=variable name contains the words "mean()" or "std()")
cols <- c(sapply(unique(rbind(features[grepl(pattern = 'mean()',x = features$feat_name,fixed = TRUE),2,with=F],features[grepl(pattern = 'std()',x = features$feat_name,fixed = TRUE),2,with=F])),as.character),"subject","activity")
data <- data[,cols,with=F]
# Part 3: Change activity variable from code to description
## Add column "act_name" from dataset "activity_labels" to dataset "data", using column "activity" as key
data <- merge(data,y = activity_labels,by = "activity",all.x = T)
## Now we have two activity columns in dataset "data": the original one called "activity" labeled as code, and "act_name" labeled as description
## Because we use merge, there is a one-to-one correspondence between "activity" and "act_name", as we can check with the table command below
table(data$activity,data$act_name)
## Since we only need the new activity column, we just delete the original one
data <- data[,-c("activity"),with=F]
## Finally we rename the description column as "activity", just as the original one
## One way to interpret this code is that we just "replaced" the codes in variable "activity" with their respective descriptions.
setnames(data,"act_name","activity")
# Part 4: Label the data set with descriptive variable names.
## For this part, the conventions of tidy data will be followed. That means the variables name:
## - Are intended to be descriptive (e.g. long names instead of short)
## - Will have NO underscores, dots, spaces or other symbols, EXCEPT hyphen ("-") to identify the schema of the information.
## - All will be lowercase
## Create initial vector with variable names in lowercase (excluding "subject" and "label", 66 in total)
new_var_names <- tolower(cols[1:(length(cols)-2)])
## 1: Replace the first character of every variable (f->frequency, t->time)
new_var_names <- paste(ifelse(substring(new_var_names,1,1)=="f","frequency-","time-"),substring(new_var_names,2,nchar(new_var_names)),sep="")
## 2: Replace "bodybody" for "body-"
new_var_names <- gsub('bodybody','body',x = new_var_names)
new_var_names <- gsub('body','body-',x = new_var_names)
## 3: Replace "acc" for "acceleration-"
new_var_names <- gsub('acc','acceleration-',x = new_var_names)
## 4: Replace "gyro" for "angularvelocity-"
new_var_names <- gsub('gyro','angularvelocity-',x = new_var_names)
## 5: Replace "mag" for "magnitude-"
new_var_names <- gsub('mag','magnitude-',x = new_var_names)
## 6: Replace "gravity" for "gravity-"
new_var_names <- gsub('gravity','gravity-',x = new_var_names)
## 7: Replace "jerk" for "jerk-"
new_var_names <- gsub('jerk','jerk-',x = new_var_names)
## 8: Replace "-std()" for "-standarddeviation"
new_var_names <- gsub('-std()','standarddeviation',x = new_var_names)
## 9: Replace "-mean()" for "-mean"
## NOTE(review): this line repeats the "-std()" substitution from step 8
## and never touches "-mean()" -- looks like a copy-paste bug to confirm.
new_var_names <- gsub('-std()','standarddeviation',x = new_var_names)
## 10: Replace "()" for ""
new_var_names <- gsub('\\(\\)','\\-',x = new_var_names)
## 11: Replace "--" for "-"
new_var_names <- gsub('\\-\\-','\\-',x = new_var_names)
## 12: Delete the last character if it is "-"
new_var_names <- paste(substring(new_var_names,1,nchar(new_var_names)-1),ifelse(substring(new_var_names,nchar(new_var_names),nchar(new_var_names))=="-","",substring(new_var_names,nchar(new_var_names),nchar(new_var_names))),sep="")
## Add "subject" and "activity" to names vector
new_var_names <- c(new_var_names,"subject","activity")
## Put the final names to variables in dataset "data"
setnames(data,new_var_names)
## Check names
names(data)
# Part 5: Creating the tidy data file
## Calculate average for each variable (except "subject" and "activity", that are the variables used for grouping)
tidy_data <- data[, lapply(.SD, mean, na.rm=TRUE), by=list(subject,activity)]
## Export final dataset (using TAB as delimiter, strings NOT quoted)
write.table(tidy_data,"./output/tidy_data.txt",quote=F,row.name=FALSE,sep="\t")
### END OF ASSIGNMENT ###
# Annex
# Load file with details of the variables
details <- fread("./details.txt",header=T)
# Start the three report files from scratch on every run
if(file.exists("./output/summary_tidy_data.txt")) file.remove("./output/summary_tidy_data.txt")
if(file.exists("./output/variables_tidy_data.txt")) file.remove("./output/variables_tidy_data.txt")
if(file.exists("./output/description_tidy_data.txt")) file.remove("./output/description_tidy_data.txt")
# For each column of tidy_data, append its summary, name and details
# (from details.txt) to the three report files
for(i in 1:ncol(tidy_data)) {
  write(paste("Variable ",i,": ",names(tidy_data[,i,with=F]),sep=""),"./output/summary_tidy_data.txt",append=TRUE)
  write("","./output/summary_tidy_data.txt",append=TRUE)
  write(summary(tidy_data[,i,with=F]),"./output/summary_tidy_data.txt",append=TRUE)
  write("","./output/summary_tidy_data.txt",append=TRUE)
  write(paste(i,". ",names(tidy_data[,i,with=F]),sep=""),"./output/variables_tidy_data.txt",append=TRUE)
  write(paste("Variable ",i,": ",names(tidy_data[,i,with=F]),sep=""),"./output/description_tidy_data.txt",append=TRUE)
  write("","./output/description_tidy_data.txt",append=TRUE)
  ## NOTE(review): class(names(...)) is always "character"; the class of
  ## the column itself was probably intended -- confirm.
  write(paste("* Class: ",class(names(tidy_data[,i,with=F])),sep=""),"./output/description_tidy_data.txt",append=TRUE)
  write("","./output/description_tidy_data.txt",append=TRUE)
  write("* Unique values:","./output/description_tidy_data.txt",append=TRUE)
  write(sapply(tidy_data[,i,with=F],unique),"./output/description_tidy_data.txt",append=TRUE)
  write("","./output/description_tidy_data.txt",append=TRUE)
  write("* Description of the variable:","./output/description_tidy_data.txt",append=TRUE)
  write(details$description[i],"./output/description_tidy_data.txt",append=TRUE)
  write("","./output/description_tidy_data.txt",append=TRUE)
  write("* Explanation of the schema:","./output/description_tidy_data.txt",append=TRUE)
  write(details$schema[i],"./output/description_tidy_data.txt",append=TRUE)
  write("","./output/description_tidy_data.txt",append=TRUE)
  write("","./output/description_tidy_data.txt",append=TRUE)
}
| /run_analysis.R | no_license | injemaster83/GettingAndCleaningData | R | false | false | 8,851 | r | # Before running please set the UCI HAR Dataset folder as the working directory
# Please remember to have all your packages up-to-date
library(data.table)
# Part 0: Read all data
## 1: Complementary data
features <- fread("./features.txt", sep=" ")
activity_labels <- fread("./activity_labels.txt", sep=" ")
### Set variable names for features and activity_levels
setnames(features,c("feat_id","feat_name"))
setnames(activity_labels,c("activity","act_name"))
## 2: Train set
### 2a: Data
subject_train <- fread("./train/subject_train.txt")
X_train <- fread("./train/X_train.txt")
y_train <- fread("./train/y_train.txt")
#### Set variable names forn subject_train, X_train and y_train
setnames(subject_train,"subject")
setnames(X_train,features$feat_name)
setnames(y_train,"activity")
### 2b: Inertial Signals (optional)
body_acc_x_train <- fread("./train/Inertial Signals/body_acc_x_train.txt")
body_acc_y_train <- fread("./train/Inertial Signals/body_acc_y_train.txt")
body_acc_z_train <- fread("./train/Inertial Signals/body_acc_z_train.txt")
body_gyro_x_train <- fread("./train/Inertial Signals/body_gyro_x_train.txt")
body_gyro_y_train <- fread("./train/Inertial Signals/body_gyro_y_train.txt")
body_gyro_z_train <- fread("./train/Inertial Signals/body_gyro_z_train.txt")
total_acc_x_train <- fread("./train/Inertial Signals/total_acc_x_train.txt")
total_acc_y_train <- fread("./train/Inertial Signals/total_acc_y_train.txt")
total_acc_z_train <- fread("./train/Inertial Signals/total_acc_z_train.txt")
## 3: Test set
### 3a: Data
subject_test <- fread("./test/subject_test.txt")
X_test <- fread("./test/X_test.txt")
y_test <- fread("./test/y_test.txt")
#### Set variable names forn subject_test, X_test and y_test
setnames(subject_test,"subject")
setnames(X_test,features$feat_name)
setnames(y_test,"activity")
### 3b: Inertial Signals (optional)
body_acc_x_test <- fread("./test/Inertial Signals/body_acc_x_test.txt")
body_acc_y_test <- fread("./test/Inertial Signals/body_acc_y_test.txt")
body_acc_z_test <- fread("./test/Inertial Signals/body_acc_z_test.txt")
body_gyro_x_test <- fread("./test/Inertial Signals/body_gyro_x_test.txt")
body_gyro_y_test <- fread("./test/Inertial Signals/body_gyro_y_test.txt")
body_gyro_z_test <- fread("./test/Inertial Signals/body_gyro_z_test.txt")
total_acc_x_test <- fread("./test/Inertial Signals/total_acc_x_test.txt")
total_acc_y_test <- fread("./test/Inertial Signals/total_acc_y_test.txt")
total_acc_z_test <- fread("./test/Inertial Signals/total_acc_z_test.txt")
### END OF DATA READING ###
### START OF ASSIGNMENT ###
# Part 1: Merge train and test set
## cbind glues measurements (X), activity code (y) and subject id side by
## side for each set; rbind then stacks the train rows on top of the test rows
data <- rbind(cbind(X_train,y_train,subject_train),cbind(X_test,y_test,subject_test))
# Part 2: Extract measurements on the mean and standard deviation
## In file features_info.txt is specified how to identify variables related to mean and standard deviation (=variable name contains the words "mean()" or "std()")
## grepl(..., fixed = TRUE) keeps feature names containing the literal
## substrings "mean()" or "std()" (so e.g. "meanFreq()" is excluded);
## "subject" and "activity" are appended so the id columns survive the
## column selection below.
## (fixed: with=FALSE spelled out instead of the reassignable shorthand F)
cols <- c(sapply(unique(rbind(features[grepl(pattern = 'mean()',x = features$feat_name,fixed = TRUE),2,with=FALSE],features[grepl(pattern = 'std()',x = features$feat_name,fixed = TRUE),2,with=FALSE])),as.character),"subject","activity")
data <- data[,cols,with=FALSE]
# Part 3: Change activity variable from code to description
## Add column "act_name" from dataset "activity_labels" to dataset "data", using column "activity" as key
## (fixed: TRUE spelled out instead of the reassignable shorthand T)
data <- merge(data,y = activity_labels,by = "activity",all.x = TRUE)
## Now we have two activity columns in dataset "data": the original one called "activity" labeled as code, and "act_name" labeled as description
## Because we use merge, there is a one-to-one correspondence between "activity" and "act_name", as we can check with the table command below
## (top-level expression: the table only prints when run interactively)
table(data$activity,data$act_name)
## Since we only need the new activity column, we just delete the original one
data <- data[,-c("activity"),with=FALSE]
## Finally we rename the description column as "activity", just as the original one
## One way to interpret this code is that we just "replaced" the codes in variable "activity" with their respective descriptions.
setnames(data,"act_name","activity")
# Part 4: Labels the data set with descriptive variable names.
## For this part, the conventions of tidy data will be followed. That means the variables name:
## - Are intended to be descriptive (e.g. long names instead of short)
## - Will have NO underscores, dots, spaces or other symbols, EXCEPT hyphen ("-") to identify the schema of the information.
## - All will be lowercase
## Create initial vector with variable names in lowercase (excluding "subject" and "label", 66 in total)
new_var_names <- tolower(cols[1:(length(cols)-2)])
## NOTE: the order of the substitutions below matters -- e.g. "bodybody"
## must be collapsed to "body" before "body" gains its trailing hyphen.
## 1: Replace the first character of every variable (f->frequency, t->time)
new_var_names <- paste(ifelse(substring(new_var_names,1,1)=="f","frequency-","time-"),substring(new_var_names,2,nchar(new_var_names)),sep="")
## 2: Replace "bodybody" for "body", then "body" for "body-"
new_var_names <- gsub('bodybody','body',x = new_var_names)
new_var_names <- gsub('body','body-',x = new_var_names)
## 3: Replace "acc" for "acceleration-"
new_var_names <- gsub('acc','acceleration-',x = new_var_names)
## 4: Replace "gyro" for "angularvelocity-"
new_var_names <- gsub('gyro','angularvelocity-',x = new_var_names)
## 5: Replace "mag" for "magnitude-"
new_var_names <- gsub('mag','magnitude-',x = new_var_names)
## 6: Replace "gravity" for "gravity-"
new_var_names <- gsub('gravity','gravity-',x = new_var_names)
## 7: Replace "jerk" for "jerk-"
new_var_names <- gsub('jerk','jerk-',x = new_var_names)
## 8: Replace "-std()" with "standarddeviation"
new_var_names <- gsub('-std()','standarddeviation',x = new_var_names)
## 9: Replace "-mean()" with "mean"
## (fixed: this line was a copy-paste duplicate of step 8 and repeated the
## std() substitution instead of handling mean(). The final names come out
## the same either way -- the leftover "()" and "--" are cleaned up by
## steps 10-11 -- but now the code matches its stated intent.)
new_var_names <- gsub('-mean()','mean',x = new_var_names)
## 10: Replace "()" with "-"
new_var_names <- gsub('\\(\\)','\\-',x = new_var_names)
## 11: Collapse "--" into "-"
new_var_names <- gsub('\\-\\-','\\-',x = new_var_names)
## 12: Delete the last character if it is "-"
new_var_names <- paste(substring(new_var_names,1,nchar(new_var_names)-1),ifelse(substring(new_var_names,nchar(new_var_names),nchar(new_var_names))=="-","",substring(new_var_names,nchar(new_var_names),nchar(new_var_names))),sep="")
## Add "subject" and "activity" to names vector
## (they were excluded from the renaming above and keep their plain names)
new_var_names <- c(new_var_names,"subject","activity")
## Put the final names to variables in dataset "data"
setnames(data,new_var_names)
## Check names (top-level expression: only prints when run interactively)
names(data)
# Part 5: Creating the tidy data file
## Calculate average for each variable (except "subject" and "activity", that are the variables used for grouping)
## .SD holds every column other than the by= grouping variables
tidy_data <- data[, lapply(.SD, mean, na.rm=TRUE), by=list(subject,activity)]
## Export final dataset (using TAB as delimiter, strings NOT quoted)
## (fixed: quote=FALSE spelled out instead of F, and the full argument name
## row.names used -- "row.name" only worked via partial argument matching)
write.table(tidy_data,"./output/tidy_data.txt",quote=FALSE,row.names=FALSE,sep="\t")
### END OF ASSIGNMENT ###
# Annex
# Build three codebook-style files for the exported tidy data set:
#  - summary_tidy_data.txt: per-variable summary statistics
#  - variables_tidy_data.txt: numbered list of variable names
#  - description_tidy_data.txt: class, unique values, description and schema
# Load file with details of the variables; assumes one row per tidy_data
# column (in the same order) with columns "description" and "schema" --
# TODO confirm against details.txt.
details <- fread("./details.txt",header=TRUE)
summary_file <- "./output/summary_tidy_data.txt"
variables_file <- "./output/variables_tidy_data.txt"
description_file <- "./output/description_tidy_data.txt"
# Remove stale output so the append-mode writes below start from empty files
for (out_file in c(summary_file, variables_file, description_file)) {
  if (file.exists(out_file)) file.remove(out_file)
}
for (i in seq_len(ncol(tidy_data))) {
  var_name <- names(tidy_data)[i]
  # Summary file: "Variable <i>: <name>" header followed by summary stats
  write(paste("Variable ",i,": ",var_name,sep=""),summary_file,append=TRUE)
  write("",summary_file,append=TRUE)
  write(summary(tidy_data[,i,with=FALSE]),summary_file,append=TRUE)
  write("",summary_file,append=TRUE)
  # Variables file: "<i>. <name>"
  write(paste(i,". ",var_name,sep=""),variables_file,append=TRUE)
  # Description file: header, class, unique values, description, schema
  write(paste("Variable ",i,": ",var_name,sep=""),description_file,append=TRUE)
  write("",description_file,append=TRUE)
  # (fixed: report the class of the column itself; the original called
  # class(names(...)), which is the class of the *name* -- always "character")
  write(paste("* Class: ",class(tidy_data[[i]]),sep=""),description_file,append=TRUE)
  write("",description_file,append=TRUE)
  write("* Unique values:",description_file,append=TRUE)
  write(sapply(tidy_data[,i,with=FALSE],unique),description_file,append=TRUE)
  write("",description_file,append=TRUE)
  write("* Description of the variable:",description_file,append=TRUE)
  write(details$description[i],description_file,append=TRUE)
  write("",description_file,append=TRUE)
  write("* Explanation of the schema:",description_file,append=TRUE)
  write(details$schema[i],description_file,append=TRUE)
  write("",description_file,append=TRUE)
  write("",description_file,append=TRUE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qdapDictionaries-package.R
\docType{data}
\name{positive.words}
\alias{positive.words}
\title{Positive Words}
\format{A vector with 2003 elements}
\usage{
data(positive.words)
}
\description{
A dataset containing a vector of positive words.
}
\details{
A sentence containing more negative words would be deemed a negative sentence,
whereas a sentence containing more positive words would be considered positive.
}
\references{
Hu, M., & Liu, B. (2004). Mining opinion features in customer
reviews. National Conference on Artificial Intelligence.
\url{http://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html}
}
\keyword{datasets}
| /man/positive.words.Rd | no_license | cran/qdapDictionaries | R | false | true | 734 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qdapDictionaries-package.R
\docType{data}
\name{positive.words}
\alias{positive.words}
\title{Positive Words}
\format{A vector with 2003 elements}
\usage{
data(positive.words)
}
\description{
A dataset containing a vector of positive words.
}
\details{
A sentence containing more negative words would be deemed a negative sentence,
whereas a sentence containing more positive words would be considered positive.
}
\references{
Hu, M., & Liu, B. (2004). Mining opinion features in customer
reviews. National Conference on Artificial Intelligence.
\url{http://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html}
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/computeAUC.R
\name{computeAUC}
\alias{computeAUC}
\title{Computes the AUC for a Drug Dose Viability Curve}
\usage{
computeAUC(
concentration,
viability,
Hill_fit,
conc_as_log = FALSE,
viability_as_pct = TRUE,
trunc = TRUE,
area.type = c("Fitted", "Actual"),
verbose = TRUE
)
}
\arguments{
\item{concentration}{[vector] is a vector of drug concentrations.}
\item{viability}{[vector] is a vector whose entries are the viability values observed in the presence of the
drug concentrations whose logarithms are in the corresponding entries of conc, where viability 0
indicates that all cells died, and viability 1 indicates that the drug had no effect on the cells.}
\item{Hill_fit}{[list or vector] In the order: c("Hill Slope", "E_inf", "EC50"), the parameters of a Hill Slope
as returned by logLogisticRegression. If conc_as_log is set then the function assumes logEC50 is passed in, and if
viability_as_pct flag is set, it assumes E_inf is passed in as a percent. Otherwise, E_inf is assumed to be a decimal,
and EC50 as a concentration.}
\item{conc_as_log}{[logical], if true, assumes that log10-concentration data has been given rather than concentration data.}
\item{viability_as_pct}{[logical], if false, assumes that viability is given as a decimal rather
than a percentage, and returns AUC as a decimal. Otherwise, viability is interpreted as percent, and AUC is returned 0-100.}
\item{trunc}{[logical], if true, causes viability data to be truncated to lie between 0 and 1 before
curve-fitting is performed.}
\item{area.type}{Should the area be computed using the actual data ("Actual"), or a fitted curve ("Fitted")}
\item{verbose}{[logical], if true, causes warnings thrown by the function to be printed.}
}
\value{
Numeric AUC value
}
\description{
Returns the AUC (Area Under the drug response Curve) given concentration and viability as input, normalized by the concentration
range of the experiment. The area returned is the response (1-Viability) area, i.e. area under the curve when the response curve
is plotted on a log10 concentration scale, with high AUC implying high sensitivity to the drug. The function can calculate both
the area under a fitted Hill Curve to the data, and a trapz numeric integral of the actual data provided. Alternatively, the parameters
of a Hill Slope returned by logLogisticRegression can be passed in if they are already known.
}
\examples{
dose <- c("0.0025","0.008","0.025","0.08","0.25","0.8","2.53","8")
viability <- c("108.67","111","102.16","100.27","90","87","74","57")
computeAUC(dose, viability)
}
| /man/computeAUC.Rd | no_license | Deepstatsanalysis/PharmacoGx | R | false | true | 2,658 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/computeAUC.R
\name{computeAUC}
\alias{computeAUC}
\title{Computes the AUC for a Drug Dose Viability Curve}
\usage{
computeAUC(
concentration,
viability,
Hill_fit,
conc_as_log = FALSE,
viability_as_pct = TRUE,
trunc = TRUE,
area.type = c("Fitted", "Actual"),
verbose = TRUE
)
}
\arguments{
\item{concentration}{[vector] is a vector of drug concentrations.}
\item{viability}{[vector] is a vector whose entries are the viability values observed in the presence of the
drug concentrations whose logarithms are in the corresponding entries of conc, where viability 0
indicates that all cells died, and viability 1 indicates that the drug had no effect on the cells.}
\item{Hill_fit}{[list or vector] In the order: c("Hill Slope", "E_inf", "EC50"), the parameters of a Hill Slope
as returned by logLogisticRegression. If conc_as_log is set then the function assumes logEC50 is passed in, and if
viability_as_pct flag is set, it assumes E_inf is passed in as a percent. Otherwise, E_inf is assumed to be a decimal,
and EC50 as a concentration.}
\item{conc_as_log}{[logical], if true, assumes that log10-concentration data has been given rather than concentration data.}
\item{viability_as_pct}{[logical], if false, assumes that viability is given as a decimal rather
than a percentage, and returns AUC as a decimal. Otherwise, viability is interpreted as percent, and AUC is returned 0-100.}
\item{trunc}{[logical], if true, causes viability data to be truncated to lie between 0 and 1 before
curve-fitting is performed.}
\item{area.type}{Should the area be computed using the actual data ("Actual"), or a fitted curve ("Fitted")}
\item{verbose}{[logical], if true, causes warnings thrown by the function to be printed.}
}
\value{
Numeric AUC value
}
\description{
Returns the AUC (Area Under the drug response Curve) given concentration and viability as input, normalized by the concentration
range of the experiment. The area returned is the response (1-Viability) area, i.e. area under the curve when the response curve
is plotted on a log10 concentration scale, with high AUC implying high sensitivity to the drug. The function can calculate both
the area under a fitted Hill Curve to the data, and a trapz numeric integral of the actual data provided. Alternatively, the parameters
of a Hill Slope returned by logLogisticRegression can be passed in if they are already known.
}
\examples{
dose <- c("0.0025","0.008","0.025","0.08","0.25","0.8","2.53","8")
viability <- c("108.67","111","102.16","100.27","90","87","74","57")
computeAUC(dose, viability)
}
|
# NIST constants non-SI
# One R constant per non-SI unit (atomic units, natural units, electron
# volt, unified atomic mass unit, ...), named with a kNISTnonSI prefix in
# camel case after the quantity.
# NOTE(review): the numeric values appear to match the CODATA 2010
# adjustment and to be expressed in SI units -- confirm against the NIST
# "values of the constants" reference before relying on them.
kNISTnonSIatomicUnitOf1stHyperpolarizability <- 3.206361449e-53
kNISTnonSIatomicUnitOf2ndHyperpolarizability <- 6.23538054e-65
kNISTnonSIatomicUnitOfAction <- 1.054571726e-34
kNISTnonSIatomicUnitOfCharge <- 1.602176565e-19
kNISTnonSIatomicUnitOfChargeDensity <- 1081202338000
kNISTnonSIatomicUnitOfCurrent <- 0.00662361795
kNISTnonSIatomicUnitOfElectricDipoleMoment <- 8.47835326e-30
kNISTnonSIatomicUnitOfElectricField <- 514220652000
kNISTnonSIatomicUnitOfElectricFieldGradient <- 9.717362e+21
kNISTnonSIatomicUnitOfElectricPolarizability <- 1.6487772754e-41
kNISTnonSIatomicUnitOfElectricPotential <- 27.21138505
kNISTnonSIatomicUnitOfElectricQuadrupoleMoment <- 4.486551331e-40
kNISTnonSIatomicUnitOfEnergy <- 4.35974434e-18
kNISTnonSIatomicUnitOfLength <- 5.2917721092e-11
kNISTnonSIatomicUnitOfMagneticDipoleMoment <- 1.854801936e-23
kNISTnonSIatomicUnitOfMagneticFluxDensity <- 235051.7464
kNISTnonSIatomicUnitOfMagnetizability <- 7.891036607e-29
kNISTnonSIatomicUnitOfMass <- 9.10938291e-31
kNISTnonSIatomicUnitOfMomentum <- 1.99285174e-24
kNISTnonSIatomicUnitOfPermittivity <- 1.112650056e-10
kNISTnonSIatomicUnitOfTime <- 2.418884326502e-17
kNISTnonSIatomicUnitOfVelocity <- 2187691.26379
kNISTnonSIelectronVolt <- 1.602176565e-19
kNISTnonSInaturalUnitOfAction <- 1.054571726e-34
kNISTnonSInaturalUnitOfActionInEVS <- 6.58211928e-16
kNISTnonSInaturalUnitOfEnergy <- 8.18710506e-14
kNISTnonSInaturalUnitOfEnergyInMeV <- 0.510998928
kNISTnonSInaturalUnitOfLength <- 3.86159268e-13
kNISTnonSInaturalUnitOfMass <- 9.10938291e-31
kNISTnonSInaturalUnitOfMomentum <- 2.73092429e-22
kNISTnonSInaturalUnitOfMomentumInMeVc <- 0.510998928
kNISTnonSInaturalUnitOfTime <- 1.28808866833e-21
kNISTnonSIPlanckConstantOver2PiTimesCInMeVFm <- 197.3269718
kNISTnonSIspeedOfLightInVacuum <- 299792458
kNISTnonSIunifiedAtomicMassUnit <- 1.660538921e-27
| /R/NISTConstantsNonSI.R | no_license | cran/NISTunits | R | false | false | 1,867 | r | # NIST constants non-SI
kNISTnonSIatomicUnitOf1stHyperpolarizability <- 3.206361449e-53
kNISTnonSIatomicUnitOf2ndHyperpolarizability <- 6.23538054e-65
kNISTnonSIatomicUnitOfAction <- 1.054571726e-34
kNISTnonSIatomicUnitOfCharge <- 1.602176565e-19
kNISTnonSIatomicUnitOfChargeDensity <- 1081202338000
kNISTnonSIatomicUnitOfCurrent <- 0.00662361795
kNISTnonSIatomicUnitOfElectricDipoleMoment <- 8.47835326e-30
kNISTnonSIatomicUnitOfElectricField <- 514220652000
kNISTnonSIatomicUnitOfElectricFieldGradient <- 9.717362e+21
kNISTnonSIatomicUnitOfElectricPolarizability <- 1.6487772754e-41
kNISTnonSIatomicUnitOfElectricPotential <- 27.21138505
kNISTnonSIatomicUnitOfElectricQuadrupoleMoment <- 4.486551331e-40
kNISTnonSIatomicUnitOfEnergy <- 4.35974434e-18
kNISTnonSIatomicUnitOfLength <- 5.2917721092e-11
kNISTnonSIatomicUnitOfMagneticDipoleMoment <- 1.854801936e-23
kNISTnonSIatomicUnitOfMagneticFluxDensity <- 235051.7464
kNISTnonSIatomicUnitOfMagnetizability <- 7.891036607e-29
kNISTnonSIatomicUnitOfMass <- 9.10938291e-31
kNISTnonSIatomicUnitOfMomentum <- 1.99285174e-24
kNISTnonSIatomicUnitOfPermittivity <- 1.112650056e-10
kNISTnonSIatomicUnitOfTime <- 2.418884326502e-17
kNISTnonSIatomicUnitOfVelocity <- 2187691.26379
kNISTnonSIelectronVolt <- 1.602176565e-19
kNISTnonSInaturalUnitOfAction <- 1.054571726e-34
kNISTnonSInaturalUnitOfActionInEVS <- 6.58211928e-16
kNISTnonSInaturalUnitOfEnergy <- 8.18710506e-14
kNISTnonSInaturalUnitOfEnergyInMeV <- 0.510998928
kNISTnonSInaturalUnitOfLength <- 3.86159268e-13
kNISTnonSInaturalUnitOfMass <- 9.10938291e-31
kNISTnonSInaturalUnitOfMomentum <- 2.73092429e-22
kNISTnonSInaturalUnitOfMomentumInMeVc <- 0.510998928
kNISTnonSInaturalUnitOfTime <- 1.28808866833e-21
kNISTnonSIPlanckConstantOver2PiTimesCInMeVFm <- 197.3269718
kNISTnonSIspeedOfLightInVacuum <- 299792458
kNISTnonSIunifiedAtomicMassUnit <- 1.660538921e-27
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.