content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
testlist <- list(a = 0L, b = 0L, x = c(-21589L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610387591-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 418 | r | testlist <- list(a = 0L, b = 0L, x = c(-21589L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
#' Symbolic Circle of Correlations
#' @name sym.circle.plot
#' @aliases sym.circle.plot
#' @author Oldemar Rodriguez Rojas
#' @description Plot the symbolic circle of correlations.
#' @usage sym.circle.plot(prin.corre)
#' @param prin.corre A symbolic interval data matrix with correlations between the variables and the
#' principal components, both of interval type.
#'
#' @return Plot the symbolic circle
#' @references
#' Rodriguez O. (2012). The Duality Problem in Interval Principal Components Analysis.
#' The 3rd Workshop in Symbolic Data Analysis, Madrid.
#'
#' @examples
#' data(oils)
#' res<-sym.interval.pca(oils,'centers')
#' sym.circle.plot(res$Sym.Prin.Correlations)
#'
#' @keywords Symbolic Circle
#' @export
#'
sym.circle.plot <- function(prin.corre) {
    # Palette used to distinguish the variables drawn on the circle.
    v <- c("green", "red", "blue", "cyan", "brown", "yellow", "pink", "purple", "orange",
        "gray")
    # Empty canvas slightly larger than the unit circle, then axes and circle.
    plot(-1.5:1.5, -1.5:1.5, type = "n", xlab = "C1", ylab = "C2",
        main = "Correlation Circle")
    abline(h = 0, lty = 3)
    abline(v = 0, lty = 3)
    symbols(0, 0, circles = 1, inches = FALSE, add = TRUE)
    # Columns c1/c2 hold the interval bounds of the correlations with the
    # first principal component; columns c2 + 1 / c2 + 2 hold the bounds for
    # the second component.
    c1 <- 1
    c2 <- 2
    n <- dim(prin.corre)[1]
    # Work directly on the input matrix (the previous dead pre-allocation of
    # CRTI was immediately overwritten, so it has been removed).
    CRTI <- prin.corre
    vars <- rownames(prin.corre)
    for (k in seq_len(n)) {
        # x interval: correlations of variable k with principal component 1.
        x1 <- min(CRTI[k, c1], CRTI[k, c2])
        x2 <- max(CRTI[k, c1], CRTI[k, c2])
        # y interval: correlations of variable k with principal component 2.
        y1 <- min(CRTI[k, c2 + 1], CRTI[k, c2 + 2])
        y2 <- max(CRTI[k, c2 + 1], CRTI[k, c2 + 2])
        # Choose the rectangle corners to join depending on the quadrant(s)
        # that the interval rectangle occupies.
        if (((x1 > 0) && (x2 > 0) && (y1 > 0) && (y2 > 0)) || ((x1 < 0) && (x2 < 0) &&
            (y1 < 0) && (y2 < 0))) {
            plotX.slice(x1, y2, x2, y1, v, vars, k)
        }
        if (((x1 < 0) && (x2 < 0) && (y1 > 0) && (y2 > 0)) || ((x1 > 0) && (x2 > 0) &&
            (y1 < 0) && (y2 < 0))) {
            plotX.slice(x1, y1, x2, y2, v, vars, k)
        }
        if ((y1 > 0) && (y2 > 0) && (x1 < 0) && (x2 > 0)) {
            plotX.slice(x1, y1, x2, y1, v, vars, k)
        }
        if ((y1 < 0) && (y2 < 0) && (x1 < 0) && (x2 > 0)) {
            plotX.slice(x1, y2, x2, y2, v, vars, k)
        }
        if ((x1 > 0) && (x2 > 0) && (y1 < 0) && (y2 > 0)) {
            plotX.slice(x1, y1, x1, y2, v, vars, k)
        }
        if ((x1 < 0) && (x2 < 0) && (y1 < 0) && (y2 > 0)) {
            plotX.slice(x2, y1, x2, y2, v, vars, k)
        }
        if ((x1 < 0) && (x2 > 0) && (y1 < 0) && (y2 > 0)) {
            plotX.slice(x2, y1, x2, y2, v, vars, k)
        }
    }
}
| /R/sym.circle.plot.R | no_license | rcannood/RSDA | R | false | false | 2,484 | r | #' Symbolic Circle of Correlations
#' @name sym.circle.plot
#' @aliases sym.circle.plot
#' @author Oldemar Rodriguez Rojas
#' @description Plot the symbolic circle of correlations.
#' @usage sym.circle.plot(prin.corre)
#' @param prin.corre A symbolic interval data matrix with correlations between the variables and the
#' principal components, both of interval type.
#'
#' @return Plot the symbolic circle
#' @references
#' Rodriguez O. (2012). The Duality Problem in Interval Principal Components Analysis.
#' The 3rd Workshop in Symbolic Data Analysis, Madrid.
#'
#' @examples
#' data(oils)
#' res<-sym.interval.pca(oils,'centers')
#' sym.circle.plot(res$Sym.Prin.Correlations)
#'
#' @keywords Symbolic Circle
#' @export
#'
sym.circle.plot <- function(prin.corre) {
    # Palette used to distinguish the variables drawn on the circle.
    v <- c("green", "red", "blue", "cyan", "brown", "yellow", "pink", "purple", "orange",
        "gray")
    msg = paste("Correlation Circle")
    # Empty canvas slightly larger than the unit circle, then axes and circle.
    plot(-1.5:1.5, -1.5:1.5, type = "n", xlab = "C1", ylab = "C2", main = msg)
    abline(h = 0, lty = 3)
    abline(v = 0, lty = 3)
    symbols(0, 0, circles = 1, inches = FALSE, add = TRUE)
    # Columns c1/c2 hold the interval bounds of the correlations with the
    # first principal component; columns c2 + 1 / c2 + 2 hold those for the
    # second component.
    c1 = 1
    c2 = 2
    n <- dim(prin.corre)[1]
    f <- dim(prin.corre)[2]
    # NOTE(review): this allocation is dead — it is overwritten on the next line.
    CRTI <- matrix(nrow = n, ncol = f)
    CRTI <- prin.corre
    vars <- rownames(prin.corre)
    for (k in 1:n) {
        # x interval: correlations of variable k with principal component 1.
        x1 <- min(CRTI[k, c1], CRTI[k, c2])
        x2 <- max(CRTI[k, c1], CRTI[k, c2])
        # y interval: correlations of variable k with principal component 2.
        y1 <- min(CRTI[k, c2 + 1], CRTI[k, c2 + 2])
        y2 <- max(CRTI[k, c2 + 1], CRTI[k, c2 + 2])
        # Choose the rectangle corners to join depending on the quadrant(s)
        # that the interval rectangle occupies.
        if (((x1 > 0) && (x2 > 0) && (y1 > 0) && (y2 > 0)) || ((x1 < 0) && (x2 < 0) &&
            (y1 < 0) && (y2 < 0))) {
            plotX.slice(x1, y2, x2, y1, v, vars, k)
        }
        if (((x1 < 0) && (x2 < 0) && (y1 > 0) && (y2 > 0)) || ((x1 > 0) && (x2 > 0) &&
            (y1 < 0) && (y2 < 0))) {
            plotX.slice(x1, y1, x2, y2, v, vars, k)
        }
        if ((y1 > 0) && (y2 > 0) && (x1 < 0) && (x2 > 0)) {
            plotX.slice(x1, y1, x2, y1, v, vars, k)
        }
        if ((y1 < 0) && (y2 < 0) && (x1 < 0) && (x2 > 0)) {
            plotX.slice(x1, y2, x2, y2, v, vars, k)
        }
        if ((x1 > 0) && (x2 > 0) && (y1 < 0) && (y2 > 0)) {
            plotX.slice(x1, y1, x1, y2, v, vars, k)
        }
        if ((x1 < 0) && (x2 < 0) && (y1 < 0) && (y2 > 0)) {
            plotX.slice(x2, y1, x2, y2, v, vars, k)
        }
        if ((x1 < 0) && (x2 > 0) && (y1 < 0) && (y2 > 0)) {
            plotX.slice(x2, y1, x2, y2, v, vars, k)
        }
    }
}
|
## Use cache to save time in loops, specially if results of a calculation
## don't change between iterations.
## this function, called makeCacheMatrix, creates a list, that "saves"
## the solve of the matrix, after
## the first call of cacheSolve.
## If cacheSolve is called again, before another run of makeCacheMatrix,
## the result will be grabbed from the list.
## If makeCacheMatrix is called again, the list is flushed.
## usage: mat <- makeCacheMatrix(matrix(runif(100, 5.0, 7.5), nrow=10, ncol=10))
## usage: mat <- makeCacheMatrix(matrix(rnorm(100), nrow=10, ncol=10))
## usage: mat <- makeCacheMatrix(matrix(rnorm(s*s), nrow=s, ncol=s))
## where s <- 10 (make a matrix 10 by 10, with 100 random numbers)
## Build a cache-aware matrix wrapper: a list of four accessors that store a
## matrix together with a memoized copy of its inverse.
## x: the matrix to wrap (defaults to an empty 1x1 NA matrix).
## Returns a list with setsolve/getsolve/setmatrix/getmatrix closures, the
## interface expected by cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # memoized inverse; NULL means "not computed yet"
  setmatrix <- function(y) {
    x <<- y
    inv <<- NULL  # a new matrix invalidates any cached inverse
  }
  getmatrix <- function() {
    x
  }
  setsolve <- function(solve) {
    inv <<- solve
  }
  getsolve <- function() {
    inv
  }
  list(
    setsolve = setsolve,
    getsolve = getsolve,
    setmatrix = setmatrix,
    getmatrix = getmatrix
  )
}
## This function, called cacheSolve, will try to get cached result from
## makeCacheMatrix, if it exists.
## usage: cacheSolve(mat), where "mat" is the name of the matrix made with
## the function makeCacheMatrix.
## Return the inverse of the matrix held by 'x' (a makeCacheMatrix object),
## reusing the cached inverse when one is available.
## Extra arguments in ... are forwarded to solve() on a cache miss.
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (!is.null(cached)) {
    # Cache hit: announce it (matches the original behavior) and return.
    message("getting cached data")
    return(cached)
  }
  # Cache miss: compute the inverse and store it for subsequent calls.
  inverse <- solve(x$getmatrix(), ...)
  x$setsolve(inverse)
  inverse
}
## This function, called test, calls both makeCacheMatrix and cacheSolve
## several times, and print the results to the console window.
## usage: test(x, s) where x is the number of iterations, and s is the size
## of the matrix. (nrow = s and ncol = s)
## It is NOT advisable to input an s bigger than 1000 !!
## Benchmark cacheSolve() on a fresh cache vs. a warm cache, printing the
## elapsed time of each call to the console.
## x: number of iterations; s: matrix dimension (nrow = ncol = s).
## It is NOT advisable to input an s bigger than 1000 !!
test <- function(x, s) {
  # seq_len() is safe for x == 0, whereas 1:x would iterate over c(1, 0).
  for (i in seq_len(x)) {
    cat("Iteration: ", i, sep = "")
    message("")
    temp <- makeCacheMatrix(matrix(rnorm(s * s), nrow = s, ncol = s))
    start.time <- Sys.time()
    cacheSolve(temp)  # first call: computes the inverse
    dur <- Sys.time() - start.time
    message("This is the calculation without caching")
    print(dur)
    message("")
    start.time <- Sys.time()
    cacheSolve(temp)  # second call: served from the cache
    dur <- Sys.time() - start.time
    message("This is the calculation with caching")
    print(dur)
    message("")
  }
}
## don't change between iterations.
## this function, called makeCacheMatrix, creates a list, that "saves"
## the solve of the matrix, after
## the first call of cacheSolve.
## If cacheSolve is called again, before another run of makeCacheMatrix,
## the result will be grabbed from the list.
## If makeCacheMatrix is called again, the list is flushed.
## usage: mat <- makeCacheMatrix(matrix(runif(100, 5.0, 7.5), nrow=10, ncol=10))
## usage: mat <- makeCacheMatrix(matrix(rnorm(100), nrow=10, ncol=10))
## usage: mat <- makeCacheMatrix(matrix(rnorm(s*s), nrow=s, ncol=s))
## where s <- 10 (make a matrix 10 by 10, with 100 random numbers)
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
setmatrix <- function(y) { #Populate set with the matrix
x <<- y
m <<- NULL
}
getmatrix <- function() x
setsolve <- function(solve) m <<- solve
getsolve <- function() m
list(setsolve = setsolve, getsolve = getsolve,
setmatrix = setmatrix,
getmatrix = getmatrix)
}
## This function, called cacheSolve, will try to get cached result from
## makeCacheMatrix, if it exists.
## usage: cacheSolve(mat), where "mat" is the name of the matrix made with
## the function makeCacheMatrix.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
##start.time = Sys.time()
m <- x$getsolve()
if(!is.null(m)) {
##dur = Sys.time() - start.time
##nocache <- dur
message("getting cached data")
return(m)
}
data <- x$getmatrix()
m <- solve(data, ...)
x$setsolve(m)
##dur = Sys.time() - start.time
##cache <- dur
return(m)
}
## This function, called test, calls both makeCacheMatrix and cacheSolve
## several times, and print the results to the console window.
## usage: test(x, s) where x is the number of iterations, and s is the size
## of the matrix. (nrow = s and ncol = s)
## It is NOT advisable to input an s bigger than 1000 !!
test = function(x, s){
for (i in 1:x) {
cat("Iteration: ", i, sep = "")
message("")
temp = makeCacheMatrix(matrix(rnorm(s*s), nrow=s, ncol=s))
start.time = Sys.time()
cacheSolve(temp)
dur = Sys.time() - start.time
message("This is the calculation without caching")
print(dur)
message("")
start.time = Sys.time()
cacheSolve(temp)
dur = Sys.time() - start.time
message("This is the calculation with caching")
print(dur)
message("")
}
} |
<html>
<head>
<meta name="TextLength" content="SENT_NUM:2, WORD_NUM:58">
</head>
<body bgcolor="white">
<a href="#0" id="0">We then create a training instance for each pair of two consecutive basic edits: if two consecutive basic edits need to be merged, we will mark the outcome as True , otherwise it is False .</a>
<a href="#1" id="1">Due to the effort involved in comparing revisions with the original texts, students often fail to learn from these revisions [16] .</a>
</body>
</html> | /ACL-Dataset/Summary_rnd/P14-2098.xhtml.A.R | no_license | Angela7126/SLNSumEval | R | false | false | 489 | r | <html>
<head>
<meta name="TextLength" content="SENT_NUM:2, WORD_NUM:58">
</head>
<body bgcolor="white">
<a href="#0" id="0">We then create a training instance for each pair of two consecutive basic edits: if two consecutive basic edits need to be merged, we will mark the outcome as True , otherwise it is False .</a>
<a href="#1" id="1">Due to the effort involved in comparing revisions with the original texts, students often fail to learn from these revisions [16] .</a>
</body>
</html> |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoplot.klm.R
\name{autoplot.klm}
\alias{autoplot.klm}
\title{Builds a ggplot object to plot parameter estimates against distance}
\usage{
autoplot.klm(x, ...)
}
\arguments{
\item{x}{A \code{\link{klm}} object.}
\item{...}{additional arguments (currently unused).}
}
\value{
a \code{\link[ggplot2]{ggplot}} object
}
\description{
Builds a ggplot object to plot parameter estimates against distance
}
\seealso{
Other RSPP plot functions:
\code{\link{autoplot.klmci}()},
\code{\link{autoplot.klmerci}()},
\code{\link{autoplot.klmer}()},
\code{\link{makePlotData_klmci}()},
\code{\link{makePlotData_klmerci}()},
\code{\link{makePlotData_klmer}()},
\code{\link{makePlotData_klm}()},
\code{\link{plot.klmci}()},
\code{\link{plot.klmerci}()},
\code{\link{plot.klmer}()},
\code{\link{plot.klm}()}
}
\concept{RSPP plot functions}
| /man/autoplot.klm.Rd | no_license | BagchiLab-Uconn/RSPPlme4 | R | false | true | 903 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoplot.klm.R
\name{autoplot.klm}
\alias{autoplot.klm}
\title{Builds a ggplot object to plot parameter estimates against distance}
\usage{
autoplot.klm(x, ...)
}
\arguments{
\item{x}{A \code{\link{klm}} object.}
\item{...}{additional arguments (currently unused).}
}
\value{
a \code{\link[ggplot2]{ggplot}} object
}
\description{
Builds a ggplot object to plot parameter estimates against distance
}
\seealso{
Other RSPP plot functions:
\code{\link{autoplot.klmci}()},
\code{\link{autoplot.klmerci}()},
\code{\link{autoplot.klmer}()},
\code{\link{makePlotData_klmci}()},
\code{\link{makePlotData_klmerci}()},
\code{\link{makePlotData_klmer}()},
\code{\link{makePlotData_klm}()},
\code{\link{plot.klmci}()},
\code{\link{plot.klmerci}()},
\code{\link{plot.klmer}()},
\code{\link{plot.klm}()}
}
\concept{RSPP plot functions}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/drawing.R
\name{drawUtilityPlots}
\alias{drawUtilityPlots}
\title{Draw marginal value functions and chart of alternative utilities}
\usage{
drawUtilityPlots(problem, solution, printLabels = TRUE, criteria = NULL,
plotsPerRow = 2, descending = NULL)
}
\arguments{
\item{problem}{Problem.}
\item{solution}{Solution.}
\item{printLabels}{Whether to print labels.}
\item{criteria}{Vector containing \emph{0} for utility chart and/or indices
of criteria for which marginal value functions should be plotted.
If this parameter was \code{NULL} functions for all criteria and utility chart
will be plotted (default \code{NULL}).}
\item{plotsPerRow}{Number of plots per row (default \code{2}).}
\item{descending}{Mode of sorting alternatives on utility chart:
\itemize{
\item \code{NULL} - unsorted, preserved \code{problem$perf} order,
\item \code{TRUE} - sorted descending by value of utility,
\item \code{FALSE} - sorted ascending by value of utility.
}}
}
\description{
This function draws marginal value functions and alternative utilities chart.
}
\details{
This function is deprecated. Use \code{\link{plotVF}} and \code{\link{plotComprehensiveValue}}.
}
\seealso{
\code{\link{plotVF}}
\code{\link{plotComprehensiveValue}}
}
| /man/drawUtilityPlots.Rd | no_license | cran/rorutadis | R | false | false | 1,358 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/drawing.R
\name{drawUtilityPlots}
\alias{drawUtilityPlots}
\title{Draw marginal value functions and chart of alternative utilities}
\usage{
drawUtilityPlots(problem, solution, printLabels = TRUE, criteria = NULL,
plotsPerRow = 2, descending = NULL)
}
\arguments{
\item{problem}{Problem.}
\item{solution}{Solution.}
\item{printLabels}{Whether to print labels.}
\item{criteria}{Vector containing \emph{0} for utility chart and/or indices
of criteria for which marginal value functions should be plotted.
If this parameter was \code{NULL} functions for all criteria and utility chart
will be plotted (default \code{NULL}).}
\item{plotsPerRow}{Number of plots per row (default \code{2}).}
\item{descending}{Mode of sorting alternatives on utility chart:
\itemize{
\item \code{NULL} - unsorted, preserved \code{problem$perf} order,
\item \code{TRUE} - sorted descending by value of utility,
\item \code{FALSE} - sorted ascending by value of utility.
}}
}
\description{
This function draws marginal value functions and alternative utilities chart.
}
\details{
This function is deprecated. Use \code{\link{plotVF}} and \code{\link{plotComprehensiveValue}}.
}
\seealso{
\code{\link{plotVF}}
\code{\link{plotComprehensiveValue}}
}
|
# 09/20/2020
# 13 - Date and Times
library(tidyverse)
library(lubridate)
library(nycflights13)
today()
now()
ymd("2017-01-31")
mdy("January 31st, 2017")
dmy("31-Jan-2017")
ymd(20170131)
ymd_hms("2017-01-31 20:11:59")
mdy_hm("01/31/2017 08:01")
ymd(20170131, tz = "UTC")
flights %>%
select(year, month, day, hour, minute) %>%
mutate(
departure = make_datetime(year,month, day, hour, minute)
)
make_datetime_100 <- function(year,month,day,time) {
make_datetime(year,month,day,time %/% 100, time %% 100)
}
flights_dt <- flights %>%
filter(!is.na(dep_time), !is.na(arr_time)) %>%
mutate(
dep_time = make_datetime_100(year, month, day, dep_time),
arr_time = make_datetime_100(year, month, day, arr_time),
sched_dep_time = make_datetime_100(
year, month, day, sched_dep_time
),
sched_arr_time = make_datetime_100(
year, month, day, sched_arr_time
)
) %>%
select(origin, dest, ends_with("delay"), ends_with("time"))
flights_dt
flights_dt %>%
ggplot(aes(dep_time)) +
geom_freqpoly(binwidth = 86400)
flights_dt %>%
filter(dep_time < ymd(20130102)) %>%
ggplot(aes(dep_time)) +
geom_freqpoly(binwidth = 600)
as_datetime(today())
as_date(now())
as_datetime(60 * 60 * 10)
as_date(365 * 10 + 2)
datetime <- ymd_hms("2016-07-08 12:34:56")
year(datetime)
month(datetime)
mday(datetime)
yday(datetime)
wday(datetime)
clay.bday <- ymd("1975-09-05")
wday(clay.bday)
month(datetime, label = TRUE)
wday(datetime, label = TRUE, abbr = FALSE)
flights_dt %>%
mutate(wday = wday(dep_time, label = TRUE)) %>%
ggplot(aes(x = wday)) +
geom_bar()
flights_dt %>%
mutate(minute = minute(dep_time)) %>%
group_by(minute) %>%
summarize(
avg_delay = mean(arr_delay, na.rm = TRUE),
n = n()) %>%
ggplot(aes(minute, avg_delay)) +
geom_line()
sched_dep <- flights_dt %>%
mutate(minute = minute(sched_dep_time)) %>%
group_by(minute) %>%
summarize(
avg_delay = mean(arr_delay, na.rm = TRUE),
n = n())
sched_dep %>%
ggplot(aes(minute, avg_delay)) +
geom_line()
sched_dep %>%
ggplot(aes(minute, n)) +
geom_line()
flights_dt %>%
count(week = floor_date(dep_time, "week")) %>%
ggplot(aes(week, n)) +
geom_line()
(datetime <- ymd_hms("2016-07-08 12:34:56"))
datetime
year(datetime) <- 2020
month(datetime) <- 01
hour(datetime) <- hour(datetime) + 1
datetime
update(datetime, year = 2020, month=2, mday=2, hour=2)
ymd("2015-02-01") %>%
update(mday = 30)
ymd("2015-02-01") %>%
update(hour = 400)
flights_dt %>%
mutate(dep_hour = update(dep_time, yday =1)) %>%
ggplot(aes(dep_hour)) +
geom_freqpoly(binwidth = 300)
h_age <- today() - ymd(19791014)
h_age
as.duration(h_age)
dseconds(15)
dminutes(10)
dhours(c(12,24))
ddays(0:5)
dweeks(3)
dyears(1)
2 * dyears(1)
dyears(1) + dweeks(12) + dhours(15)
tommorrow <- today() + ddays(1)
tommorrow
last_year <- today() - dyears(1)
last_year
one_pm <- ymd_hms(
"2016-03-12 13:00:00",
tz = "America/New_York"
)
one_pm
one_pm + ddays(10)
one_pm
one_pm + days(1)
# Period constructors: calendar-aware units, unlike the d*() durations.
seconds(15)
minutes(10)
hours(c(12,24))
days(7)
months(1:6)
weeks(3)
years(1)
# Fixed: months() is the period constructor; month() is the accessor that
# extracts the month from a date, so month(6) was an error here.
10 * (months(6) + days(1))
days(50) + hours(25) + minutes(2)
ymd("2016-01-01") + dyears(1)
# Fixed: the date string contained a stray ")" ("2016-01-01)"), which made
# ymd() fail to parse and return NA.
ymd("2016-01-01") + years(1)
one_pm + ddays(1)
one_pm + days(1)
flights_dt %>%
filter(arr_time < dep_time)
flights_dt <- flights_dt %>%
mutate(
overnight = arr_time < dep_time,
arr_time = arr_time + days(overnight * 1),
sched_arr_time = sched_arr_time + days(overnight * 1)
)
years(1) / days(1)
next_year <- today() + years(1)
(today() %--% next_year) / ddays(1)
(today() %--% next_year) %/% days(1)
Sys.timezone()
length(OlsonNames())
head(OlsonNames())
(x1 <- ymd_hms("2015-06-01 12:00:00", tz = "America/New_York"))
(x2 <- ymd_hms("2015-06-01 18:00:00", tz = "Europe/Copenhagen"))
(x3 <- ymd_hms("2015-06-02 04:00:00", tz = "Pacific/Auckland"))
x1 - x2
x1 - x3
x4 <- c(x1,x2,x3)
x4
x4a <- with_tz(x4, tzone = "Australia/Lord_Howe")
| /insights/13-date-lubridate.R | no_license | eacatalyst/insights-with-r | R | false | false | 4,000 | r | # 09/20/2020
# 13 - Date and Times
library(tidyverse)
library(lubridate)
library(nycflights13)
today()
now()
ymd("2017-01-31")
mdy("January 31st, 2017")
dmy("31-Jan-2017")
ymd(20170131)
ymd_hms("2017-01-31 20:11:59")
mdy_hm("01/31/2017 08:01")
ymd(20170131, tz = "UTC")
flights %>%
select(year, month, day, hour, minute) %>%
mutate(
departure = make_datetime(year,month, day, hour, minute)
)
make_datetime_100 <- function(year,month,day,time) {
make_datetime(year,month,day,time %/% 100, time %% 100)
}
flights_dt <- flights %>%
filter(!is.na(dep_time), !is.na(arr_time)) %>%
mutate(
dep_time = make_datetime_100(year, month, day, dep_time),
arr_time = make_datetime_100(year, month, day, arr_time),
sched_dep_time = make_datetime_100(
year, month, day, sched_dep_time
),
sched_arr_time = make_datetime_100(
year, month, day, sched_arr_time
)
) %>%
select(origin, dest, ends_with("delay"), ends_with("time"))
flights_dt
flights_dt %>%
ggplot(aes(dep_time)) +
geom_freqpoly(binwidth = 86400)
flights_dt %>%
filter(dep_time < ymd(20130102)) %>%
ggplot(aes(dep_time)) +
geom_freqpoly(binwidth = 600)
as_datetime(today())
as_date(now())
as_datetime(60 * 60 * 10)
as_date(365 * 10 + 2)
datetime <- ymd_hms("2016-07-08 12:34:56")
year(datetime)
month(datetime)
mday(datetime)
yday(datetime)
wday(datetime)
clay.bday <- ymd("1975-09-05")
wday(clay.bday)
month(datetime, label = TRUE)
wday(datetime, label = TRUE, abbr = FALSE)
flights_dt %>%
mutate(wday = wday(dep_time, label = TRUE)) %>%
ggplot(aes(x = wday)) +
geom_bar()
flights_dt %>%
mutate(minute = minute(dep_time)) %>%
group_by(minute) %>%
summarize(
avg_delay = mean(arr_delay, na.rm = TRUE),
n = n()) %>%
ggplot(aes(minute, avg_delay)) +
geom_line()
sched_dep <- flights_dt %>%
mutate(minute = minute(sched_dep_time)) %>%
group_by(minute) %>%
summarize(
avg_delay = mean(arr_delay, na.rm = TRUE),
n = n())
sched_dep %>%
ggplot(aes(minute, avg_delay)) +
geom_line()
sched_dep %>%
ggplot(aes(minute, n)) +
geom_line()
flights_dt %>%
count(week = floor_date(dep_time, "week")) %>%
ggplot(aes(week, n)) +
geom_line()
(datetime <- ymd_hms("2016-07-08 12:34:56"))
datetime
year(datetime) <- 2020
month(datetime) <- 01
hour(datetime) <- hour(datetime) + 1
datetime
update(datetime, year = 2020, month=2, mday=2, hour=2)
ymd("2015-02-01") %>%
update(mday = 30)
ymd("2015-02-01") %>%
update(hour = 400)
flights_dt %>%
mutate(dep_hour = update(dep_time, yday =1)) %>%
ggplot(aes(dep_hour)) +
geom_freqpoly(binwidth = 300)
h_age <- today() - ymd(19791014)
h_age
as.duration(h_age)
dseconds(15)
dminutes(10)
dhours(c(12,24))
ddays(0:5)
dweeks(3)
dyears(1)
2 * dyears(1)
dyears(1) + dweeks(12) + dhours(15)
tommorrow <- today() + ddays(1)
tommorrow
last_year <- today() - dyears(1)
last_year
one_pm <- ymd_hms(
"2016-03-12 13:00:00",
tz = "America/New_York"
)
one_pm
one_pm + ddays(10)
one_pm
one_pm + days(1)
# Period constructors: calendar-aware units, unlike the d*() durations.
seconds(15)
minutes(10)
hours(c(12,24))
days(7)
months(1:6)
weeks(3)
years(1)
# Fixed: months() is the period constructor; month() is the accessor that
# extracts the month from a date, so month(6) was an error here.
10 * (months(6) + days(1))
days(50) + hours(25) + minutes(2)
ymd("2016-01-01") + dyears(1)
# Fixed: the date string contained a stray ")" ("2016-01-01)"), which made
# ymd() fail to parse and return NA.
ymd("2016-01-01") + years(1)
one_pm + ddays(1)
one_pm + days(1)
flights_dt %>%
filter(arr_time < dep_time)
flights_dt <- flights_dt %>%
mutate(
overnight = arr_time < dep_time,
arr_time = arr_time + days(overnight * 1),
sched_arr_time = sched_arr_time + days(overnight * 1)
)
years(1) / days(1)
next_year <- today() + years(1)
(today() %--% next_year) / ddays(1)
(today() %--% next_year) %/% days(1)
Sys.timezone()
length(OlsonNames())
head(OlsonNames())
(x1 <- ymd_hms("2015-06-01 12:00:00", tz = "America/New_York"))
(x2 <- ymd_hms("2015-06-01 18:00:00", tz = "Europe/Copenhagen"))
(x3 <- ymd_hms("2015-06-02 04:00:00", tz = "Pacific/Auckland"))
x1 - x2
x1 - x3
x4 <- c(x1,x2,x3)
x4
x4a <- with_tz(x4, tzone = "Australia/Lord_Howe")
|
context("test build_lm part 2")
test_that("binary prediction with character target column", {
  # Fixture: flight-cancellation data with a character "Y"/"N" target, a
  # character predictor, a factor predictor containing NA, and a numeric
  # predictor containing Inf/-Inf/NA to exercise row filtering.
  test_data <- structure(
    list(
      `CANCELLED X` = c("N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "Y", "N", "Y", "N"),
      `Carrier Name` = c("Delta Air Lines", "American Eagle", "American Airlines", "Southwest Airlines", "SkyWest Airlines", "Southwest Airlines", "Southwest Airlines", "Delta Air Lines", "Southwest Airlines", "Atlantic Southeast Airlines", "American Airlines", "Southwest Airlines", "US Airways", "US Airways", "Delta Air Lines", "Atlantic Southeast Airlines", NA, "Atlantic Southeast Airlines", "Delta Air Lines", "Delta Air Lines"),
      CARRIER = factor(c(NA, "MQ", "AA", "DL", "MQ", "AA", "DL", "DL", "MQ", "AA", "AA", "WN", "US", "US", "DL", "EV", "9E", "EV", "DL", "DL")), # test with factor with NA
      # testing filtering of Inf, -Inf, NA here.
      DISTANCE = c(Inf, -Inf, NA, 187, 273, 1062, 583, 240, 1123, 851, 852, 862, 361, 507, 1020, 1092, 342, 489, 1184, 545)), row.names = c(NA, -20L),
    class = c("tbl_df", "tbl", "data.frame"), .Names = c("CANCELLED X", "Carrier Name", "CARRIER", "DISTANCE"))
  # Make target variable logical. (We will support only logical as logistic regression target.)
  test_data <- test_data %>% dplyr::mutate(`CANCELLED X` = `CANCELLED X` == 'Y')
  # duplicate rows to make some predictable data
  # otherwise, the number of rows of the result of prediction becomes 0
  test_data <- dplyr::bind_rows(test_data, test_data)
  # Fit a logistic regression (glm) with normalized predictors and marginal
  # effects plus their confidence intervals enabled.
  model_data <- build_lm.fast(test_data, `CANCELLED X`, `Carrier Name`, CARRIER, DISTANCE,
                              normalize_predictors = TRUE,
                              model_type = "glm", smote=FALSE, with_marginal_effects=TRUE, with_marginal_effects_confint=TRUE)
  # Smoke-test the prediction and diagnostics entry points on the model.
  ret <- test_data %>% select(-`CANCELLED X`) %>% add_prediction(model_df=model_data)
  ret <- model_data %>% prediction(data="newdata", data_frame=test_data)
  ret <- model_data %>% tidy_rowwise(model, type="vif")
  ret <- model_data %>% glance_rowwise(model, pretty.name=TRUE)
  # glance output must expose the full set of pretty-named summary columns.
  expect_equal(colnames(ret), c("AUC","F1 Score","Accuracy Rate","Misclass. Rate","Precision",
                                "Recall","P Value","Rows","Rows for TRUE","Rows for FALSE",
                                "Log Likelihood","AIC","BIC","Residual Deviance","Residual DF","Null Deviance",
                                "Null Model DF"))
  expect_equal(ret$`Rows`, 34)
  expect_equal(ret$`Rows for TRUE`, 4) # This ends up to be 4 after doubling
  expect_equal(ret$`Rows for FALSE`, 30) # This ends up to be 30 after doubling and removing NA rows.
  ret <- model_data %>% tidy_rowwise(model)
  ret <- model_data %>% augment_rowwise(model)
  expect_true(nrow(ret) > 0)
})
test_that("binary prediction with factor target column", {
  # Fixture: same data as the character-target case, but the target is a
  # factor with extra unused levels ("A", "B") to verify factor handling.
  test_data <- tibble::tibble(
    `CANCELLED X` = factor(c("N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "Y", "N", "Y", "N"), levels=c("A","N","Y","B")),
    `Carrier Name` = c("Delta Air Lines", "American Eagle", "American Airlines", "Southwest Airlines", "SkyWest Airlines", "Southwest Airlines", "Southwest Airlines", "Delta Air Lines", "Southwest Airlines", "Atlantic Southeast Airlines", "American Airlines", "Southwest Airlines", "US Airways", "US Airways", "Delta Air Lines", "Atlantic Southeast Airlines", NA, "Atlantic Southeast Airlines", "Delta Air Lines", "Delta Air Lines"),
    CARRIER = factor(c(NA, "MQ", "AA", "DL", "MQ", "AA", "DL", "DL", "MQ", "AA", "AA", "WN", "US", "US", "DL", "EV", "9E", "EV", "DL", "DL")), # test with factor with NA
    # testing filtering of Inf, -Inf, NA here.
    DISTANCE = c(Inf, -Inf, NA, 187, 273, 1062, 583, 240, 1123, 851, 852, 862, 361, 507, 1020, 1092, 342, 489, 1184, 545))
  # Make target variable logical. (We will support only logical as logistic regression target.)
  test_data <- test_data %>% dplyr::mutate(`CANCELLED X` = `CANCELLED X` == 'Y')
  # duplicate rows to make some predictable data
  # otherwise, the number of rows of the result of prediction becomes 0
  test_data <- dplyr::bind_rows(test_data, test_data)
  # Fit with marginal effects but without their confidence intervals.
  model_data <- build_lm.fast(test_data, `CANCELLED X`, `Carrier Name`, CARRIER, DISTANCE, model_type = "glm", smote=FALSE, with_marginal_effects=TRUE, with_marginal_effects_confint=FALSE)
  ret <- model_data %>% prediction(data="newdata", data_frame=test_data)
  ret <- model_data %>% glance_rowwise(model, pretty.name=TRUE)
  # Row counts after doubling the data and dropping rows with NA/Inf values.
  expect_equal(ret$`Rows`, 34)
  expect_equal(ret$`Rows for TRUE`, 4) # This ends up to be 4 after doubling
  expect_equal(ret$`Rows for FALSE`, 30) # This ends up to be 30 after doubling and removing NA rows.
  ret <- model_data %>% tidy_rowwise(model)
  ret <- model_data %>% augment_rowwise(model)
  expect_true(nrow(ret) > 0)
})
test_that("binary prediction with variable_metric argument", {
  # Fixture: factor target with extra unused levels, plus NA/Inf values in
  # the predictors to exercise row filtering.
  test_data <- structure(
    list(
      `CANCELLED X` = factor(c("N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "Y", "N", "Y", "N"), levels=c("A","N","Y","B")),
      `Carrier Name` = c("Delta Air Lines", "American Eagle", "American Airlines", "Southwest Airlines", "SkyWest Airlines", "Southwest Airlines", "Southwest Airlines", "Delta Air Lines", "Southwest Airlines", "Atlantic Southeast Airlines", "American Airlines", "Southwest Airlines", "US Airways", "US Airways", "Delta Air Lines", "Atlantic Southeast Airlines", NA, "Atlantic Southeast Airlines", "Delta Air Lines", "Delta Air Lines"),
      CARRIER = factor(c(NA, "MQ", "AA", "DL", "MQ", "AA", "DL", "DL", "MQ", "AA", "AA", "WN", "US", "US", "DL", "EV", "9E", "EV", "DL", "DL")), # test with factor with NA
      # testing filtering of Inf, -Inf, NA here.
      DISTANCE = c(Inf, -Inf, NA, 187, 273, 1062, 583, 240, 1123, 851, 852, 862, 361, 507, 1020, 1092, 342, 489, 1184, 545)), row.names = c(NA, -20L),
    class = c("tbl_df", "tbl", "data.frame"), .Names = c("CANCELLED X", "Carrier Name", "CARRIER", "DISTANCE"))
  # Make target variable logical. (We will support only logical as logistic regression target.)
  test_data <- test_data %>% dplyr::mutate(`CANCELLED X` = `CANCELLED X` == 'Y')
  # duplicate rows to make some predictable data
  # otherwise, the number of rows of the result of prediction becomes 0
  test_data <- dplyr::bind_rows(test_data, test_data)
  # Each supported variable_metric value should round-trip through both
  # build_lm.fast() and tidy_rowwise() without error.
  model_data <- build_lm.fast(test_data, `CANCELLED X`, `Carrier Name`, CARRIER, DISTANCE, model_type = "glm", smote=FALSE, variable_metric="odds_ratio")
  ret <- model_data %>% tidy_rowwise(model, variable_metric="odds_ratio")
  model_data <- build_lm.fast(test_data, `CANCELLED X`, `Carrier Name`, CARRIER, DISTANCE, model_type = "glm", smote=FALSE, variable_metric="coefficient")
  ret <- model_data %>% tidy_rowwise(model, variable_metric="coefficient")
  model_data <- build_lm.fast(test_data, `CANCELLED X`, `Carrier Name`, CARRIER, DISTANCE, model_type = "glm", smote=FALSE, variable_metric="ame")
  ret <- model_data %>% tidy_rowwise(model, variable_metric="ame")
  # The AME output must contain the "ame" column and at least one row.
  expect_true(c("ame") %in% colnames(ret))
  expect_true(nrow(ret) > 0)
})
# Shared fixture for the regression tests below: 20 flights, all-numeric
# predictors plus one categorical carrier column.
test_data <- tibble::tibble(
  `CANCELLED X` = c("N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "Y", "N", "Y", "N"),
  `Carrier Name` = c("Delta Air Lines", "American Eagle", "American Airlines", "Southwest Airlines", "SkyWest Airlines", "Southwest Airlines", "Southwest Airlines", "Delta Air Lines", "Southwest Airlines", "Atlantic Southeast Airlines", "American Airlines", "Southwest Airlines", "US Airways", "US Airways", "Delta Air Lines", "Atlantic Southeast Airlines", NA, "Atlantic Southeast Airlines", "Delta Air Lines", "Delta Air Lines"),
  CARRIER = factor(c("AA", "MQ", "AA", "DL", "MQ", "AA", "DL", "DL", "MQ", "AA", "AA", "WN", "US", "US", "DL", "EV", "9E", "EV", "DL", "DL")),
  DISTANCE = c(10, 12, 12, 187, 273, 1062, 583, 240, 1123, 851, 852, 862, 361, 507, 1020, 1092, 342, 489, 1184, 545),
  ARR_TIME = c(10, 32, 321, 342, 123, 98, 10, 21, 80, 211, 121, 87, 821, 213, 213, 923, 121, 76, 34, 50),
  DERAY_TIME = c(12, 42, 321, 31, 3, 43, 342, 764, 123, 43, 50, 12, 876, 12, 34, 45, 84, 25, 87, 352))
# Logistic-regression tests require a logical target.
test_data <- test_data %>% dplyr::mutate(`CANCELLED X` = `CANCELLED X` == "Y")
# Two equal-sized groups for the grouped-model tests.
test_data$klass <- rep(c("A", "B"), each = 10)
test_that("add_prediction with linear regression", {
model_df <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
target_fun="log",
predictor_funs=list(ARR_TIME="log", DELAY_TIME="none", "Carrier Name"="none"),
model_type = "lm")
ret <- test_data %>% select(-DISTANCE) %>% add_prediction(model_df=model_df)
df2 <- test_data %>% select(-DISTANCE)
ret <- df2 %>% add_prediction(model_df=model_df)
expect_equal(colnames(df2), colnames(ret)[1:length(colnames(df2))]) # Check that the df2 column order is kept.
expect_error({
ret <- test_data %>% select(-DISTANCE, -ARR_TIME) %>% add_prediction(model_df=model_df)
}, regexp=".*ARR_TIME.*Columns are required for the model, but do not exist.*")
})
test_that("Linear Regression with test rate", {
ret <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
model_type = "lm",
test_rate = 0.1,
test_split_type = "ordered") # testing ordered split too.
res <- ret %>% tidy_rowwise(model)
expect_true("Carrier Name: American Airlines" %in% res$term)
res <- ret %>% tidy_rowwise(model, type="vif")
expect_true("Carrier Name" %in% res$term)
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
training_rownum <- nrow(test_data) - test_rownum
suppressWarnings({
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
expect_equal(res$`Rows`, 17)
variables <- (ret %>% tidy_rowwise(model, type="importance") %>% arrange(desc(importance)))$variable
names(variables) <- NULL
res <- ret %>% lm_partial_dependence()
expect_equal(levels(res$x_name), variables) # Factor order of the PDP should be the same as the importance.
expect_true(all(c("conf_high", "conf_low", "bin_sample_size") %in% colnames(res)))
})
})
test_that("Linear Regression with outlier filtering", {
ret <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
model_type = "lm",
test_rate = 0.3,
normalize_predictors = TRUE, # testing target normalization too.
target_outlier_filter_type="percentile",
target_outlier_filter_threshold=0.9) # testing outlier filter too.
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
#training_rownum <- nrow(test_data) - test_rownum
training_rownum <- nrow(ret$source.data[[1]]) - test_rownum
suppressWarnings({
pred_new <- ret %>% prediction(data="newdata", data_frame=test_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
expect_equal(res$`Rows`, 12)
})
})
test_that("Group Linear Regression with test_rate", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>%
build_lm.fast(`DISTANCE`,
`ARR_TIME`,
model_type = "lm",
test_rate = 0.1)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- ret %>% prediction(data="newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
})
})
test_that("GLM - Normal Destribution with test_rate", {
ret <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
model_type = "glm",
family = "gaussian",
test_rate = 0.1)
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
training_rownum <- nrow(test_data) - test_rownum
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=test_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% evaluate_lm_training_and_test(pretty.name=TRUE)
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
res <- ret %>% tidy_rowwise(model, type="permutation_importance")
})
})
test_that("Group GLM - Normal Destribution with test_rate", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>%
build_lm.fast(`DISTANCE`,
`ARR_TIME`,
model_type = "glm",
family = "gaussian",
test_rate = 0.1)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
expected_cols <- c("klass", "DISTANCE", "ARR_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
res <- ret %>% tidy_rowwise(model, type="permutation_importance")
})
})
test_that("GLM - Gamma Destribution with test_rate", {
ret <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
model_type = "glm",
family = "Gamma",
test_rate = 0.1)
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
training_rownum <- nrow(test_data) - test_rownum
suppressWarnings({
res <- prediction(ret, data = "training_and_test", pretty.name=TRUE)
pred_new <- prediction(ret, data = "newdata", data_frame=test_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
})
})
test_that("Group GLM - Gamma Destribution with test_rate", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>%
build_lm.fast(`DISTANCE`,
`ARR_TIME`,
model_type = "glm",
family = "Gamma",
test_rate = 0.1)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
})
})
test_that("GLM - Inverse Gaussian Destribution with test_rate", {
ret <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
model_type = "glm",
family = "inverse.gaussian",
test_rate = 0.1)
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
training_rownum <- nrow(test_data) - test_rownum
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=test_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
})
})
test_that("Group GLM - Inverse Gaussian Destribution with test_rate", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>%
build_lm.fast(`DISTANCE`,
`ARR_TIME`,
model_type = "glm",
family = "inverse.gaussian",
test_rate = 0.1)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
})
})
test_that("add_prediction with poisson regression", {
model_df <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
predictor_funs=list(ARR_TIME="log", DELAY_TIME="none", "Carrier Name"="none"),
model_type = "glm",
family = "poisson",
importance_measure="firm")
ret <- test_data %>% select(-DISTANCE) %>% add_prediction(model_df=model_df)
})
test_that("GLM - poisson Destribution with test_rate", {
ret <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
model_type = "glm",
family = "poisson",
test_rate = 0.1,
importance_measure="firm")
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
training_rownum <- nrow(test_data) - test_rownum
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=test_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
res <- ret %>% tidy_rowwise(model, type="permutation_importance")
})
})
test_that("Group GLM - Poisson Destribution with test_rate", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>%
build_lm.fast(`DISTANCE`,
`ARR_TIME`,
model_type = "glm",
family = "poisson",
test_rate = 0.3)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
res <- ret %>% tidy_rowwise(model, type="permutation_importance")
res <- ret %>% lm_partial_dependence()
})
})
test_that("GLM - Negative Binomial Destribution with test_rate", {
ret <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
model_type = "glm",
family = "negativebinomial",
test_rate = 0.1)
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
training_rownum <- nrow(test_data) - test_rownum
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=test_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low","conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
res <- ret %>% tidy_rowwise(model, type="permutation_importance")
res <- ret %>% lm_partial_dependence()
})
})
test_that("Group GLM - Negative Binomial Destribution with test_rate", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>%
build_lm.fast(`DISTANCE`,
`ARR_TIME`,
model_type = "glm",
family = "negativebinomial",
test_rate = 0.1)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low","conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
res <- ret %>% tidy_rowwise(model, type="permutation_importance")
res <- ret %>% lm_partial_dependence()
})
})
test_that("add_prediction with logistic regression", {
model_df <- test_data %>% build_lm.fast(`CANCELLED X`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
predictor_funs=list(ARR_TIME="log", DELAY_TIME="none", "Carrier Name"="none"),
model_type = "glm",
importance_measure="firm")
ret <- test_data %>% select(-`CANCELLED X`) %>% add_prediction(model_df=model_df)
expect_true(all(c("predicted_probability", "linear_predictor","predicted_label") %in% colnames(ret)))
})
test_that("Logistic Regression with test_rate", {
ret <- test_data %>% build_lm.fast(`CANCELLED X`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
family = "binomial",
model_type = "glm",
test_rate = 0.1)
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
training_rownum <- nrow(test_data) - test_rownum
variables <- (ret %>% tidy_rowwise(model, type="importance") %>% arrange(desc(importance)))$variable
names(variables) <- NULL
res <- ret %>% lm_partial_dependence()
expect_equal(levels(res$x_name), variables) # Factor order of the PDP should be the same as the importance.
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=test_data)
pred_training_and_test <- ret %>% prediction_binary(data = 'training_and_test', threshold = 0.5)
pred_training_and_test_conf_mat <- ret %>% prediction_training_and_test(prediction_type = 'conf_mat', threshold = 0.5)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("CANCELLED X", "Carrier Name", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance", "predicted_response", "predicted_label")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("CANCELLED X", "Carrier Name", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response", "predicted_label")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% tidy_rowwise(model, pretty.name=TRUE)
expected_cols <- c("Term", "Coefficient", "Std Error", "t Value", "P Value", "Conf High", "Conf Low", "Odds Ratio", "Base Level")
expect_true(all(expected_cols %in% colnames(res)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
res <- ret %>% evaluate_binary_training_and_test(`CANCELLED X`, threshold = 0.5, pretty.name=TRUE)
expect_equal(nrow(res), 2) # 2 for training and test.
res <- ret %>% lm_partial_dependence()
})
})
test_that("Group Logistic Regression with test_rate", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>%
build_lm.fast(`CANCELLED X`,
`ARR_TIME`,
model_type = "glm",
family = "binomial",
link = "logit",
test_rate = 0.1)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
# Since broom 0.7.0, I sometimes see "residuals" missing here, but not consistently. Will keep watching.
expected_cols <- c("klass", "CANCELLED X", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance", "predicted_response", "predicted_label")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "CANCELLED X", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response", "predicted_label")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
})
})
test_that("Group Logistic Regression with test_rate with weight", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>% mutate(Weight=sin(1:n())+1) %>%
build_lm.fast(`CANCELLED X`,
`ARR_TIME`,
weight=`Weight`,
model_type = "glm",
family = "binomial",
link = "logit",
test_rate = 0.1)
# Check the numbers so that we can detect any change in broom or stats in the future.
expect_equal((ret %>% tidy_rowwise(model))$estimate, c(-24.840867308, 0.001245984, -1.104902459, -0.002945304), tolerance = 0.001)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
# Since broom 0.7.0, I sometimes see "residuals" missing here, but not consistently. Will keep watching.
expected_cols <- c("klass", "CANCELLED X", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance", "predicted_response", "predicted_label")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "CANCELLED X", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response", "predicted_label")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
})
}) | /tests/testthat/test_build_lm_1.R | permissive | exploratory-io/exploratory_func | R | false | false | 41,191 | r | context("test build_lm part 2")
test_that("binary prediction with character target column", {
test_data <- structure(
list(
`CANCELLED X` = c("N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "Y", "N", "Y", "N"),
`Carrier Name` = c("Delta Air Lines", "American Eagle", "American Airlines", "Southwest Airlines", "SkyWest Airlines", "Southwest Airlines", "Southwest Airlines", "Delta Air Lines", "Southwest Airlines", "Atlantic Southeast Airlines", "American Airlines", "Southwest Airlines", "US Airways", "US Airways", "Delta Air Lines", "Atlantic Southeast Airlines", NA, "Atlantic Southeast Airlines", "Delta Air Lines", "Delta Air Lines"),
CARRIER = factor(c(NA, "MQ", "AA", "DL", "MQ", "AA", "DL", "DL", "MQ", "AA", "AA", "WN", "US", "US", "DL", "EV", "9E", "EV", "DL", "DL")), # test with factor with NA
# testing filtering of Inf, -Inf, NA here.
DISTANCE = c(Inf, -Inf, NA, 187, 273, 1062, 583, 240, 1123, 851, 852, 862, 361, 507, 1020, 1092, 342, 489, 1184, 545)), row.names = c(NA, -20L),
class = c("tbl_df", "tbl", "data.frame"), .Names = c("CANCELLED X", "Carrier Name", "CARRIER", "DISTANCE"))
# Make target variable logical. (We will support only logical as logistic regression target.)
test_data <- test_data %>% dplyr::mutate(`CANCELLED X` = `CANCELLED X` == 'Y')
# duplicate rows to make some predictable data
# otherwise, the number of rows of the result of prediction becomes 0
test_data <- dplyr::bind_rows(test_data, test_data)
model_data <- build_lm.fast(test_data, `CANCELLED X`, `Carrier Name`, CARRIER, DISTANCE,
normalize_predictors = TRUE,
model_type = "glm", smote=FALSE, with_marginal_effects=TRUE, with_marginal_effects_confint=TRUE)
ret <- test_data %>% select(-`CANCELLED X`) %>% add_prediction(model_df=model_data)
ret <- model_data %>% prediction(data="newdata", data_frame=test_data)
ret <- model_data %>% tidy_rowwise(model, type="vif")
ret <- model_data %>% glance_rowwise(model, pretty.name=TRUE)
expect_equal(colnames(ret), c("AUC","F1 Score","Accuracy Rate","Misclass. Rate","Precision",
"Recall","P Value","Rows","Rows for TRUE","Rows for FALSE",
"Log Likelihood","AIC","BIC","Residual Deviance","Residual DF","Null Deviance",
"Null Model DF"))
expect_equal(ret$`Rows`, 34)
expect_equal(ret$`Rows for TRUE`, 4) # This ends up to be 4 after doubling
expect_equal(ret$`Rows for FALSE`, 30) # This ends up to be 30 after doubling and removing NA rows.
ret <- model_data %>% tidy_rowwise(model)
ret <- model_data %>% augment_rowwise(model)
expect_true(nrow(ret) > 0)
})
test_that("binary prediction with factor target column", {
test_data <- tibble::tibble(
`CANCELLED X` = factor(c("N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "Y", "N", "Y", "N"), levels=c("A","N","Y","B")),
`Carrier Name` = c("Delta Air Lines", "American Eagle", "American Airlines", "Southwest Airlines", "SkyWest Airlines", "Southwest Airlines", "Southwest Airlines", "Delta Air Lines", "Southwest Airlines", "Atlantic Southeast Airlines", "American Airlines", "Southwest Airlines", "US Airways", "US Airways", "Delta Air Lines", "Atlantic Southeast Airlines", NA, "Atlantic Southeast Airlines", "Delta Air Lines", "Delta Air Lines"),
CARRIER = factor(c(NA, "MQ", "AA", "DL", "MQ", "AA", "DL", "DL", "MQ", "AA", "AA", "WN", "US", "US", "DL", "EV", "9E", "EV", "DL", "DL")), # test with factor with NA
# testing filtering of Inf, -Inf, NA here.
DISTANCE = c(Inf, -Inf, NA, 187, 273, 1062, 583, 240, 1123, 851, 852, 862, 361, 507, 1020, 1092, 342, 489, 1184, 545))
# Make target variable logical. (We will support only logical as logistic regression target.)
test_data <- test_data %>% dplyr::mutate(`CANCELLED X` = `CANCELLED X` == 'Y')
# duplicate rows to make some predictable data
# otherwise, the number of rows of the result of prediction becomes 0
test_data <- dplyr::bind_rows(test_data, test_data)
model_data <- build_lm.fast(test_data, `CANCELLED X`, `Carrier Name`, CARRIER, DISTANCE, model_type = "glm", smote=FALSE, with_marginal_effects=TRUE, with_marginal_effects_confint=FALSE)
ret <- model_data %>% prediction(data="newdata", data_frame=test_data)
ret <- model_data %>% glance_rowwise(model, pretty.name=TRUE)
expect_equal(ret$`Rows`, 34)
expect_equal(ret$`Rows for TRUE`, 4) # This ends up to be 4 after doubling
expect_equal(ret$`Rows for FALSE`, 30) # This ends up to be 30 after doubling and removing NA rows.
ret <- model_data %>% tidy_rowwise(model)
ret <- model_data %>% augment_rowwise(model)
expect_true(nrow(ret) > 0)
})
test_that("binary prediction with variable_metric argument", {
test_data <- structure(
list(
`CANCELLED X` = factor(c("N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "Y", "N", "Y", "N"), levels=c("A","N","Y","B")),
`Carrier Name` = c("Delta Air Lines", "American Eagle", "American Airlines", "Southwest Airlines", "SkyWest Airlines", "Southwest Airlines", "Southwest Airlines", "Delta Air Lines", "Southwest Airlines", "Atlantic Southeast Airlines", "American Airlines", "Southwest Airlines", "US Airways", "US Airways", "Delta Air Lines", "Atlantic Southeast Airlines", NA, "Atlantic Southeast Airlines", "Delta Air Lines", "Delta Air Lines"),
CARRIER = factor(c(NA, "MQ", "AA", "DL", "MQ", "AA", "DL", "DL", "MQ", "AA", "AA", "WN", "US", "US", "DL", "EV", "9E", "EV", "DL", "DL")), # test with factor with NA
# testing filtering of Inf, -Inf, NA here.
DISTANCE = c(Inf, -Inf, NA, 187, 273, 1062, 583, 240, 1123, 851, 852, 862, 361, 507, 1020, 1092, 342, 489, 1184, 545)), row.names = c(NA, -20L),
class = c("tbl_df", "tbl", "data.frame"), .Names = c("CANCELLED X", "Carrier Name", "CARRIER", "DISTANCE"))
# Make target variable logical. (We will support only logical as logistic regression target.)
test_data <- test_data %>% dplyr::mutate(`CANCELLED X` = `CANCELLED X` == 'Y')
# duplicate rows to make some predictable data
# otherwise, the number of rows of the result of prediction becomes 0
test_data <- dplyr::bind_rows(test_data, test_data)
model_data <- build_lm.fast(test_data, `CANCELLED X`, `Carrier Name`, CARRIER, DISTANCE, model_type = "glm", smote=FALSE, variable_metric="odds_ratio")
ret <- model_data %>% tidy_rowwise(model, variable_metric="odds_ratio")
model_data <- build_lm.fast(test_data, `CANCELLED X`, `Carrier Name`, CARRIER, DISTANCE, model_type = "glm", smote=FALSE, variable_metric="coefficient")
ret <- model_data %>% tidy_rowwise(model, variable_metric="coefficient")
model_data <- build_lm.fast(test_data, `CANCELLED X`, `Carrier Name`, CARRIER, DISTANCE, model_type = "glm", smote=FALSE, variable_metric="ame")
ret <- model_data %>% tidy_rowwise(model, variable_metric="ame")
expect_true(c("ame") %in% colnames(ret))
expect_true(nrow(ret) > 0)
})
test_data <- tibble::tibble(
`CANCELLED X` = c("N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "N", "Y", "N", "Y", "N"),
`Carrier Name` = c("Delta Air Lines", "American Eagle", "American Airlines", "Southwest Airlines", "SkyWest Airlines", "Southwest Airlines", "Southwest Airlines", "Delta Air Lines", "Southwest Airlines", "Atlantic Southeast Airlines", "American Airlines", "Southwest Airlines", "US Airways", "US Airways", "Delta Air Lines", "Atlantic Southeast Airlines", NA, "Atlantic Southeast Airlines", "Delta Air Lines", "Delta Air Lines"),
CARRIER = factor(c("AA", "MQ", "AA", "DL", "MQ", "AA", "DL", "DL", "MQ", "AA", "AA", "WN", "US", "US", "DL", "EV", "9E", "EV", "DL", "DL")), # test with factor with NA
# testing filtering of Inf, -Inf, NA here.
DISTANCE = c(10, 12, 12, 187, 273, 1062, 583, 240, 1123, 851, 852, 862, 361, 507, 1020, 1092, 342, 489, 1184, 545),
ARR_TIME = c(10, 32, 321, 342, 123, 98, 10, 21, 80, 211, 121, 87, 821, 213, 213, 923, 121, 76, 34, 50),
DERAY_TIME = c(12, 42, 321, 31, 3, 43, 342, 764, 123, 43, 50, 12, 876, 12, 34, 45, 84, 25, 87, 352))
# Make target variable logical. (We will support only logical as logistic regression target.)
test_data <- test_data %>% dplyr::mutate(`CANCELLED X` = `CANCELLED X` == 'Y')
test_data$klass <- c(rep("A", 10), rep("B", 10))
test_that("add_prediction with linear regression", {
model_df <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
target_fun="log",
predictor_funs=list(ARR_TIME="log", DELAY_TIME="none", "Carrier Name"="none"),
model_type = "lm")
ret <- test_data %>% select(-DISTANCE) %>% add_prediction(model_df=model_df)
df2 <- test_data %>% select(-DISTANCE)
ret <- df2 %>% add_prediction(model_df=model_df)
expect_equal(colnames(df2), colnames(ret)[1:length(colnames(df2))]) # Check that the df2 column order is kept.
expect_error({
ret <- test_data %>% select(-DISTANCE, -ARR_TIME) %>% add_prediction(model_df=model_df)
}, regexp=".*ARR_TIME.*Columns are required for the model, but do not exist.*")
})
test_that("Linear Regression with test rate", {
ret <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
model_type = "lm",
test_rate = 0.1,
test_split_type = "ordered") # testing ordered split too.
res <- ret %>% tidy_rowwise(model)
expect_true("Carrier Name: American Airlines" %in% res$term)
res <- ret %>% tidy_rowwise(model, type="vif")
expect_true("Carrier Name" %in% res$term)
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
training_rownum <- nrow(test_data) - test_rownum
suppressWarnings({
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
expect_equal(res$`Rows`, 17)
variables <- (ret %>% tidy_rowwise(model, type="importance") %>% arrange(desc(importance)))$variable
names(variables) <- NULL
res <- ret %>% lm_partial_dependence()
expect_equal(levels(res$x_name), variables) # Factor order of the PDP should be the same as the importance.
expect_true(all(c("conf_high", "conf_low", "bin_sample_size") %in% colnames(res)))
})
})
test_that("Linear Regression with outlier filtering", {
ret <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
model_type = "lm",
test_rate = 0.3,
normalize_predictors = TRUE, # testing target normalization too.
target_outlier_filter_type="percentile",
target_outlier_filter_threshold=0.9) # testing outlier filter too.
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
#training_rownum <- nrow(test_data) - test_rownum
training_rownum <- nrow(ret$source.data[[1]]) - test_rownum
suppressWarnings({
pred_new <- ret %>% prediction(data="newdata", data_frame=test_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
expect_equal(res$`Rows`, 12)
})
})
test_that("Group Linear Regression with test_rate", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>%
build_lm.fast(`DISTANCE`,
`ARR_TIME`,
model_type = "lm",
test_rate = 0.1)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- ret %>% prediction(data="newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
})
})
test_that("GLM - Normal Destribution with test_rate", {
ret <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
model_type = "glm",
family = "gaussian",
test_rate = 0.1)
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
training_rownum <- nrow(test_data) - test_rownum
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=test_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% evaluate_lm_training_and_test(pretty.name=TRUE)
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
res <- ret %>% tidy_rowwise(model, type="permutation_importance")
})
})
test_that("Group GLM - Normal Destribution with test_rate", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>%
build_lm.fast(`DISTANCE`,
`ARR_TIME`,
model_type = "glm",
family = "gaussian",
test_rate = 0.1)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
expected_cols <- c("klass", "DISTANCE", "ARR_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
res <- ret %>% tidy_rowwise(model, type="permutation_importance")
})
})
test_that("GLM - Gamma Destribution with test_rate", {
ret <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
model_type = "glm",
family = "Gamma",
test_rate = 0.1)
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
training_rownum <- nrow(test_data) - test_rownum
suppressWarnings({
res <- prediction(ret, data = "training_and_test", pretty.name=TRUE)
pred_new <- prediction(ret, data = "newdata", data_frame=test_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
})
})
test_that("Group GLM - Gamma Destribution with test_rate", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>%
build_lm.fast(`DISTANCE`,
`ARR_TIME`,
model_type = "glm",
family = "Gamma",
test_rate = 0.1)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
})
})
test_that("GLM - Inverse Gaussian Destribution with test_rate", {
ret <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
model_type = "glm",
family = "inverse.gaussian",
test_rate = 0.1)
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
training_rownum <- nrow(test_data) - test_rownum
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=test_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
})
})
test_that("Group GLM - Inverse Gaussian Destribution with test_rate", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>%
build_lm.fast(`DISTANCE`,
`ARR_TIME`,
model_type = "glm",
family = "inverse.gaussian",
test_rate = 0.1)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
})
})
test_that("add_prediction with poisson regression", {
model_df <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
predictor_funs=list(ARR_TIME="log", DELAY_TIME="none", "Carrier Name"="none"),
model_type = "glm",
family = "poisson",
importance_measure="firm")
ret <- test_data %>% select(-DISTANCE) %>% add_prediction(model_df=model_df)
})
test_that("GLM - poisson Destribution with test_rate", {
ret <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
model_type = "glm",
family = "poisson",
test_rate = 0.1,
importance_measure="firm")
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
training_rownum <- nrow(test_data) - test_rownum
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=test_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
res <- ret %>% tidy_rowwise(model, type="permutation_importance")
})
})
test_that("Group GLM - Poisson Destribution with test_rate", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>%
build_lm.fast(`DISTANCE`,
`ARR_TIME`,
model_type = "glm",
family = "poisson",
test_rate = 0.3)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
res <- ret %>% tidy_rowwise(model, type="permutation_importance")
res <- ret %>% lm_partial_dependence()
})
})
test_that("GLM - Negative Binomial Destribution with test_rate", {
ret <- test_data %>% build_lm.fast(`DISTANCE`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
model_type = "glm",
family = "negativebinomial",
test_rate = 0.1)
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
training_rownum <- nrow(test_data) - test_rownum
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=test_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low","conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("Carrier Name", "DISTANCE", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
res <- ret %>% tidy_rowwise(model, type="permutation_importance")
res <- ret %>% lm_partial_dependence()
})
})
test_that("Group GLM - Negative Binomial Destribution with test_rate", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>%
build_lm.fast(`DISTANCE`,
`ARR_TIME`,
model_type = "glm",
family = "negativebinomial",
test_rate = 0.1)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low","conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance", "predicted_response")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "DISTANCE", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
res <- ret %>% tidy_rowwise(model, type="permutation_importance")
res <- ret %>% lm_partial_dependence()
})
})
test_that("add_prediction with logistic regression", {
model_df <- test_data %>% build_lm.fast(`CANCELLED X`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
predictor_funs=list(ARR_TIME="log", DELAY_TIME="none", "Carrier Name"="none"),
model_type = "glm",
importance_measure="firm")
ret <- test_data %>% select(-`CANCELLED X`) %>% add_prediction(model_df=model_df)
expect_true(all(c("predicted_probability", "linear_predictor","predicted_label") %in% colnames(ret)))
})
test_that("Logistic Regression with test_rate", {
ret <- test_data %>% build_lm.fast(`CANCELLED X`,
`ARR_TIME`,
`DERAY_TIME`,
`Carrier Name`,
family = "binomial",
model_type = "glm",
test_rate = 0.1)
expect_equal(colnames(ret), c("model", ".test_index", "source.data"))
test_rownum <- length(ret$.test_index[[1]])
training_rownum <- nrow(test_data) - test_rownum
variables <- (ret %>% tidy_rowwise(model, type="importance") %>% arrange(desc(importance)))$variable
names(variables) <- NULL
res <- ret %>% lm_partial_dependence()
expect_equal(levels(res$x_name), variables) # Factor order of the PDP should be the same as the importance.
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=test_data)
pred_training_and_test <- ret %>% prediction_binary(data = 'training_and_test', threshold = 0.5)
pred_training_and_test_conf_mat <- ret %>% prediction_training_and_test(prediction_type = 'conf_mat', threshold = 0.5)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(training_rownum, nrow(pred_training))
expect_equal(test_rownum, nrow(pred_test))
expected_cols <- c("CANCELLED X", "Carrier Name", "ARR_TIME", "DERAY_TIME",
"predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat",
"residual_standard_deviation", "cooks_distance", "predicted_response", "predicted_label")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("CANCELLED X", "Carrier Name", "ARR_TIME", "DERAY_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response", "predicted_label")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% tidy_rowwise(model, pretty.name=TRUE)
expected_cols <- c("Term", "Coefficient", "Std Error", "t Value", "P Value", "Conf High", "Conf Low", "Odds Ratio", "Base Level")
expect_true(all(expected_cols %in% colnames(res)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
res <- ret %>% evaluate_binary_training_and_test(`CANCELLED X`, threshold = 0.5, pretty.name=TRUE)
expect_equal(nrow(res), 2) # 2 for training and test.
res <- ret %>% lm_partial_dependence()
})
})
test_that("Group Logistic Regression with test_rate", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>%
build_lm.fast(`CANCELLED X`,
`ARR_TIME`,
model_type = "glm",
family = "binomial",
link = "logit",
test_rate = 0.1)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
# Since broom 0.7.0, I sometimes see "residuals" missing here, but not consistently. Will keep watching.
expected_cols <- c("klass", "CANCELLED X", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance", "predicted_response", "predicted_label")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "CANCELLED X", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response", "predicted_label")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
})
})
test_that("Group Logistic Regression with test_rate with weight", {
group_data <- test_data %>% group_by(klass)
ret <- group_data %>% mutate(Weight=sin(1:n())+1) %>%
build_lm.fast(`CANCELLED X`,
`ARR_TIME`,
weight=`Weight`,
model_type = "glm",
family = "binomial",
link = "logit",
test_rate = 0.1)
# Check the numbers so that we can detect any change in broom or stats in the future.
expect_equal((ret %>% tidy_rowwise(model))$estimate, c(-24.840867308, 0.001245984, -1.104902459, -0.002945304), tolerance = 0.001)
expect_equal(colnames(ret), c("klass", "model", ".test_index", "source.data"))
group_nrows <- group_data %>% summarize(n=n()) %>% `[[`("n")
test_nrows <- sapply(ret$.test_index, length, simplify=TRUE)
training_nrows <- group_nrows - test_nrows
suppressWarnings({
pred_new <- prediction(ret, data = "newdata", data_frame=group_data)
pred_training <- prediction(ret, data = "training")
pred_test <- prediction(ret, data = "test")
expect_equal(pred_training %>% summarize(n=n()) %>% `[[`("n"),
training_nrows)
expect_equal(pred_test %>% summarize(n=n()) %>% `[[`("n"),
test_nrows)
# Since broom 0.7.0, I sometimes see "residuals" missing here, but not consistently. Will keep watching.
expected_cols <- c("klass", "CANCELLED X", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"residuals", "standardised_residuals", "hat", "residual_standard_deviation",
"cooks_distance", "predicted_response", "predicted_label")
expect_true(all(expected_cols %in% colnames(pred_training)))
expected_cols <- c("klass", "CANCELLED X", "ARR_TIME", "predicted_value",
"conf_low", "conf_high",
"standard_error",
"predicted_response", "predicted_label")
expect_true(all(expected_cols %in% colnames(pred_test)))
res <- ret %>% glance_rowwise(model, pretty.name=TRUE)
})
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper.r
\name{limitCovariatesToPopulation}
\alias{limitCovariatesToPopulation}
\title{function to limit covariates of plpData to population}
\usage{
limitCovariatesToPopulation(covariates, rowIds)
}
\description{
function to limit covariates of plpData to population
}
| /man/limitCovariatesToPopulation.Rd | no_license | JaehyeongCho/Argos | R | false | true | 348 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper.r
\name{limitCovariatesToPopulation}
\alias{limitCovariatesToPopulation}
\title{function to limit covariates of plpData to population}
\usage{
limitCovariatesToPopulation(covariates, rowIds)
}
\description{
function to limit covariates of plpData to population
}
|
#' <Add Title>
#'
#' <Add Description>
#'
#' @import htmlwidgets
#'
#' @export
rpivotTable <- function(message = NULL, width = NULL, height = NULL) {
# forward options using x
x = message
# list(
# message = message
# )
# create widget
htmlwidgets::createWidget(
name = 'rpivotTable',
x,
width = width,
height = height,
package = 'rpivotTable'
)
}
#' Widget output function for use in Shiny
#'
#' @export
rpivotTableOutput <- function(outputId, width = '100%', height = '400px'){
shinyWidgetOutput(outputId, 'rpivotTable', width, height, package = 'rpivotTable')
}
#' Widget render function for use in Shiny
#'
#' @export
renderRpivotTable <- function(expr, env = parent.frame(), quoted = FALSE) {
if (!quoted) { expr <- substitute(expr) } # force quoted
shinyRenderWidget(expr, rpivotTableOutput, env, quoted = TRUE)
}
| /R/rpivotTable.R | no_license | arturochian/rpivotTable | R | false | false | 870 | r | #' <Add Title>
#'
#' <Add Description>
#'
#' @import htmlwidgets
#'
#' @export
rpivotTable <- function(message = NULL, width = NULL, height = NULL) {
# forward options using x
x = message
# list(
# message = message
# )
# create widget
htmlwidgets::createWidget(
name = 'rpivotTable',
x,
width = width,
height = height,
package = 'rpivotTable'
)
}
#' Widget output function for use in Shiny
#'
#' @export
rpivotTableOutput <- function(outputId, width = '100%', height = '400px'){
shinyWidgetOutput(outputId, 'rpivotTable', width, height, package = 'rpivotTable')
}
#' Widget render function for use in Shiny
#'
#' @export
renderRpivotTable <- function(expr, env = parent.frame(), quoted = FALSE) {
if (!quoted) { expr <- substitute(expr) } # force quoted
shinyRenderWidget(expr, rpivotTableOutput, env, quoted = TRUE)
}
|
# Regression fixture for CNull's (unexported) individual-based sampling
# routine; the file path suggests it was produced by an AFL/valgrind
# fuzzing run.  `m` is deliberately NULL and `in_m` is a 5 x 7 double
# matrix of zeros plus a few extreme magnitudes -- fuzzer-chosen values,
# not meaningful data.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(1.42266834764401e+82, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# Invoke the routine with exactly these arguments (via do.call) so the
# original crash scenario is reproduced.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615775852-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 362 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(1.42266834764401e+82, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dprime.R
\name{dprime}
\alias{dprime}
\title{Dprime and Other Signal Detection Theory indices.}
\usage{
dprime(n_hit, n_fa, n_miss = NULL, n_cr = NULL, n_targets = NULL,
n_distractors = NULL, adjusted = TRUE)
}
\arguments{
\item{n_hit}{Number of hits.}
\item{n_fa}{Number of false alarms.}
\item{n_miss}{Number of misses.}
\item{n_cr}{Number of correct rejections.}
\item{n_targets}{Number of targets (n_hit + n_miss).}
\item{n_distractors}{Number of distractors (n_fa + n_cr).}
\item{adjusted}{Should it use the Hautus (1995) adjustments for extreme values.}
}
\value{
Calculates the d', the beta, the A' and the B''D based on the signal detection theory (SRT). See Pallier (2002) for the algorithms.
Returns a list containing 4 objects:
\itemize{
\item{\strong{dprime (d')}: }{The sensitivity. Reflects the distance between the two distributions: signal, and signal+noise and corresponds to the Z value of the hit-rate minus that of the false-alarm rate.}
\item{\strong{beta}: }{The bias (criterion). The value for beta is the ratio of the normal density functions at the criterion of the Z values used in the computation of d'. This reflects an observer's bias to say 'yes' or 'no' with the unbiased observer having a value around 1.0. As the bias to say 'yes' increases (liberal), resulting in a higher hit-rate and false-alarm-rate, beta approaches 0.0. As the bias to say 'no' increases (conservative), resulting in a lower hit-rate and false-alarm rate, beta increases over 1.0 on an open-ended scale.}
\item{\strong{aprime (A')}: }{Non-parametric estimate of discriminability. An A' near 1.0 indicates good discriminability, while a value near 0.5 means chance performance.}
\item{\strong{bppd (B''D)}: }{Non-parametric estimate of bias. A B''D equal to 0.0 indicates no bias, positive numbers represent conservative bias (i.e., a tendency to answer 'no'), negative numbers represent liberal bias (i.e. a tendency to answer 'yes'). The maximum absolute value is 1.0.}
\item{\strong{c}: }{Another index of bias. the number of standard deviations from the midpoint between these two distributions, i.e., a measure on a continuum from "conservative" to "liberal".}
}
Note that for d' and beta, adjustments for extreme values are made following the recommendations of Hautus (1995).
}
\description{
Computes Signal Detection Theory indices (d', beta, A', B''D, c).
}
\examples{
library(psycho)
n_hit <- 9
n_fa <- 2
n_miss <- 1
n_cr <- 7
indices <- psycho::dprime(n_hit, n_fa, n_miss, n_cr)
df <- data.frame(Participant = c("A", "B", "C"),
n_hit = c(1, 2, 5),
n_fa = c(6, 8, 1))
indices <- psycho::dprime(n_hit=df$n_hit,
n_fa=df$n_fa,
n_targets=10,
n_distractors=10,
adjusted=FALSE)
}
\author{
\href{https://dominiquemakowski.github.io/}{Dominique Makowski}
}
| /man/dprime.Rd | permissive | HugoNjb/psycho.R | R | false | true | 2,889 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dprime.R
\name{dprime}
\alias{dprime}
\title{Dprime and Other Signal Detection Theory indices.}
\usage{
dprime(n_hit, n_fa, n_miss = NULL, n_cr = NULL, n_targets = NULL,
n_distractors = NULL, adjusted = TRUE)
}
\arguments{
\item{n_hit}{Number of hits.}
\item{n_fa}{Number of false alarms.}
\item{n_miss}{Number of misses.}
\item{n_cr}{Number of correct rejections.}
\item{n_targets}{Number of targets (n_hit + n_miss).}
\item{n_distractors}{Number of distractors (n_fa + n_cr).}
\item{adjusted}{Should it use the Hautus (1995) adjustments for extreme values.}
}
\value{
Calculates the d', the beta, the A' and the B''D based on the signal detection theory (SRT). See Pallier (2002) for the algorithms.
Returns a list containing 4 objects:
\itemize{
\item{\strong{dprime (d')}: }{The sensitivity. Reflects the distance between the two distributions: signal, and signal+noise and corresponds to the Z value of the hit-rate minus that of the false-alarm rate.}
\item{\strong{beta}: }{The bias (criterion). The value for beta is the ratio of the normal density functions at the criterion of the Z values used in the computation of d'. This reflects an observer's bias to say 'yes' or 'no' with the unbiased observer having a value around 1.0. As the bias to say 'yes' increases (liberal), resulting in a higher hit-rate and false-alarm-rate, beta approaches 0.0. As the bias to say 'no' increases (conservative), resulting in a lower hit-rate and false-alarm rate, beta increases over 1.0 on an open-ended scale.}
\item{\strong{aprime (A')}: }{Non-parametric estimate of discriminability. An A' near 1.0 indicates good discriminability, while a value near 0.5 means chance performance.}
\item{\strong{bppd (B''D)}: }{Non-parametric estimate of bias. A B''D equal to 0.0 indicates no bias, positive numbers represent conservative bias (i.e., a tendency to answer 'no'), negative numbers represent liberal bias (i.e. a tendency to answer 'yes'). The maximum absolute value is 1.0.}
\item{\strong{c}: }{Another index of bias. the number of standard deviations from the midpoint between these two distributions, i.e., a measure on a continuum from "conservative" to "liberal".}
}
Note that for d' and beta, adjustement for extreme values are made following the recommandations of Hautus (1995).
}
\description{
Computes Signal Detection Theory indices (d', beta, A', B''D, c).
}
\examples{
library(psycho)
n_hit <- 9
n_fa <- 2
n_miss <- 1
n_cr <- 7
indices <- psycho::dprime(n_hit, n_fa, n_miss, n_cr)
df <- data.frame(Participant = c("A", "B", "C"),
n_hit = c(1, 2, 5),
n_fa = c(6, 8, 1))
indices <- psycho::dprime(n_hit=df$n_hit,
n_fa=df$n_fa,
n_targets=10,
n_distractors=10,
adjusted=FALSE)
}
\author{
\href{https://dominiquemakowski.github.io/}{Dominique Makowski}
}
|
#' d1_upload
#'
#' Upload a data object (together with its system metadata) to a DataONE
#' member node via an authenticated HTTP POST to the node's /object endpoint.
#' @param object path of the new data file to be uploaded
#' @param uid the user id of the data maintainer
#' @param id identifier to assign to the object; the default attempts to
#'   extract one from the object's metadata (e.g. EML metadata).
#' @param cert path to the x509 certificate from https://cilogon.org/?skin=DataONE,
#'   used for SSL client authentication.
#' @param node The URL of the DataONE member node we intend to update.
#'   Defaults to the KNB production node.
#' @param sysmeta path to the required system metadata document for the
#'   object, generated by default.
#' @return httr::response object indicating the success or failure of the call
#' @import httr
#' @examples
#' \dontrun{
#' f <- system.file("doc", "reml_example.xml", package="EML")
#' d1_upload(f, "boettiger", id=uuid::UUIDgenerate(), node = knb_test)
#' }
#' @export
d1_upload <- function(object,
                      uid,
                      id = getid("extract", object),
                      cert = "/tmp/x509up_u1000",
                      node = "https://knb.ecoinformatics.org/knb/d1/mn/v1",
                      sysmeta = write_sysmeta(object, uid=uid, id=id)){
  # Member-node "create" endpoint (DataONE REST API: POST /object).
  url <- paste0(node, "/object")
  # Multipart form body: identifier plus the data file and its system
  # metadata, both streamed from disk by httr::upload_file().
  body <- list(pid = id,
               object = upload_file(object),
               sysmeta = upload_file(sysmeta))
  # Authenticate the request with the client-side x509 certificate.
  POST(url,
       body = body,
       config=config(sslcert = cert))
}
## tests:
### FIXME use the session variable to avoid re-authenticating...
#
#
### WORKS:
#node = knb_test
#cert = "/tmp/x509up_u1000"
### Ping server
#GET(paste0(node, "/monitor/ping"))
### Reserve an ID
#POST(paste0(node, "/generate"), list(scheme="uuid"), config=config(sslcert = cert))
#
#
#library(tools)
#f <- system.file("doc", "reml_example.xml", package="reml")
#md5sum(f)
#
| /R/d1_upload.R | no_license | cboettig/dataone-lite | R | false | false | 1,715 | r | #' d1_upload
#'
#' upload an object to dataone
#' @param object new data file to be uploaded
#' @param uid the user id of the data maintainer
#' @param id what identifier should be used for the object; default will try and guess from object metadata (e.g. EML metadata).
#' @param cert path to the x509 certificate from https://cilogon.org/?skin=DataONE
#' @param node The URL to the DataONE node we intend to update. Defaults to the KNB
#' @param sysmeta the required system metadata for the package, geranted by default.
#' @return httr::response object indicating the success or failure of the call
#' @import httr
#' @examples
#' \dontrun{
#' f <- system.file("doc", "reml_example.xml", package="EML")
#' d1_upload(f, "boettiger", id=uuid::UUIDgenerate(), node = knb_test)
#' }
#' @export
d1_upload <- function(object,
uid,
id = getid("extract", object),
cert = "/tmp/x509up_u1000",
node = "https://knb.ecoinformatics.org/knb/d1/mn/v1",
sysmeta = write_sysmeta(object, uid=uid, id=id)){
url <- paste0(node, "/object")
body <- list(pid = id,
object = upload_file(object),
sysmeta = upload_file(sysmeta))
POST(url,
body = body,
config=config(sslcert = cert))
}
## tests:
### FIXME use the session variable to avoid re-authenticating...
#
#
### WORKS:
#node = knb_test
#cert = "/tmp/x509up_u1000"
### Ping server
#GET(paste0(node, "/monitor/ping"))
### Reserve an ID
#POST(paste0(node, "/generate"), list(scheme="uuid"), config=config(sslcert = cert))
#
#
#library(tools)
#f <- system.file("doc", "reml_example.xml", package="reml")
#md5sum(f)
#
|
# List example: employee details kept in a single named list.
ID <- c(1, 2, 3, 4)
emp.name <- c("man", "rag", "sha", "din")
num.emp <- 4
# Name the components: with the original unnamed list, emp.list$Names
# below printed NULL.  Positional access ([[1]], [[2]], ...) is
# unaffected by naming.
emp.list <- list(ID = ID, Names = emp.name, Count = num.emp)
print(emp.list)
# Accessing components (by position and by name)
print(emp.list[[1]])       # the ID vector
print(emp.list[[2]])       # the employee names
print(emp.list[[2]][1])    # first employee's name
print(emp.list$Names)      # same vector as [[2]], accessed by name
# Manipulating the list: append a fifth employee
emp.list[[2]][5] <- "Nir"
emp.list[[1]][5] <- 5
print(emp.list)
# Concatenation of lists
emp.ages <- list(ages = c(23, 54, 30, 32))
emp.list <- c(emp.list, emp.ages)
print(emp.list)
ID=c(1,2,3,4)
emp.name=c("man","rag","sha","din")
num.emp=4
emp.list=list(ID,emp.name,num.emp)
print(emp.list)
#accessing components(by names)
print(emp.list[[1]])
print(emp.list[[2]])
print(emp.list[[2]][1])
print(emp.list$Names)
#Manipulating list
emp.list[[2]][5]="Nir"
emp.list[[1]][5]=5
print(emp.list)
#concatenation of list
emp.ages=list("ages"=c(23,54,30,32))
emp.list=c(emp.list,emp.ages)
print(emp.list) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glance_data.R
\name{glance_data}
\alias{glance_data}
\title{Glance Data}
\usage{
glance_data(x, limit2tally = 20)
}
\arguments{
\item{x}{A dataframe with named columns.}
\item{limit2tally}{One of the summaries is a tally of the distinct
values on each column. If there are too many different values
in a column, this summary would be meaningless. This
\code{limit2tally} is the limit of distinct values to
tally. If there are more than that it returns
"Too many unique values".}
}
\value{
A \code{tibble}.
}
\description{
Provides a summary of data with the the following columns:
\describe{
\item{\code{name}}{Name of the column.}
\item{\code{type}}{Type of the column, equal to "numerical",
"logical", "factor", "categorical", or "NA only".}
\item{\code{distinct_values}}{Count of distinct values. It ignores
NA values. Thus, if a columns only has NAs, then the value of this
field will be zero.}
\item{\code{minimum}}{Minimum of numerical columns excluding NA
values.}
\item{\code{median}}{Median of numerical columns excluding NA
values.}
\item{\code{maximum}}{Maximum of numerical columns excluding NA
values.}
\item{\code{mean}}{Mean of numerical variables. It ignores NAs.}
\item{\code{sd}}{Standard deviation of numerical variables. It
ignores NAs.}
\item{\code{na_proportion}}{Proportion of NAs.}
\item{\code{count}}{Tally of values if the column has at most
\code{limit2tally} distinct values (20 by default); columns with more
distinct values get \code{"Too many unique values"} instead.}
\item{\code{sample_values}}{Sample of (different) values in each
column.}
}
}
\examples{
glance_data(iris)
}
\author{
Guillermo Basulto-Elias
}
| /man/glance_data.Rd | no_license | cran/glancedata | R | false | true | 1,673 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glance_data.R
\name{glance_data}
\alias{glance_data}
\title{Glance Data}
\usage{
glance_data(x, limit2tally = 20)
}
\arguments{
\item{x}{A dataframe with named columns.}
\item{limit2tally}{One of the summaries is a tally of the distinct
values on each column. If there are too many different values
in a column, this summary would be meaningless. This
\code{limit2tally} is the limit of distinct values to
tally. If there are more than that it returns
"Too many unique values".}
}
\value{
A \code{tibble}.
}
\description{
Provides a summary of data with the the following columns:
\describe{
\item{\code{name}}{Name of the column.}
\item{\code{type}}{Type of the column, equal to "numerical",
"logical", "factor", "categorical", or "NA only".}
\item{\code{distinct_values}}{Count of distinct values. It ignores
NA values. Thus, if a columns only has NAs, then the value of this
field will be zero.}
\item{\code{minimum}}{Minimum of numerical columns excluding NA
values.}
\item{\code{median}}{Median of numerical columns excluding NA
values.}
\item{\code{maximum}}{Maximum of numerical columns excluding NA
values.}
\item{\code{mean}}{Mean of numerical variables. It ignores NAs.}
\item{\code{sd}}{Standard deviation of numerical variables. It
ignores NAs.}
\item{\code{na_proportion}}{Proportion of NAs.}
\item{\code{count}}{Tally of values if the column has 5 values at
most. This value (5) can be modified with the parameter
\code{limit2tally}.}
\item{\code{sample_values}}{Sample of (different) values in each
column.}
}
}
\examples{
glance_data(iris)
}
\author{
Guillermo Basulto-Elias
}
|
# Logistic regression on the bank-marketing data set: predict whether a
# client has taken a term deposit (column y).
# Load the data set (interactive file picker).
bank_full<-read.csv(file.choose())
summary(bank_full) # basic statistics to inform business decisions
str(bank_full)
# NOTE(review): attach() is discouraged -- it can silently mask objects;
# the code below already uses bank_full$... explicitly anyway.
attach(bank_full)
plot(bank_full$y) # visualize the yes/no split of term-deposit take-up
# Fit a logistic regression of y on all remaining columns.
model<- glm(y~.,data=bank_full,family = "binomial")
summary(model)
# Predicted probabilities of the positive class for every row.
prob<-predict(model,type = c("response"),bank_full)
prob
# Confusion matrix at a 0.5 probability cut-off.
confusion<- table(prob>0.5,bank_full$y)
confusion
# Model accuracy: share of correctly classified rows.
accuracy<-sum(diag(confusion)/sum(confusion))
accuracy # approximately 90%
# NOTE(review): installing packages inside a script is a side effect;
# consider guarding with if (!requireNamespace("ROCR", quietly = TRUE)).
install.packages("ROCR")
library(ROCR)
# ROC curve: true-positive rate vs false-positive rate, plus the AUC.
rocrpred<-prediction(prob,bank_full$y)
rocrperf<-performance(rocrpred,"tpr","fpr")
rocrperf2<-performance(rocrpred,measure = "auc")
?performance
plot(rocrperf,colorize=T)
| /ExcelR-solution-assignments-/logistic regression/bank_full(logistic regression).R | no_license | jinka161997/ExcelR-solution-assignments- | R | false | false | 764 | r | # loading the data set
bank_full<-read.csv(file.choose())
summary(bank_full) # basic statistics and business movement decessions
str(bank_full)
attach(bank_full)
plot(bank_full$y) #visuvalization on yes and no senario wether client has term deposit taken or not
model<- glm(y~.,data=bank_full,family = "binomial")
summary(model)
prob<-predict(model,type = c("response"),bank_full)
prob
confusion<- table(prob>0.5,bank_full$y)
confusion
#model accuracy
accuracy<-sum(diag(confusion)/sum(confusion))
accuracy # 90%
install.packages("ROCR")
library(ROCR)
rocrpred<-prediction(prob,bank_full$y)
rocrperf<-performance(rocrpred,"tpr","fpr")
rocrperf2<-performance(rocrpred,measure = "auc")
?performance
plot(rocrperf,colorize=T)
|
# Survey-weighted mean total expenditures (TOTEXP) by sex and poverty
# status from the MEPS Full-Year Consolidated (FYC) file.
# NOTE(review): tokens of the form .yy., .year. and .FYC. appear to be
# template placeholders substituted with the survey year by a code
# generator -- confirm before running this file directly.
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Adjust variance estimation for strata containing a single PSU.
options(survey.lonely.psu="adjust")
# Load FYC file (SAS transport format)
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
# Harmonize variable names across survey years: older files use
# year-suffixed design variables and a differently named weight.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Negative ages are missing-value codes; AGELAST is the latest non-missing age.
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Poverty status
if(year == 1996)
  FYC <- FYC %>% rename(POVCAT96 = POVCAT)
FYC <- FYC %>%
  mutate(poverty = recode_factor(POVCAT.yy., .default = "Missing", .missing = "Missing",
    "1" = "Negative or poor",
    "2" = "Near-poor",
    "3" = "Low income",
    "4" = "Middle income",
    "5" = "High income"))
# Sex
FYC <- FYC %>%
  mutate(sex = recode_factor(SEX, .default = "Missing", .missing = "Missing",
    "1" = "Male",
    "2" = "Female"))
# Complex survey design: PSU/stratum identifiers with person-level weights.
FYCdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = FYC,
  nest = TRUE)
# Mean expenditure by sex x poverty, restricted to persons with expenses.
results <- svyby(~TOTEXP.yy., FUN = svymean, by = ~sex + poverty, design = subset(FYCdsgn, TOTEXP.yy. > 0))
print(results)
| /mepstrends/hc_use/json/code/r/meanEXP__sex__poverty__.r | permissive | RandomCriticalAnalysis/MEPS-summary-tables | R | false | false | 1,509 | r | # Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Poverty status
if(year == 1996)
FYC <- FYC %>% rename(POVCAT96 = POVCAT)
FYC <- FYC %>%
mutate(poverty = recode_factor(POVCAT.yy., .default = "Missing", .missing = "Missing",
"1" = "Negative or poor",
"2" = "Near-poor",
"3" = "Low income",
"4" = "Middle income",
"5" = "High income"))
# Sex
FYC <- FYC %>%
mutate(sex = recode_factor(SEX, .default = "Missing", .missing = "Missing",
"1" = "Male",
"2" = "Female"))
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = FYC,
nest = TRUE)
results <- svyby(~TOTEXP.yy., FUN = svymean, by = ~sex + poverty, design = subset(FYCdsgn, TOTEXP.yy. > 0))
print(results)
|
# Simple linear regression: predict Salary from YearsExperience.
#Importing the dataset (expects Salary_Data.csv in the working directory)
dataset = read.csv('Salary_Data.csv')
#dataset = dataset[, 2:3]
#Splitting data into training and test set (2/3 train, 1/3 test)
#install.packages('caTools')
library(caTools)
set.seed(123)   # reproducible split
split = sample.split(dataset$Salary, SplitRatio = 2/3)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
#Feature scaling is unnecessary here: lm() handles the raw predictor.
#training_set[, 2:3] = scale(training_set[, 2:3])
#test_set[, 2:3] = scale(test_set[, 2:3])
#Fitting simple linear regression to the training set
regressor = lm(formula = Salary ~ YearsExperience,
               data = training_set)
#Predicting the test set results
y_pred = predict(regressor, newdata = test_set)
#Visualizing the training set results: observed points in red, the
#regression line fitted on the training data in blue.
#install.packages('ggplot2')
library(ggplot2)
ggplot() +
  geom_point(aes(x = training_set$YearsExperience, y = training_set$Salary),
             colour = 'red')+
  geom_line(aes(x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
            colour = 'blue') +
  ggtitle('Salary Vs Experience (Training Set)') +
  xlab('Years of Experience') +
  ylab('Salary')
#Visualizing the test set results: test points in red against the same
#line fitted on the training data (so both plots share one model).
library(ggplot2)
ggplot() +
  geom_point(aes(x = test_set$YearsExperience, y = test_set$Salary),
             colour = 'red')+
  geom_line(aes(x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
            colour = 'blue') +
  ggtitle('Salary Vs Experience (Test Set)') +
  xlab('Years of Experience') +
  ylab('Salary')
dataset = read.csv('Salary_Data.csv')
#dataset = dataset[, 2:3]
#Splitting data into training and test set
#install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Salary, SplitRatio = 2/3)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
#Feature scaling
#training_set[, 2:3] = scale(training_set[, 2:3])
#test_set[, 2:3] = scale(test_set[, 2:3])
#Fitting simple linear regression to the training set
regressor = lm(formula = Salary ~ YearsExperience,
data = training_set)
#Predicting the test set results
y_pred = predict(regressor, newdata = test_set)
#Visualizing the training set results
#install.packages('ggplot2')
library(ggplot2)
ggplot() +
geom_point(aes(x = training_set$YearsExperience, y = training_set$Salary),
colour = 'red')+
geom_line(aes(x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
colour = 'blue') +
ggtitle('Salary Vs Experience (Training Set)') +
xlab('Years of Experience') +
ylab('Salary')
#Visualizing the test set results
library(ggplot2)
ggplot() +
geom_point(aes(x = test_set$YearsExperience, y = test_set$Salary),
colour = 'red')+
geom_line(aes(x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
colour = 'blue') +
ggtitle('Salary Vs Experience (Test Set)') +
xlab('Years of Experience') +
ylab('Salary') |
#  Predict method for fitted functional regression (fRegress) objects.
#
#  Args:
#    object:   a fitted fRegress list; the components read here are
#              xfdlist (covariate functions), betaestlist (estimated
#              coefficient functions) and yhatfdobj (in-sample fit).
#    newdata:  evaluation points for the prediction; when NULL the
#              in-sample fitted values are returned unchanged.
#    se.fit:   request standard errors of the fit (not yet implemented).
#    interval: interval type; anything other than "none" also requires
#              standard errors and is therefore not yet implemented.
#    level:    confidence level for the requested interval (unused until
#              standard errors are implemented).
#    ...:      ignored; present for S3 method compatibility.
#
#  Returns: the point predictions.  Requests involving standard errors
#  stop with an error.
predict.fRegress <- function(object, newdata=NULL, se.fit = FALSE,
                             interval = c("none", "confidence", "prediction"),
                             level = 0.95, ...){
  ##
  ## 1. compute the fit
  ##
  if(is.null(newdata))
    pred <- object$yhatfdobj
  else{
    nx <- length(object$xfdlist)
    Nnew <- length(newdata)
    pred <- rep(0, Nnew)
    # Accumulate the linear predictor term by term:
    # pred(t) = sum_i beta_i(t) * x_i(t), evaluated at newdata.
    for(i in 1:nx){
      xi <- predict(object$xfdlist[[i]], newdata)
      bi <- predict(object$betaestlist[[i]], newdata)
      pred <- pred+bi*xi
    }
  }
  ##
  ## 2. Are standard errors needed?
  ##
  int <- match.arg(interval)
  need.se <- (se.fit || (int != "none"))
  if(!need.se)return(pred)
  # Standard errors (and hence intervals) are not implemented yet.
  stop('Need se.fit; not implemented yet')
}
#  Residuals method for fRegress fits: the observed response minus the
#  fitted values from predict.fRegress (extra arguments are forwarded).
residuals.fRegress <- function(object, ...){
  fitted_values <- predict(object, ...)
  object$yfdPar - fitted_values
}
| /R/predict.fRegress.R | no_license | bonniewan/fda | R | false | false | 731 | r | predict.fRegress <- function(object, newdata=NULL, se.fit = FALSE,
interval = c("none", "confidence", "prediction"),
level = 0.95, ...){
##
## 1. fit ???
##
if(is.null(newdata))
pred <- object$yhatfdobj
else{
nx <- length(object$xfdlist)
Nnew <- length(newdata)
pred <- rep(0, Nnew)
for(i in 1:nx){
xi <- predict(object$xfdlist[[i]], newdata)
bi <- predict(object$betaestlist[[i]], newdata)
pred <- pred+bi*xi
}
}
##
## 2. Need se.fit?
##
int <- match.arg(interval)
need.se <- (se.fit || (int != "none"))
if(!need.se)return(pred)
#
stop('Need se.fit; not implemented yet')
}
residuals.fRegress <- function(object, ...){
object$yfdPar - predict(object, ...)
}
|
#!/usr/local/bin/Rscript --vanilla
# compiles all .Rmd files in _R directory into .md files in blog directory,
# if the input file is older than the output file.
# run ./knitpages.R to update all knitr files that need to be updated.
# run this script from your base content directory
library(knitr)
#' Knit Post
#'
#' Knits a single .Rmd file into a .md file, first pointing knitr's
#' figure and cache paths at per-post subfolders named after the input.
#' @param input path of the input .Rmd file
#' @param outfile path of the output .md file
#' @param figsfolder base folder under which this post's figures are written
#' @param cachefolder base folder under which this post's knitr cache is kept
#' @param base.url base URL that knitr prepends to figure paths
#' @keywords knit
#' @export
KnitPost <- function(input, outfile, figsfolder, cachefolder, base.url = "/") {
  opts_knit$set(base.url = base.url)
  # Per-post subfolders, named after the input file without its extension.
  fig.path <- paste0(figsfolder, sub(".Rmd$", "", basename(input)), "/")
  cache.path <- file.path(cachefolder, sub(".Rmd$", "", basename(input)), "/")
  # NOTE(review): fig.cap = "center" sets every figure *caption* to the
  # literal string "center"; fig.align = "center" may have been intended.
  opts_chunk$set(fig.path = fig.path, cache.path = cache.path, fig.cap = "center")
  render_markdown()
  # Evaluate chunks in the caller's environment so posts can share state.
  knit(input, outfile, envir = parent.frame())
}
#' Knit every .Rmd file in a folder whose output is missing or stale
#'
#' Walks \code{infolder} recursively and knits each .Rmd file into a
#' matching .md file in \code{outfolder}, skipping files whose output is
#' already newer than the input.
#'
#' @param infolder folder searched (recursively) for .Rmd inputs
#' @param outfolder folder receiving the rendered .md files
#' @param figsfolder folder prefix passed to KnitPost for figures
#' @param cachefolder folder prefix passed to KnitPost for knitr caches
#' @param force if TRUE, re-knit every file even when its output is up to
#'   date (previously this argument was accepted but never used)
knit_folder <- function(infolder, outfolder = "posts/", figsfolder = "static/", cachefolder = "_caches", force = FALSE) {
  # "\\.Rmd$" is a proper regex; the old glob-style "*.Rmd" pattern only
  # worked through lenient regex handling and matched more loosely.
  for (infile in list.files(infolder, pattern = "\\.Rmd$", full.names = TRUE, recursive = TRUE)) {
    print(infile)
    outfile <- paste0(outfolder, "/", sub("\\.Rmd$", ".md", basename(infile)))
    print(outfile)
    # Knit when forced, when no output exists yet, or when the input was
    # modified more recently than the output.
    if (force || !file.exists(outfile) ||
        file.info(infile)$mtime > file.info(outfile)$mtime) {
      KnitPost(infile, outfile, figsfolder, cachefolder)
    }
  }
}
# compiles all .Rmd files in _R directory into .md files in blog directory,
# if the input file is older than the output file.
# run ./knitpages.R to update all knitr files that need to be updated.
# run this script from your base content directory
library(knitr)
#' Knit Post
#'
#' This function converts .Rmd files in a directory to .md files in another directory
#' @param input input .Rmd
#' @param outfile output .md
#' @param figsfolder where figures will be
#' @param cachefolder idk
#' @param base.url the base directory
#' @keywords knit
#' @export
KnitPost <- function(input, outfile, figsfolder, cachefolder, base.url = "/") {
opts_knit$set(base.url = base.url)
fig.path <- paste0(figsfolder, sub(".Rmd$", "", basename(input)), "/")
cache.path <- file.path(cachefolder, sub(".Rmd$", "", basename(input)), "/")
opts_chunk$set(fig.path = fig.path, cache.path = cache.path, fig.cap = "center")
render_markdown()
knit(input, outfile, envir = parent.frame())
}
knit_folder <- function(infolder, outfolder = "posts/", figsfolder = "static/", cachefolder = "_caches", force = F) {
for (infile in list.files(infolder, pattern = "*.Rmd", full.names = TRUE, recursive = TRUE)) {
print(infile)
outfile = paste0(outfolder, "/", sub(".Rmd$", ".md", basename(infile)))
print(outfile)
# knit only if the input file is the last one modified
if (!file.exists(outfile) | file.info(infile)$mtime > file.info(outfile)$mtime) {
KnitPost(infile, outfile, figsfolder, cachefolder)
}
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/single_effect_regression.R
\name{single_effect_regression}
\alias{single_effect_regression}
\title{Bayesian single-effect linear regression of Y on X}
\usage{
single_effect_regression(Y, X, sa2 = 1, s2 = 1, optimize_sa2 = FALSE)
}
\arguments{
\item{Y}{an n vector}
\item{X}{an n by p matrix of covariates}
\item{sa2}{the scaled prior variance (so prior variance is sa2*s2)}
\item{s2}{the residual variance}

\item{optimize_sa2}{whether to optimize the scaled prior variance
\code{sa2} instead of holding it fixed at the supplied value}
}
\value{
a list with elements: \cr
\item{alpha}{vector of posterior inclusion probabilities. ie alpha[i] is posterior probability that
that b[i] is non-zero}
\item{mu}{vector of posterior means (conditional on inclusion)}
\item{mu2}{vector of posterior second moments (conditional on inclusion)}
\item{bf}{vector of Bayes factors for each variable}
}
\description{
Bayesian single-effect linear regression of Y on X
}
\details{
Performs single-effect linear regression of Y on X. That is, this function
fits the regression model Y= Xb + e, where elements of e are iid N(0,s2) and the
b is a p vector of effects to be estimated.
The assumption is that b has exactly one non-zero element, with all elements
equally likely to be non-zero. The prior on the non-zero element is N(0,var=sa2*s2).
}
| /man/single_effect_regression.Rd | no_license | jhmarcus/susieR | R | false | true | 1,282 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/single_effect_regression.R
\name{single_effect_regression}
\alias{single_effect_regression}
\title{Bayesian single-effect linear regression of Y on X}
\usage{
single_effect_regression(Y, X, sa2 = 1, s2 = 1, optimize_sa2 = FALSE)
}
\arguments{
\item{Y}{an n vector}
\item{X}{an n by p matrix of covariates}
\item{sa2}{the scaled prior variance (so prior variance is sa2*s2)}
\item{s2}{the residual variance}
}
\value{
a list with elements: \cr
\item{alpha}{vector of posterior inclusion probabilities. ie alpha[i] is posterior probability that
that b[i] is non-zero}
\item{mu}{vector of posterior means (conditional on inclusion)}
\item{mu2}{vector of posterior second moments (conditional on inclusion)}
\item{bf}{vector of Bayes factors for each variable}
}
\description{
Bayesian single-effect linear regression of Y on X
}
\details{
Performs single-effect linear regression of Y on X. That is, this function
fits the regression model Y= Xb + e, where elements of e are iid N(0,s2) and the
b is a p vector of effects to be estimated.
The assumption is that b has exactly one non-zero element, with all elements
equally likely to be non-zero. The prior on the non-zero element is N(0,var=sa2*s2).
}
|
\name{soboltouati}
\alias{soboltouati}
\alias{tell.soboltouati}
\alias{print.soboltouati}
\alias{plot.soboltouati}
\alias{ggplot.soboltouati}
\title{Monte Carlo Estimation of Sobol' Indices (formulas of Martinez (2011) and Touati (2016))}
\description{
\code{soboltouati} implements the Monte Carlo estimation of
the Sobol' indices for both first-order and total indices using
correlation coefficients-based formulas, at a total cost of
\eqn{(p+2) \times n}{(p + 2) * n} model evaluations.
These are called the Martinez estimators. It also computes their
confidence intervals based on asymptotic properties of empirical
correlation coefficients.
}
\usage{
soboltouati(model = NULL, X1, X2, conf = 0.95, \dots)
\method{tell}{soboltouati}(x, y = NULL, return.var = NULL, \dots)
\method{print}{soboltouati}(x, \dots)
\method{plot}{soboltouati}(x, ylim = c(0, 1), \dots)
\method{ggplot}{soboltouati}(data, mapping = aes(), ylim = c(0, 1), \dots, environment
= parent.frame())
}
\arguments{
\item{model}{a function, or a model with a \code{predict} method,
defining the model to analyze.}
\item{X1}{the first random sample.}
\item{X2}{the second random sample.}
\item{conf}{the confidence level for confidence intervals, or zero to
avoid their computation if they are not needed.}
\item{x}{a list of class \code{"soboltouati"} storing the state of the
sensitivity study (parameters, data, estimates).}
\item{data}{a list of class \code{"soboltouati"} storing the state of the
sensitivity study (parameters, data, estimates).}
\item{y}{a vector of model responses.}
\item{return.var}{a vector of character strings giving further
internal variables names to store in the output object \code{x}.}
\item{ylim}{y-coordinate plotting limits.}
\item{mapping}{Default list of aesthetic mappings to use for plot. If not specified,
must be supplied in each layer added to the plot.}
\item{environment}{[Deprecated] Used prior to tidy evaluation.}
\item{\dots}{any other arguments for \code{model} which are passed
unchanged each time it is called}
}
\value{
\code{soboltouati} returns a list of class \code{"soboltouati"},
containing all the input arguments detailed before, plus the following
components:
\item{call}{the matched call.}
\item{X}{a \code{data.frame} containing the design of experiments.}
\item{y}{the response used}
\item{V}{the estimations of normalized variances of the Conditional
Expectations (VCE) with respect to each factor and also with respect
to the complementary set of each factor ("all but \eqn{X_i}{Xi}").}
\item{S}{the estimations of the Sobol' first-order indices.}
\item{T}{the estimations of the Sobol' total sensitivity indices.}
}
\details{
This estimator supports missing values (NA or NaN), which can occur during the
simulation of the model on the design of experiments (due to code failure),
even though Sobol' indices are no longer rigorous variance-based sensitivity
indices when missing values are present. In this case, a warning is displayed.
}
\references{
J-M. Martinez, 2011, \emph{Analyse de sensibilite globale par decomposition
de la variance}, Presentation in the meeting of GdR Ondes and GdR MASCOT-NUM,
January, 13th, 2011, Institut Henri Poincare, Paris, France.
T. Touati, 2016, Confidence intervals for Sobol' indices.
Proceedings of the SAMO 2016 Conference, Reunion Island, France, December 2016.
T. Touati, 2017, \emph{Intervalles de confiance pour les indices de Sobol},
49emes Journees de la SFdS, Avignon, France, Juin 2017.
}
\author{
Taieb Touati, Khalid Boumhaout
}
\seealso{
\code{\link{sobol}, \link{sobol2002}, \link{sobolSalt}, \link{sobol2007}, \link{soboljansen}, \link{sobolmartinez}}
}
\examples{
# Test case : the non-monotonic Sobol g-function
# The method of sobol requires 2 samples
# There are 8 factors, all following the uniform distribution
# on [0,1]
library(boot)
n <- 1000
X1 <- data.frame(matrix(runif(8 * n), nrow = n))
X2 <- data.frame(matrix(runif(8 * n), nrow = n))
# sensitivity analysis
x <- soboltouati(model = sobol.fun, X1, X2)
print(x)
plot(x)
library(ggplot2)
ggplot(x)
}
\keyword{design}
| /man/soboltouati.Rd | no_license | cran/sensitivity | R | false | false | 4,322 | rd | \name{soboltouati}
\alias{soboltouati}
\alias{tell.soboltouati}
\alias{print.soboltouati}
\alias{plot.soboltouati}
\alias{ggplot.soboltouati}
\title{Monte Carlo Estimation of Sobol' Indices (formulas of Martinez (2011) and Touati (2016))}
\description{
\code{soboltouati} implements the Monte Carlo estimation of
the Sobol' indices for both first-order and total indices using
correlation coefficients-based formulas, at a total cost of
\eqn{(p+2) \times n}{(p + 2) * n} model evaluations.
These are called the Martinez estimators. It also computes their
confidence intervals based on asymptotic properties of empirical
correlation coefficients.
}
\usage{
soboltouati(model = NULL, X1, X2, conf = 0.95, \dots)
\method{tell}{soboltouati}(x, y = NULL, return.var = NULL, \dots)
\method{print}{soboltouati}(x, \dots)
\method{plot}{soboltouati}(x, ylim = c(0, 1), \dots)
\method{ggplot}{soboltouati}(data, mapping = aes(), ylim = c(0, 1), \dots, environment
= parent.frame())
}
\arguments{
\item{model}{a function, or a model with a \code{predict} method,
defining the model to analyze.}
\item{X1}{the first random sample.}
\item{X2}{the second random sample.}
\item{conf}{the confidence level for confidence intervals, or zero to
avoid their computation if they are not needed.}
\item{x}{a list of class \code{"soboltouati"} storing the state of the
sensitivity study (parameters, data, estimates).}
\item{data}{a list of class \code{"soboltouati"} storing the state of the
sensitivity study (parameters, data, estimates).}
\item{y}{a vector of model responses.}
\item{return.var}{a vector of character strings giving further
internal variables names to store in the output object \code{x}.}
\item{ylim}{y-coordinate plotting limits.}
\item{mapping}{Default list of aesthetic mappings to use for plot. If not specified,
must be supplied in each layer added to the plot.}
\item{environment}{[Deprecated] Used prior to tidy evaluation.}
\item{\dots}{any other arguments for \code{model} which are passed
unchanged each time it is called}
}
\value{
\code{soboltouati} returns a list of class \code{"soboltouati"},
containing all the input arguments detailed before, plus the following
components:
\item{call}{the matched call.}
\item{X}{a \code{data.frame} containing the design of experiments.}
\item{y}{the response used}
\item{V}{the estimations of normalized variances of the Conditional
Expectations (VCE) with respect to each factor and also with respect
to the complementary set of each factor ("all but \eqn{X_i}{Xi}").}
\item{S}{the estimations of the Sobol' first-order indices.}
\item{T}{the estimations of the Sobol' total sensitivity indices.}
}
\details{
This estimator supports missing values (NA or NaN), which can occur during the
simulation of the model on the design of experiments (due to code failure),
even though Sobol' indices are no longer rigorous variance-based sensitivity
indices when missing values are present. In this case, a warning is displayed.
}
\references{
J-M. Martinez, 2011, \emph{Analyse de sensibilite globale par decomposition
de la variance}, Presentation in the meeting of GdR Ondes and GdR MASCOT-NUM,
January, 13th, 2011, Institut Henri Poincare, Paris, France.
T. Touati, 2016, Confidence intervals for Sobol' indices.
Proceedings of the SAMO 2016 Conference, Reunion Island, France, December 2016.
T. Touati, 2017, \emph{Intervalles de confiance pour les indices de Sobol},
49emes Journees de la SFdS, Avignon, France, Juin 2017.
}
\author{
Taieb Touati, Khalid Boumhaout
}
\seealso{
\code{\link{sobol}, \link{sobol2002}, \link{sobolSalt}, \link{sobol2007}, \link{soboljansen}, \link{sobolmartinez}}
}
\examples{
# Test case : the non-monotonic Sobol g-function
# The method of sobol requires 2 samples
# There are 8 factors, all following the uniform distribution
# on [0,1]
library(boot)
n <- 1000
X1 <- data.frame(matrix(runif(8 * n), nrow = n))
X2 <- data.frame(matrix(runif(8 * n), nrow = n))
# sensitivity analysis
x <- soboltouati(model = sobol.fun, X1, X2)
print(x)
plot(x)
library(ggplot2)
ggplot(x)
}
\keyword{design}
|
# Extracted usage example for checkarg::isNegativeNumberOrNaOrInfVectorOrNull().
# Judging by its name and the calls below, the function checks that its
# argument is a vector of negative numbers (NA/Inf tolerated) or NULL.
library(checkarg)
### Name: isNegativeNumberOrNaOrInfVectorOrNull
### Title: Wrapper for the checkarg function, using specific parameter
###   settings.
### Aliases: isNegativeNumberOrNaOrInfVectorOrNull
### ** Examples
isNegativeNumberOrNaOrInfVectorOrNull(-2)
# returns TRUE (argument is valid)
isNegativeNumberOrNaOrInfVectorOrNull("X")
# returns FALSE (argument is invalid)
#isNegativeNumberOrNaOrInfVectorOrNull("X", stopIfNot = TRUE)
# throws an exception with a message defined by the message and argumentName parameters
isNegativeNumberOrNaOrInfVectorOrNull(-2, default = -1)
# returns -2 (the argument, rather than the default, since it is not NULL)
#isNegativeNumberOrNaOrInfVectorOrNull("X", default = -1)
# throws an exception with a message defined by the message and argumentName parameters
isNegativeNumberOrNaOrInfVectorOrNull(NULL, default = -1)
# returns -1 (the default, rather than the argument, since it is NULL)
| /data/genthat_extracted_code/checkarg/examples/isNegativeNumberOrNaOrInfVectorOrNull.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 942 | r | library(checkarg)
### Name: isNegativeNumberOrNaOrInfVectorOrNull
### Title: Wrapper for the checkarg function, using specific parameter
### settings.
### Aliases: isNegativeNumberOrNaOrInfVectorOrNull
### ** Examples
isNegativeNumberOrNaOrInfVectorOrNull(-2)
# returns TRUE (argument is valid)
isNegativeNumberOrNaOrInfVectorOrNull("X")
# returns FALSE (argument is invalid)
#isNegativeNumberOrNaOrInfVectorOrNull("X", stopIfNot = TRUE)
# throws exception with message defined by message and argumentName parameters
isNegativeNumberOrNaOrInfVectorOrNull(-2, default = -1)
# returns -2 (the argument, rather than the default, since it is not NULL)
#isNegativeNumberOrNaOrInfVectorOrNull("X", default = -1)
# throws exception with message defined by message and argumentName parameters
isNegativeNumberOrNaOrInfVectorOrNull(NULL, default = -1)
# returns -1 (the default, rather than the argument, since it is NULL)
|
# Practical Machine Learning project: predict the "classe" activity label from
# wearable-sensor readings in pml-training.csv / pml-testing.csv.
setwd("./project")
# Set up parallel computation for model training.
library(doMC) # install.packages('doMC')
registerDoMC(4) # adjust to your number of cores
library(caret)
# Treat empty strings and "#DIV/0!" spreadsheet artifacts as NA on read.
tmp_ds<-read.csv("pml-training.csv", na.strings=c("NA","","#DIV/0!"))
# train_ds<-read.csv("pml-training.csv")
test_ds<-read.csv("pml-testing.csv", na.strings=c("NA","","#DIV/0!"))
# for each of the four sensors, get out the Euler angles and the raw accelerometer, gyroscope and
# magnetometer readings
# NOTE(review): these column indices are hard-coded against the pml CSV
# layout -- confirm they still match if the input files ever change.
var_list<-c(8:11,37:45,46:49,60:68,84:86,102,113:121,122:124,140,151:159)
# test
# Reorder the test rows by (user_name, raw_timestamp_part_1, raw_timestamp_part_2)
# so that results on the test data set can be compared across data sources.
test_key=test_ds[,c(2,3,4)]
# Strip leading/trailing blanks from user_name before ordering on it.
test_key$user_name=gsub("^ +", "", test_key$user_name)
test_key$user_name=gsub(" +$", "", test_key$user_name)
sorted_test_ds<-test_ds[do.call(order, test_key), var_list]
# pml-all.csv carries the true "classe" labels; match them to the test cases
# by user_name plus both raw timestamp parts.
all_ds<-read.csv("pml-all.csv", na.strings=c("NA","","#DIV/0!"))
sub_ds<-subset(all_ds, (user_name %in% test_ds$user_name) & (raw_timestamp_part_1 %in% test_ds$raw_timestamp_part_1) & (raw_timestamp_part_2 %in% test_ds$raw_timestamp_part_2), select=c("user_name","raw_timestamp_part_1","raw_timestamp_part_2","classe"))
sub_key=sub_ds[,c(1,2,3)]
sub_key$user_name=gsub("^ +", "", sub_key$user_name)
sub_key$user_name=gsub(" +$", "", sub_key$user_name)
# Order the labels the same way as sorted_test_ds so the rows line up.
sorted_test_classe<-sub_ds[ do.call(order, sub_key), "classe"]
# split data from pml-training.csv into training and cross validation dataset
set.seed(12345)
# 75% training / 25% validation split; column 160 is the outcome "classe".
inTrain = createDataPartition(tmp_ds$classe, p = 0.75)[[1]]
train_ds = tmp_ds[ inTrain, c(var_list,160)]
valid_ds = tmp_ds[-inTrain,c(var_list,160)]
#lapply(train_ds, class)
# Check for near-zero-variance predictors; none are flagged, so all are kept.
nsv<-nearZeroVar(train_ds, saveMetrics=TRUE)
nsv # no TRUE to delete
# Random Forest on the full feature list (Euler angles + raw readings).
# NOTE(review): randomForest() is called without an explicit
# library(randomForest) -- presumably attached elsewhere; confirm.
# ModFit1<-train(classe ~ ., train_ds, method="rf")
ModFit1<-randomForest(classe ~ ., train_ds, ntree=100)
# Variable importance, reshaped into a sortable data.frame.
vImp<-varImp(ModFit1)
vImp<-data.frame(varname=rownames(vImp), Overall=vImp$Overall, row.names=rownames(vImp))
vImp[order(vImp$Overall, decreasing=TRUE), ]
# Out-of-sample accuracy on the held-out validation split.
pred<-predict(ModFit1, valid_ds)
confusionMatrix(valid_ds$classe, pred)
# Accuracy : ntree=500, 0.9939;
#            ntree=100, 0.9951
pred_test<-predict(ModFit1, sorted_test_ds)
confusionMatrix(sorted_test_classe, pred_test) # Accuracy : 1
# Random forest with pca
# ModFit2<-train(classe ~ ., train_ds, method="rf", preProcess="pca",
#                trControl=trainControl(preProcOptions=list(thresh = 0.9)))
# PCA retaining 90% of the variance; column 53 is the outcome "classe" and
# must be excluded from the predictors before preprocessing.
preProc<-preProcess(train_ds[,-53], method="pca",thresh=0.9) # 53 means classe
trainPC<-predict(preProc, train_ds[,-53])
ModFit2<-randomForest(train_ds$classe~.,data=trainPC)
validPC<-predict(preProc, valid_ds[,-53])
y<-predict(ModFit2, validPC)
confusionMatrix(valid_ds$classe, y) # Accuracy : 0.9689
testPC<-predict(preProc, sorted_test_ds)
y<-predict(ModFit2, testPC)
confusionMatrix(sorted_test_classe, y) # Accuracy : 1
# predict only using raw data
# (accelerometer / gyroscope / magnetometer readings, no Euler angles)
var_list2<-c(37:45,60:68,113:121,151:159)
train_ds2 = tmp_ds[ inTrain, c(var_list2,160)]
valid_ds2 = tmp_ds[-inTrain,c(var_list2,160)]
ModFit3<-randomForest(classe ~ ., train_ds2, ntree=500)
pred<-predict(ModFit3, valid_ds2)
confusionMatrix(valid_ds2$classe, pred)
# Accuracy : ntree=100, 0.9853
#            ntree=500, 0.988
# predict only using Euler angles
var_list3<-c(8:11,46:49,84:86,102,122:124,140)
train_ds3 = tmp_ds[ inTrain, c(var_list3,160)]
valid_ds3 = tmp_ds[-inTrain,c(var_list3,160)]
ModFit4<-randomForest(classe ~ ., train_ds3, ntree=100)
pred<-predict(ModFit4, valid_ds3)
confusionMatrix(valid_ds3$classe, pred)
# Accuracy : ntree=100, 0.9916
#            ntree=500, 0.9906
# write 20 files for prediction (one submission file per test case)
# Placeholder answers; overwritten below by the actual model predictions.
answers = rep("A", 20)

# Write each element of `x` to its own file "problem_id_<i>.txt".
#
# x   : vector of predictions; one file is written per element.
# dir : output directory; the default "." preserves the original behaviour
#       of writing into the current working directory.
#
# Fix: the original looped `for (i in 1:n)`, which iterates over c(1, 0)
# when `x` is empty; seq_len(n) correctly does nothing in that case.
pml_write_files = function(x, dir = ".") {
  n = length(x)
  for (i in seq_len(n)) {
    filename = file.path(dir, paste0("problem_id_", i, ".txt"))
    write.table(x[i], file = filename, quote = FALSE,
                row.names = FALSE, col.names = FALSE)
  }
  invisible(NULL)
}
# Write the placeholder files, then overwrite them with the real
# test-set predictions from the random-forest model ModFit1.
pml_write_files(answers)
pred_test <- predict(ModFit1, test_ds)
pml_write_files(pred_test)
# feature selection with Boruta
# Fix: the original line `Boruta((classe ~ ., train_ds)` contained an extra
# opening parenthesis and did not parse.
# NOTE(review): requires the Boruta package to be attached beforehand.
Boruta(classe ~ ., data = train_ds)
| /project/p2.R | no_license | gdwangh/coursera-dataScientists-8-Practical-Machine-Learning | R | false | false | 3,940 | r | setwd("./project")
# set parall compute
library(doMC) # install.packages('doMC')
registerDoMC(4) #adjust to your number of cores
library(caret)
tmp_ds<-read.csv("pml-training.csv", na.strings=c("NA","","#DIV/0!"))
# train_ds<-read.csv("pml-training.csv")
test_ds<-read.csv("pml-testing.csv", na.strings=c("NA","","#DIV/0!"))
# for each of the four sensors, get out the Euler angles and the raw accelerometer, gyroscope and
# magnetometer readings
var_list<-c(8:11,37:45,46:49,60:68,84:86,102,113:121,122:124,140,151:159)
# test
# 调整顺序,以便比较test data set的结果
test_key=test_ds[,c(2,3,4)]
test_key$user_name=gsub("^ +", "", test_key$user_name)
test_key$user_name=gsub(" +$", "", test_key$user_name)
sorted_test_ds<-test_ds[do.call(order, test_key), var_list]
all_ds<-read.csv("pml-all.csv", na.strings=c("NA","","#DIV/0!"))
sub_ds<-subset(all_ds, (user_name %in% test_ds$user_name) & (raw_timestamp_part_1 %in% test_ds$raw_timestamp_part_1) & (raw_timestamp_part_2 %in% test_ds$raw_timestamp_part_2), select=c("user_name","raw_timestamp_part_1","raw_timestamp_part_2","classe"))
sub_key=sub_ds[,c(1,2,3)]
sub_key$user_name=gsub("^ +", "", sub_key$user_name)
sub_key$user_name=gsub(" +$", "", sub_key$user_name)
sorted_test_classe<-sub_ds[ do.call(order, sub_key), "classe"]
# split data from pml-training.csv into training and cross validation dataset
set.seed(12345)
inTrain = createDataPartition(tmp_ds$classe, p = 0.75)[[1]]
train_ds = tmp_ds[ inTrain, c(var_list,160)]
valid_ds = tmp_ds[-inTrain,c(var_list,160)]
#lapply(train_ds, class)
nsv<-nearZeroVar(train_ds, saveMetrics=TRUE)
nsv # no TRUE to delete
# Random Forest
# ModFit1<-train(classe ~ ., train_ds, method="rf")
ModFit1<-randomForest(classe ~ ., train_ds, ntree=100)
vImp<-varImp(ModFit1)
vImp<-data.frame(varname=rownames(vImp), Overall=vImp$Overall, row.names=rownames(vImp))
vImp[order(vImp$Overall, decreasing=TRUE), ]
pred<-predict(ModFit1, valid_ds)
confusionMatrix(valid_ds$classe, pred)
# Accuracy : ntree=500,0.9939;
# ntree=100,0.9951
pred_test<-predict(ModFit1, sorted_test_ds)
confusionMatrix(sorted_test_classe, pred_test) # Accuracy : 1
# Random forest with pca
# ModFit2<-train(classe ~ ., train_ds, method="rf", preProcess="pca",
# trControl=trainControl(preProcOptions=list(thresh = 0.9)))
preProc<-preProcess(train_ds[,-53], method="pca",thresh=0.9) # 53 means classe
trainPC<-predict(preProc, train_ds[,-53])
ModFit2<-randomForest(train_ds$classe~.,data=trainPC)
validPC<-predict(preProc, valid_ds[,-53])
y<-predict(ModFit2, validPC)
confusionMatrix(valid_ds$classe, y) # Accuracy : 0.9689
testPC<-predict(preProc, sorted_test_ds)
y<-predict(ModFit2, testPC)
confusionMatrix(sorted_test_classe, y) # Accuracy : 1
# predict only using raw data
var_list2<-c(37:45,60:68,113:121,151:159)
train_ds2 = tmp_ds[ inTrain, c(var_list2,160)]
valid_ds2 = tmp_ds[-inTrain,c(var_list2,160)]
ModFit3<-randomForest(classe ~ ., train_ds2, ntree=500)
pred<-predict(ModFit3, valid_ds2)
confusionMatrix(valid_ds2$classe, pred)
# Accuracy : ntree=100, 0.9853
# ntree=500, 0.988
# predict only using Euler angles
var_list3<-c(8:11,46:49,84:86,102,122:124,140)
train_ds3 = tmp_ds[ inTrain, c(var_list3,160)]
valid_ds3 = tmp_ds[-inTrain,c(var_list3,160)]
ModFit4<-randomForest(classe ~ ., train_ds3, ntree=100)
pred<-predict(ModFit4, valid_ds3)
confusionMatrix(valid_ds3$classe, pred)
# Accuracy : ntree=100, 0.9916
# ntree=500, 0.9906
# write 20 files for prediction
answers = rep("A", 20)
# Write each element of `x` to its own file "problem_id_<i>.txt".
#
# x   : vector of predictions; one file is written per element.
# dir : output directory; the default "." preserves the original behaviour
#       of writing into the current working directory.
#
# Fix: the original looped `for (i in 1:n)`, which iterates over c(1, 0)
# when `x` is empty; seq_len(n) correctly does nothing in that case.
pml_write_files = function(x, dir = ".") {
  n = length(x)
  for (i in seq_len(n)) {
    filename = file.path(dir, paste0("problem_id_", i, ".txt"))
    write.table(x[i], file = filename, quote = FALSE,
                row.names = FALSE, col.names = FALSE)
  }
  invisible(NULL)
}
# Write the placeholder files, then overwrite them with the real
# test-set predictions from the random-forest model ModFit1.
pml_write_files(answers)
pred_test <- predict(ModFit1, test_ds)
pml_write_files(pred_test)
# feature selection with Boruta
# Fix: the original line `Boruta((classe ~ ., train_ds)` contained an extra
# opening parenthesis and did not parse.
# NOTE(review): requires the Boruta package to be attached beforehand.
Boruta(classe ~ ., data = train_ds)
|
#' # Title of R Project Here
#+ knitr setup, include=FALSE
# some setup options for outputing markdown files; feel free to ignore these
# These are the default options for this report; more information about options here: https://yihui.name/knitr/options/
knitr::opts_chunk$set(eval = TRUE, # evaluate code chunks
include = TRUE, # include the console output of the code in the final document
echo = FALSE, # include the code that generated the report in the final report
warning = FALSE, # include warnings
message = FALSE, # include console messages
collapse = TRUE, # Merge code blocks and output blocks, if possible.
dpi = 300, # the default figure resolution
fig.dim = c(9, 5), # the default figure dimensions
out.width = '98%', # the default figure output width
out.height = '98%', # the default figure output height
cache = TRUE) # save the calculations so that kniting is faster each time. (Sometimes this option can cause issues and images won't reflect the most recent code changes, if this happens, just delete the *_cache folder and reknit the code.)
#+ loading libraries and set seed
# plyr is attached before the tidyverse so that dplyr's verbs (loaded later)
# mask plyr's, not the other way round.
library(plyr) # always load before tidyverse to avoid conflicts with dplyr
packageVersion("plyr")  # record the version used, for reproducibility
library(tidyverse)
packageVersion("tidyverse")  # record the version used, for reproducibility
set.seed(12345)  # fix the RNG so knitted results are reproducible
#' ## The start of your analyses
#' Any markdown following the ```#'``` will be interpreted as markdown.
#' For example:
#' # Header 1
#' ## Header 2
#' ### Header 3
#' #### Header 4
#' **Bold**
#' _Italics_
#'
#' * This is
#' * a bulleted list
#'
#' 1. This is
#' 2. A numbered list
#'
#' Anything not prefaced by ```#'``` will be interepreted as R code.
#' For example:
random_numbers <- rnorm(n = 50)
hist(random_numbers)
| /R_analyses_template_spin_ready.R | no_license | hhollandmoritz/Useful_templates | R | false | false | 1,914 | r | #' # Title of R Project Here
#+ knitr setup, include=FALSE
# some setup options for outputing markdown files; feel free to ignore these
# These are the default options for this report; more information about options here: https://yihui.name/knitr/options/
knitr::opts_chunk$set(eval = TRUE, # evaluate code chunks
include = TRUE, # include the console output of the code in the final document
echo = FALSE, # include the code that generated the report in the final report
warning = FALSE, # include warnings
message = FALSE, # include console messages
collapse = TRUE, # Merge code blocks and output blocks, if possible.
dpi = 300, # the default figure resolution
fig.dim = c(9, 5), # the default figure dimensions
out.width = '98%', # the default figure output width
out.height = '98%', # the default figure output height
cache = TRUE) # save the calculations so that kniting is faster each time. (Sometimes this option can cause issues and images won't reflect the most recent code changes, if this happens, just delete the *_cache folder and reknit the code.)
#+ loading libraries and set seed
library(plyr) # always load before tidyverse to avoid conflicts with dplyr
packageVersion("plyr")
library(tidyverse)
packageVersion("tidyverse")
set.seed(12345)
#' ## The start of your analyses
#' Any markdown following the ```#'``` will be interpreted as markdown.
#' For example:
#' # Header 1
#' ## Header 2
#' ### Header 3
#' #### Header 4
#' **Bold**
#' _Italics_
#'
#' * This is
#' * a bulleted list
#'
#' 1. This is
#' 2. A numbered list
#'
#' Anything not prefaced by ```#'``` will be interepreted as R code.
#' For example:
random_numbers <- rnorm(n = 50)
hist(random_numbers)
|
#library(doParallel)
#require(foreach)
# parallel setting (currently disabled; the loop below runs sequentially)
#cl <- makeCluster(2)
#registerDoParallel(cl)
# list .vcf.gz files
original_file_dir = "~/data/HLI/"
destination_dir = "~/data/HLI_filtered/"
#reference_dir = "~/data/references/hg38_ucsc.sdf"
#combined_dir = "~/data/HLI_output/combined_cases/"
# NOTE(review): the second dot in the pattern is an unescaped regex
# metacharacter; it still matches *.vcf.gz names, just less strictly
# than "\\.vcf\\.gz$" would.
filenames <- list.files(original_file_dir, pattern = "\\.vcf.gz$")
# Group files by the first two "_"-separated tokens of each file name;
# one group is expected per donor/recipient pair.
groupnames <- sapply(1:length(filenames), function(x) paste0(unlist(strsplit(filenames[x], "_"))[1:2], collapse ="_"))
unique_groups <- unique(groupnames)
num_files <- length(unique_groups)  # number of groups, not individual files
##### need csv format of summary
#foreach(id = 1:num_files) %dopar% {

# Run `rtg vcffilter -k PASS` on every donor/recipient .vcf.gz pair, keeping
# only PASS records, and log start/finish times plus every command line into
# <destination_dir>/README. rtg's own output goes to per-file .out logs.

# Helper: append the command line to the README, then execute it.
# Preserves the original echo-then-run pattern for both donor and recipient.
log_and_run <- function(cmd) {
  system(paste0("echo \"", cmd, "\" >> ", destination_dir, "README"),
         intern = TRUE)
  system(cmd, intern = TRUE)
}

# `>` (not `>>`) truncates the README so each run starts with a fresh log.
system(paste0("echo \"Starting Time: ", Sys.time(), "\" > ", destination_dir, "README"),
       intern = TRUE)
for (id in seq_len(num_files)) {   # seq_len() is safe when num_files == 0
  index <- which(grepl(paste0(unique_groups[id], "_"), filenames))
  if (length(index) == 2) {
    # '_D_' marks the donor file of the pair, '_R_' the recipient file.
    donor_file <- filenames[index[grepl('_D_', filenames[index])]]
    recipient_file <- filenames[index[grepl('_R_', filenames[index])]]
    # NOTE(review): gsub() patterns use unescaped dots; fine for these
    # file names, but not a strict ".vcf.gz" match.
    donor_file_filterd <- gsub(".vcf.gz", "_filtered.vcf.gz", donor_file)
    recipient_file_filtered <- gsub(".vcf.gz", "_filtered.vcf.gz", recipient_file)
    donor_log <- gsub(".vcf.gz", ".out", donor_file)
    recipient_log <- gsub(".vcf.gz", ".out", recipient_file)
    # RTG_MEM=16G
    log_and_run(paste0("rtg vcffilter -i ", original_file_dir, donor_file,
                       " -o ", destination_dir, donor_file_filterd,
                       " -k PASS > ", destination_dir, donor_log))
    log_and_run(paste0("rtg vcffilter -i ", original_file_dir, recipient_file,
                       " -o ", destination_dir, recipient_file_filtered,
                       " -k PASS > ", destination_dir, recipient_log))
  } else {
    # Groups without exactly one donor and one recipient were silently
    # skipped before; report them so missing pairs are noticed.
    message("Skipping group ", unique_groups[id],
            ": expected 2 files, found ", length(index))
  }
}
system(paste0("echo \"Finishing Time: ", Sys.time(), "\" >> ", destination_dir, "README"),
       intern = TRUE)

## sanity check
#getDoParWorkers() # number of workers doing parallel for-loop
#getDoParName() # the name and version of the currently registered backend
#getDoParVersion()
#stopCluster(cl)
| /scripts/aws_ec2_RTG_vcffilter_keepPASS.R | permissive | hhuang2018/WGS_analysis | R | false | false | 2,667 | r | #library(doParallel)
#require(foreach)
# paralle setting
#cl <- makeCluster(2)
#registerDoParallel(cl)
# list .vcf.gz files
original_file_dir = "~/data/HLI/"
destination_dir = "~/data/HLI_filtered/"
#reference_dir = "~/data/references/hg38_ucsc.sdf"
#combined_dir = "~/data/HLI_output/combined_cases/"
filenames <- list.files(original_file_dir, pattern = "\\.vcf.gz$")
groupnames <- sapply(1:length(filenames), function(x) paste0(unlist(strsplit(filenames[x], "_"))[1:2], collapse ="_"))
unique_groups <- unique(groupnames)
num_files <- length(unique_groups)
##### need csv format of summary
#foreach(id = 1:num_files) %dopar% {
system(paste0("echo \"Starting Time: ", Sys.time(), "\" > ", destination_dir, "README"),
intern = TRUE)
for(id in 1:num_files){
index <- which(grepl(paste0(unique_groups[id], "_"), filenames))
if(length(index) == 2){
donor_file <- filenames[index[grepl('_D_', filenames[index])]]
recipient_file <- filenames[index[grepl('_R_', filenames[index])]]
donor_file_filterd <- gsub(".vcf.gz", "_filtered.vcf.gz", donor_file)
recipient_file_filtered <- gsub(".vcf.gz", "_filtered.vcf.gz", recipient_file)
donor_log <- gsub(".vcf.gz", ".out", donor_file)
recipient_log <- gsub(".vcf.gz", ".out", recipient_file)
#cat(paste0(unique_groups[id], ": \n"))
#cat(paste0(donor_file, " <-> ", recipient_file, "\n"))
# RTG_MEM=16G
system(paste0("echo \"rtg vcffilter -i ", original_file_dir, donor_file, " -o ", destination_dir,
donor_file_filterd, " -k PASS > ", destination_dir, donor_log, "\" >> ", destination_dir,"README"),
intern = TRUE)
system(paste0("rtg vcffilter -i ", original_file_dir, donor_file, " -o ", destination_dir,
donor_file_filterd, " -k PASS > ", destination_dir ,donor_log),
intern = TRUE)
system(paste0("echo \"rtg vcffilter -i ", original_file_dir, recipient_file, " -o ", destination_dir,
recipient_file_filtered, " -k PASS > ", destination_dir, recipient_log, "\" >> ", destination_dir, "README"),
intern = TRUE)
system(paste0("rtg vcffilter -i ", original_file_dir, recipient_file, " -o ", destination_dir,
recipient_file_filtered, " -k PASS > ", destination_dir, recipient_log),
intern = TRUE)
}
}
system(paste0("echo \"Finishing Time: ", Sys.time(), "\" >> ", destination_dir, "README"),
intern = TRUE)
## sanity check
#getDoParWorkers() # number of workers doing parallel for-loop
#getDoParName() # the name and version of the currently registered backend
#getDoParVersion()
#stopCluster(cl)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect_operations.R
\name{connect_start_contact_recording}
\alias{connect_start_contact_recording}
\title{Starts recording the contact:}
\usage{
connect_start_contact_recording(
InstanceId,
ContactId,
InitialContactId,
VoiceRecordingConfiguration
)
}
\arguments{
\item{InstanceId}{[required] The identifier of the Amazon Connect instance. You can \href{https://docs.aws.amazon.com/connect/latest/adminguide/find-instance-arn.html}{find the instance ID}
in the Amazon Resource Name (ARN) of the instance.}
\item{ContactId}{[required] The identifier of the contact.}
\item{InitialContactId}{[required] The identifier of the contact. This is the identifier of the contact
associated with the first interaction with the contact center.}
\item{VoiceRecordingConfiguration}{[required] The person being recorded.}
}
\description{
Starts recording the contact:
See \url{https://www.paws-r-sdk.com/docs/connect_start_contact_recording/} for full documentation.
}
\keyword{internal}
| /cran/paws.customer.engagement/man/connect_start_contact_recording.Rd | permissive | paws-r/paws | R | false | true | 1,064 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect_operations.R
\name{connect_start_contact_recording}
\alias{connect_start_contact_recording}
\title{Starts recording the contact:}
\usage{
connect_start_contact_recording(
InstanceId,
ContactId,
InitialContactId,
VoiceRecordingConfiguration
)
}
\arguments{
\item{InstanceId}{[required] The identifier of the Amazon Connect instance. You can \href{https://docs.aws.amazon.com/connect/latest/adminguide/find-instance-arn.html}{find the instance ID}
in the Amazon Resource Name (ARN) of the instance.}
\item{ContactId}{[required] The identifier of the contact.}
\item{InitialContactId}{[required] The identifier of the contact. This is the identifier of the contact
associated with the first interaction with the contact center.}
\item{VoiceRecordingConfiguration}{[required] The person being recorded.}
}
\description{
Starts recording the contact:
See \url{https://www.paws-r-sdk.com/docs/connect_start_contact_recording/} for full documentation.
}
\keyword{internal}
|
#### Test for significant changes in the predicted yield values for the climate period ####
'
- Load the tidy data
- Filter for climate models
- Filter for the reference and climate periods
- Make on data.frame including the observations for all three climate periods
- Group by comIds and apply test to each cohort
- Combine with spatial information
'
#### Input ####
'
Spatial Information: Shapefile of comdIDs ("vg2500_krs")
BasePrediction.R: tidy.data.frames of yield and yield anomaly predictions based on different estimation models
'
###################
## Load Packages ##
####################################################################################################################################################################
##############################
#### Preparation for loop ####
# BaseModel.R defines the shared objects used below (nameList_climate,
# namelist_RCMs, climateyears_list, ...) and loads the required packages.
source("./script/script_raw/BaseModel.R")
#### Create Container to store plots in according to their predictive model #####
# One output folder per climate-model name; existing folders are kept silently.
for (s in seq_along(nameList_climate)){
  dir.create(paste("./figures/figures_exploratory/Proj/Wilcoxon/", nameList_climate[[s]] ,sep=""), showWarnings = FALSE)
}
#### Load tidy data.frame of Yield and Yield_Anomaly Predictions ####
PredictData_df_tidy <- read_csv(paste("./data/data_proj/output/Climate_predicted_allRCMs_total.csv", sep="") )
PredictData_df_tidy  # print for a quick visual check
#################################################################################################
#### Loop to create one data.frame for each climate period of 30 years, ####
#### i.e the reference period 1971 - 2000 and the climate periods 2021-2050 and 2070 - 2099 ###
##############################################################################################
## Select variables needed for test on Yield Anomalies ##
# Keep the model identifier, district id, year, and all sMA_* prediction columns.
PredictData_df_tidy <- PredictData_df_tidy %>% select("RCM", "comId", "year", contains("sMA"))
#### Generate list with data.frame container for each climate period: ####
PredictData_df_tidy_test_list <- list(PredictData_df_tidy_test_1971 = data.frame(), PredictData_df_tidy_test_2021= data.frame(),
                                      PredictData_df_tidy_test_2070 = data.frame())
#### Start of loop through three time periods ####
# climateyears_list (from BaseModel.R) holds the start years in [[1]] and
# the end years in [[2]] of the three 30-year windows.
for (r in 1:3){
  PredictData_df_tidy_test_list[[r]] <-
    PredictData_df_tidy %>%
    filter(year >= climateyears_list[[1]][r] & year <= climateyears_list[[2]][r])
}
str(PredictData_df_tidy_test_list[[1]], 1)
str(PredictData_df_tidy_test_list[[2]], 1)
str(PredictData_df_tidy_test_list[[3]], 1)
#### Rename y and y_anomaly of each climate period accordingly ####
# Suffix the four sMA_* columns (positions 4:7) with the period's start year
# so the three periods can sit side by side in one data.frame.
names(PredictData_df_tidy_test_list[[1]])[4:7] <- paste(names(PredictData_df_tidy_test_list[[1]])[4:7] , "1971", sep="_" )
names(PredictData_df_tidy_test_list[[2]])[4:7] <- paste(names(PredictData_df_tidy_test_list[[2]])[4:7] , "2021", sep="_" )
names(PredictData_df_tidy_test_list[[3]])[4:7] <- paste(names(PredictData_df_tidy_test_list[[3]])[4:7] , "2070", sep="_" )
#### Make one large data.frame of the data of the three climate periods used in Wilcoxon Test ####
# NOTE(review): bind_cols() pairs rows purely by position -- this assumes all
# three period data.frames share the same row order (RCM x comId ordering);
# verify this holds, since no key-based join is performed.
test_data <- bind_cols(PredictData_df_tidy_test_list[[1]], bind_cols(PredictData_df_tidy_test_list[[2]][,4:7], PredictData_df_tidy_test_list[[3]][,4:7]))
str( test_data,1)
# test_data$year <- NULL
# #### Compare columns by Wilcoxon test #####
# (wilcox.test( test_data$Y_anomaly_1971, test_data$Y_anomaly_2070))
# (wilcox.test( test_data$Y_anomaly_1971, test_data$Y_anomaly_2070))$p.value
###########################################################################################
#### Loop through five climate models to provide maps of p-values of the Wilcoxon Test ####
#########################################################################################
##############################
#### Preparation for loop ####
#### Create Container to store p-values and plots of the test results in ####
# One chained assignment: every results container starts out with the same
# shape -- a named list with one (empty) slot per RCM plus an "All_RCMs" slot.
test_data_grouped_2021_anomaly_list <-
  test_data_grouped_2070_anomaly_list <-
  test_data_grouped_2021_anomaly_plots_list <-
  test_data_grouped_2070_anomaly_plots_list <-
  test_data_grouped_2021_anomaly_plots_paired_list <-
  test_data_grouped_2070_anomaly_plots_paired_list <-
  test_data_grouped_2021_anomaly_list_noTitle <-
  test_data_grouped_2070_anomaly_list_noTitle <-
  test_data_grouped_2021_anomaly_plots_list_noTitle <-
  test_data_grouped_2070_anomaly_plots_list_noTitle <-
  test_data_grouped_2021_anomaly_plots_paired_list_noTitle <-
  test_data_grouped_2070_anomaly_plots_paired_list_noTitle <-
  test_data_grouped_2021_anomaly_list_noTitle_noLegend <-
  test_data_grouped_2070_anomaly_list_noTitle_noLegend <-
  test_data_grouped_2021_anomaly_plots_list_noTitle_noLegend <-
  test_data_grouped_2070_anomaly_plots_list_noTitle_noLegend <-
  test_data_grouped_2021_anomaly_plots_paired_list_noTitle_noLegend <-
  test_data_grouped_2070_anomaly_plots_paired_list_noTitle_noLegend <-
  list(MPI=list(), DMI=list(), KNMI=list(), ICTP=list(), SMI=list(), All_RCMs = list())
#### Lists Names used in figures ####
nameList_climate
namelist_RCMs_total <- c(namelist_RCMs, "All_RCMs")
# Leftover interactive-debugging assignment (loop index preset to 1).
s=1
s
#######################################
#### Define Function used in Loop ####
#####################################
## time Period ##
timePeriod <- list("2021 - 2050", "2070 - 2099")
## Paired ##
testPaired <- list( "non paired Wilcoxon - Test", "paired Wilcoxon - Test")
## Legend List ##
list_legend_Variables <- c("none", "right")
list_legend_export <- c("noLegend", "legend")
## title ##
# White title text effectively hides the title on export; black shows it.
list_titleVariables <- list(element_text(color="white") , element_text(color="black") )
list_title_export <- list("noTitle", "title")
nameList_climate
# Draw a choropleth map of Wilcoxon-test p-values per district (comId).
#
# dataSet : sf data.frame of test results; column 5 + Var holds the p-values.
# timeP   : index into timePeriod (1 = 2021-2050, 2 = 2070-2099) for the title.
# paired  : index into testPaired (1 = non paired, 2 = paired) for the title.
# Var     : offset selecting which p-value column of dataSet to map.
# Tit     : index into list_titleVariables (1 = hide title, 2 = show title).
# Leg     : index into list_legend_Variables (1 = no legend, 2 = legend right).
#
# P-values are binned at 0.05 and 0.1 and shaded with a reversed "Blues" scale.
# NOTE(review): `RCMs` inside ggtitle() is a free variable, not a parameter --
# it must exist in the calling environment (it looks like it should be the
# RCM loop index); confirm this is intended.
# Also relies on globals defined earlier in the script: vg2500_krs,
# timePeriod, namelist_RCMs_total, testPaired, list_legend_Variables,
# list_titleVariables.
plot_variables = function (dataSet, timeP, paired, Var, Tit, Leg){
  ggplot(dataSet) +
    geom_sf(data = vg2500_krs, colour="white", fill="black") +
    geom_sf(aes(fill = cut(dataSet[[5 + Var]], c(-0.1,0.05,0.1,1), m=0) )) +
    ggtitle(paste(timePeriod[[timeP]], ": " ,namelist_RCMs_total[[RCMs]], " - ", testPaired[[paired]], sep="")) +
    # ggtitle("2021 - Anomalies - non paired")
    scale_fill_brewer(type = "seq", palette = "Blues", direction = -1, drop = FALSE,
                      labels=c("< 0.05", "< 0.1", "> 0.1")) +
    guides(fill = guide_legend(title="p-values")) +
    theme_bw() +
    theme(legend.position = list_legend_Variables[Leg]) +
    # theme(legend.title=element_blank()) +
    theme(plot.title =list_titleVariables [[Tit]] )
}
# - \nH0: no shift in mean
#### Start of loop through the five RCMs (plus the all-RCM average) ####
for (l in seq_along(namelist_RCMs_total)) {
  print(namelist_RCMs_total[[l]])

  ## Per-county (comId) Wilcoxon tests: anomalies 1971-2000 vs 2070-2099 ##
  test_data_grouped_2070_anomaly_list[[l]] <-
    test_data %>%
    filter(RCM == namelist_RCMs_total[[l]]) %>%
    group_by(comId) %>%
    summarise(
      test_sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean =
        wilcox.test(sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_2070)$p.value,
      test_sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_paired =
        wilcox.test(sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_2070, paired = TRUE)$p.value,
      test_sMA_lm.fit_SMI_6_Jul_anomaly_demean =
        wilcox.test(sMA_lm.fit_SMI_6_Jul_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jul_anomaly_demean_2070)$p.value,
      # BUG FIX: the paired July test previously compared July 1971 against the
      # Jun-Aug 2070 series (copy-paste slip); it now compares July vs July.
      test_sMA_lm.fit_SMI_6_Jul_anomaly_demean_paired =
        wilcox.test(sMA_lm.fit_SMI_6_Jul_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jul_anomaly_demean_2070, paired = TRUE)$p.value,
      test_sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean =
        wilcox.test(sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_1971,
                    sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_2070)$p.value,
      test_sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_paired =
        wilcox.test(sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_1971,
                    sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_2070, paired = TRUE)$p.value,
      test_sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean =
        wilcox.test(sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_2070)$p.value,
      test_sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_paired =
        wilcox.test(sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_2070, paired = TRUE)$p.value
    ) %>%
    collect()

  ## Per-county Wilcoxon tests: anomalies 1971-2000 vs 2021-2050 ##
  test_data_grouped_2021_anomaly_list[[l]] <-
    test_data %>%
    filter(RCM == namelist_RCMs_total[[l]]) %>%
    group_by(comId) %>%
    summarise(
      test_sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean =
        wilcox.test(sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_2021)$p.value,
      test_sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_paired =
        wilcox.test(sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_2021, paired = TRUE)$p.value,
      test_sMA_lm.fit_SMI_6_Jul_anomaly_demean =
        wilcox.test(sMA_lm.fit_SMI_6_Jul_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jul_anomaly_demean_2021)$p.value,
      # BUG FIX: same July-vs-Jun-Aug copy-paste slip as in the 2070 block above.
      test_sMA_lm.fit_SMI_6_Jul_anomaly_demean_paired =
        wilcox.test(sMA_lm.fit_SMI_6_Jul_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jul_anomaly_demean_2021, paired = TRUE)$p.value,
      test_sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean =
        wilcox.test(sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_1971,
                    sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_2021)$p.value,
      test_sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_paired =
        wilcox.test(sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_1971,
                    sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_2021, paired = TRUE)$p.value,
      test_sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean =
        wilcox.test(sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_2021)$p.value,
      test_sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_paired =
        wilcox.test(sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_2021, paired = TRUE)$p.value
    )

  #############################
  #### Join the p-values onto the county polygons ####
  test_data_grouped_2021_anomaly_spatial <- inner_join(vg2500_krs, test_data_grouped_2021_anomaly_list[[l]], by = "comId")
  test_data_grouped_2070_anomaly_spatial <- inner_join(vg2500_krs, test_data_grouped_2070_anomaly_list[[l]], by = "comId")

  ##############
  #### Maps ####
  ## Switch conventions (see plot_variables):
  ##   Var    1 = non-paired p-value column, 2 = paired column (position
  ##          5 + Var in the joined sf object -- TODO confirm vs vg2500_krs)
  ##   paired index into testPaired (title label only)
  ##   Tit    1 = hidden title, 2 = visible title
  ##   Leg    1 = no legend,    2 = legend on the right

  #### non paired, title + legend ####
  Var <- 1
  paired <- 1
  Tit <- 2
  Leg <- 2
  # BUG FIX: this debug print previously ran before `Var` was assigned, which
  # errors on the first iteration; it now runs after `Var` is set.
  print(names(test_data_grouped_2021_anomaly_spatial[5 + Var]))
  timeP <- 1
  test_data_grouped_2021_anomaly_plots_list[[l]] <- plot_variables(test_data_grouped_2021_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  timeP <- 2
  test_data_grouped_2070_anomaly_plots_list[[l]] <- plot_variables(test_data_grouped_2070_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  ggsave(paste("./figures/figures_exploratory/Proj/Wilcoxon/", nameList_climate[[s]],"/Wilcoxon_2070_anomaly_",namelist_RCMs_total[[l]],".pdf", sep="") ,
         test_data_grouped_2070_anomaly_plots_list[[l]] , width=16, height=9)
  ggsave(paste("./figures/figures_exploratory/Proj/Wilcoxon/", nameList_climate[[s]],"/Wilcoxon_2021_anomaly_",namelist_RCMs_total[[l]],".pdf", sep="") ,
         test_data_grouped_2021_anomaly_plots_list[[l]] , width=16, height=9)

  #### non paired - no title ####
  Var <- 1
  paired <- 1
  Tit <- 1
  Leg <- 2
  timeP <- 1
  test_data_grouped_2021_anomaly_plots_list_noTitle[[l]] <- plot_variables(test_data_grouped_2021_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  timeP <- 2
  test_data_grouped_2070_anomaly_plots_list_noTitle[[l]] <- plot_variables(test_data_grouped_2070_anomaly_spatial, timeP, paired, Var, Tit, Leg)

  #### non paired - no title, no legend ####
  Var <- 1
  paired <- 1
  Tit <- 1
  Leg <- 1
  timeP <- 1
  test_data_grouped_2021_anomaly_plots_list_noTitle_noLegend[[l]] <- plot_variables(test_data_grouped_2021_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  timeP <- 2
  test_data_grouped_2070_anomaly_plots_list_noTitle_noLegend[[l]] <- plot_variables(test_data_grouped_2070_anomaly_spatial, timeP, paired, Var, Tit, Leg)

  #### paired, title + legend ####
  Var <- 2
  # BUG FIX: `paired` was never switched to 2 here (the original assigned
  # `timeP <- 2` twice instead, immediately overwritten), so the paired maps
  # carried a "non paired" title.
  paired <- 2
  Tit <- 2
  Leg <- 2
  timeP <- 1
  test_data_grouped_2021_anomaly_plots_paired_list[[l]] <- plot_variables(test_data_grouped_2021_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  timeP <- 2
  test_data_grouped_2070_anomaly_plots_paired_list[[l]] <- plot_variables(test_data_grouped_2070_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  ggsave(paste("./figures/figures_exploratory/Proj/Wilcoxon/", nameList_climate[[s]],"/Wilcoxon_2070_anomaly_paired_",namelist_RCMs_total[[l]],".pdf", sep="") ,
         test_data_grouped_2070_anomaly_plots_paired_list[[l]], width=16, height=9)
  ggsave(paste("./figures/figures_exploratory/Proj/Wilcoxon/", nameList_climate[[s]],"/Wilcoxon_2021_anomaly_paired_",namelist_RCMs_total[[l]],".pdf", sep="") ,
         test_data_grouped_2021_anomaly_plots_paired_list[[l]], width=16, height=9)

  #### paired - no title ####
  Var <- 2
  paired <- 2
  Tit <- 1
  Leg <- 2
  timeP <- 1
  test_data_grouped_2021_anomaly_plots_paired_list_noTitle[[l]] <- plot_variables(test_data_grouped_2021_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  timeP <- 2
  test_data_grouped_2070_anomaly_plots_paired_list_noTitle[[l]] <- plot_variables(test_data_grouped_2070_anomaly_spatial, timeP, paired, Var, Tit, Leg)

  #### paired - no title, no legend ####
  Var <- 2
  paired <- 2
  Tit <- 1
  Leg <- 1
  timeP <- 1
  test_data_grouped_2021_anomaly_plots_paired_list_noTitle_noLegend[[l]] <- plot_variables(test_data_grouped_2021_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  timeP <- 2
  test_data_grouped_2070_anomaly_plots_paired_list_noTitle_noLegend[[l]] <- plot_variables(test_data_grouped_2070_anomaly_spatial, timeP, paired, Var, Tit, Leg)
}
# }
# rm(list=ls())
#### Arrange the paired-test maps (2021 on top, 2070 below) per RCM and export ####
## NOTE(review): the plot lists are filled by position from namelist_RCMs_total
## (defined in BaseModel.R), while the panel titles below assume the order
## DMI, ICTP, KNMI, MPI, SMHI -- and the container template above was named
## MPI, DMI, KNMI, ICTP, SMI. Confirm that indices 1..5 match these names.
## NOTE(review): family = " Arial" carries a leading space in the font name.
DMI_annotated <- annotate_figure( ggarrange(test_data_grouped_2021_anomaly_plots_paired_list_noTitle_noLegend[[1]], test_data_grouped_2070_anomaly_plots_paired_list_noTitle_noLegend[[1]], labels = c("a1)", "a2)"),
ncol=1, nrow=2) , top = text_grob("DMI", color = "black", face = "bold", size = 20, family= " Arial"))
ICTP_annotated <- annotate_figure( ggarrange(test_data_grouped_2021_anomaly_plots_paired_list_noTitle_noLegend[[2]], test_data_grouped_2070_anomaly_plots_paired_list_noTitle_noLegend[[2]], labels = c("b1)", "b2)"),
ncol=1, nrow=2) , top = text_grob("ICTP", color = "black", face = "bold", size = 20, family= " Arial"))
KNMI_annotated <- annotate_figure( ggarrange(test_data_grouped_2021_anomaly_plots_paired_list_noTitle_noLegend[[3]], test_data_grouped_2070_anomaly_plots_paired_list_noTitle_noLegend[[3]], labels = c("c1)", "c2)"),
ncol=1, nrow=2) , top = text_grob("KNMI", color = "black", face = "bold", size = 20, family= " Arial"))
MPI_annotated <- annotate_figure( ggarrange(test_data_grouped_2021_anomaly_plots_paired_list_noTitle_noLegend[[4]], test_data_grouped_2070_anomaly_plots_paired_list_noTitle_noLegend[[4]], labels = c("d1)", "d2)"),
ncol=1, nrow=2) , top = text_grob("MPI", color = "black", face = "bold", size = 20, family= " Arial"))
SMHI_annotated <- annotate_figure( ggarrange(test_data_grouped_2021_anomaly_plots_paired_list_noTitle_noLegend[[5]], test_data_grouped_2070_anomaly_plots_paired_list_noTitle_noLegend[[5]], labels = c("e1)", "e2)"),
ncol=1, nrow=2
# , common.legend = TRUE, legend = "right"
) , top = text_grob("SMHI", color = "black", face = "bold", size = 20, family= " Arial"))
## Slot 6 is the averaged-RCM panel; it keeps its legend (shared on the right).
AllRCMs_annotated <- annotate_figure( ggarrange(test_data_grouped_2021_anomaly_plots_paired_list_noTitle[[6]],
test_data_grouped_2070_anomaly_plots_paired_list_noTitle[[6]],
labels = c("f1)", "f2)"),
ncol=1, nrow=2, common.legend = TRUE, legend = "right") , top = text_grob("Avg. of RCMs", color = "black", face = "bold", size = 20, family= " Arial"))
## Combine the six annotated panels into one row and export as a single PNG.
test_data_grouped_2021_anomaly_paired_list_allPlots <-
ggarrange( DMI_annotated, ICTP_annotated, KNMI_annotated, MPI_annotated, SMHI_annotated, AllRCMs_annotated , ncol=6, nrow = 1,
common.legend = TRUE, legend = "right", align ="hv")
test_data_grouped_2021_anomaly_paired_list_allPlots %>%
ggexport(filename = paste("./figures/figures_exploratory/Proj/Wilcoxon/", nameList_climate[[1]],"/Wilcoxon_AllRCMs.png", sep=""),
width=1500, height=500)
| /script_raw/WilcoxonTest.R | no_license | MikyPiky/Project2Script | R | false | false | 18,889 | r | #### Test for significant changes in the predicted yield values for the climate period ####
'
- Load the tidy data
- Filter for climate models
- Filter for the reference and climate periods
- Make on data.frame including the observations for all three climate periods
- Group by comIds and apply test to each cohort
- Combine with spatial information
'
#### Input ####
'
Spatial Information: Shapefile of comdIDs ("vg2500_krs")
BasePrediction.R: tidy.data.frames of yield and yield anomaly predictions based on different estimation models
'
###################
## Load Packages ##
####################################################################################################################################################################
##############################
#### Preparation for loop ####
# Project-wide setup comes from BaseModel.R (defines nameList_climate,
# namelist_RCMs, climateyears_list, vg2500_krs and the required packages).
source("./script/script_raw/BaseModel.R")
#### Create one output directory per climate/predictive-model name #####
for (s in seq_along(nameList_climate)){
dir.create(paste("./figures/figures_exploratory/Proj/Wilcoxon/", nameList_climate[[s]] ,sep=""), showWarnings = FALSE)
}
#### Load tidy data.frame of Yield and Yield_Anomaly Predictions ####
PredictData_df_tidy <- read_csv(paste("./data/data_proj/output/Climate_predicted_allRCMs_total.csv", sep="") )
PredictData_df_tidy
#################################################################################################
#### Create one data.frame for each 30-year climate period, ####
#### i.e. the reference period 1971 - 2000 and the climate periods 2021-2050 and 2070 - 2099 ###
##############################################################################################
## Keep the key columns and the sMA_* anomaly variables used by the tests ##
PredictData_df_tidy <- PredictData_df_tidy %>% select("RCM", "comId", "year", contains("sMA"))
#### Generate list with data.frame container for each climate period: ####
PredictData_df_tidy_test_list <- list(PredictData_df_tidy_test_1971 = data.frame(), PredictData_df_tidy_test_2021= data.frame(),
PredictData_df_tidy_test_2070 = data.frame())
#### Fill the three slots; r indexes the three periods in climateyears_list ####
for (r in 1:3){
PredictData_df_tidy_test_list[[r]] <-
PredictData_df_tidy %>%
filter(year >= climateyears_list[[1]][r] & year <= climateyears_list[[2]][r])
}
str(PredictData_df_tidy_test_list[[1]], 1)
str(PredictData_df_tidy_test_list[[2]], 1)
str(PredictData_df_tidy_test_list[[3]], 1)
#### Tag the value columns (4:7) of each period with the period's start year ####
names(PredictData_df_tidy_test_list[[1]])[4:7] <- paste(names(PredictData_df_tidy_test_list[[1]])[4:7] , "1971", sep="_" )
names(PredictData_df_tidy_test_list[[2]])[4:7] <- paste(names(PredictData_df_tidy_test_list[[2]])[4:7] , "2021", sep="_" )
names(PredictData_df_tidy_test_list[[3]])[4:7] <- paste(names(PredictData_df_tidy_test_list[[3]])[4:7] , "2070", sep="_" )
#### One wide data.frame with all three climate periods for the Wilcoxon tests ####
# NOTE(review): bind_cols() pairs rows purely by position -- this assumes the
# three period subsets are sorted identically (same RCM/comId/year order);
# confirm the upstream ordering.
test_data <- bind_cols(PredictData_df_tidy_test_list[[1]], bind_cols(PredictData_df_tidy_test_list[[2]][,4:7], PredictData_df_tidy_test_list[[3]][,4:7]))
str( test_data,1)
# test_data$year <- NULL
# #### Compare columns by WilcoxonText #####
# (wilcox.test( test_data$Y_anomaly_1971, test_data$Y_anomaly_2070))
# (wilcox.test( test_data$Y_anomaly_1971, test_data$Y_anomaly_2070))$p.value
###########################################################################################
#### Loop through the five climate models to provide maps of p-values of the Wilcoxon Test ####
#########################################################################################
##############################
#### Preparation for loop ####
##############################
#### Containers for p-values and plots of the test results, one slot per RCM ####
## All names below share one empty template list (copy-on-modify keeps them
## independent once the loop fills them).
## NOTE(review): the template is named MPI, DMI, KNMI, ICTP, SMI, but the loop
## fills slots by position from namelist_RCMs_total (BaseModel.R); confirm
## that both orders agree.
test_data_grouped_2021_anomaly_list <-
  test_data_grouped_2070_anomaly_list <-
  test_data_grouped_2021_anomaly_plots_list <-
  test_data_grouped_2070_anomaly_plots_list <-
  test_data_grouped_2021_anomaly_plots_paired_list <-
  test_data_grouped_2070_anomaly_plots_paired_list <-
  test_data_grouped_2021_anomaly_list_noTitle <-
  test_data_grouped_2070_anomaly_list_noTitle <-
  test_data_grouped_2021_anomaly_plots_list_noTitle <-
  test_data_grouped_2070_anomaly_plots_list_noTitle <-
  test_data_grouped_2021_anomaly_plots_paired_list_noTitle <-
  test_data_grouped_2070_anomaly_plots_paired_list_noTitle <-
  test_data_grouped_2021_anomaly_list_noTitle_noLegend <-
  test_data_grouped_2070_anomaly_list_noTitle_noLegend <-
  test_data_grouped_2021_anomaly_plots_list_noTitle_noLegend <-
  test_data_grouped_2070_anomaly_plots_list_noTitle_noLegend <-
  test_data_grouped_2021_anomaly_plots_paired_list_noTitle_noLegend <-
  test_data_grouped_2070_anomaly_plots_paired_list_noTitle_noLegend <-
  list(MPI = list(), DMI = list(), KNMI = list(), ICTP = list(), SMI = list(), All_RCMs = list())
#### Lists / names used in figures ####
nameList_climate                                  # printed for interactive inspection
namelist_RCMs_total <- c(namelist_RCMs, "All_RCMs")
# Index into nameList_climate used for the output paths below.
# IDIOM FIX: use `<-` for assignment (was `s=1`).
s <- 1
s
#######################################
#### Switches consumed by plot_variables ####
#######################################
## Time-period labels (indexed by `timeP`) ##
timePeriod <- list("2021 - 2050", "2070 - 2099")
## Test-variant labels (indexed by `paired`) ##
testPaired <- list( "non paired Wilcoxon - Test", "paired Wilcoxon - Test")
## Legend options (indexed by `Leg`) ##
list_legend_Variables <- c("none", "right")
list_legend_export <- c("noLegend", "legend")
## Title options (indexed by `Tit`): 1 = white (invisible) title, 2 = black ##
list_titleVariables <- list(element_text(color="white") , element_text(color="black") )
list_title_export <- list("noTitle", "title")
nameList_climate
# Map Wilcoxon p-values per county for one RCM / time period / test variant.
#
# dataSet : sf data.frame of county polygons joined with the per-county p-values
# timeP   : index into timePeriod (1 = 2021-2050, 2 = 2070-2099) -- title only
# paired  : index into testPaired -- title only
# Var     : offset selecting the p-value column at position 5 + Var
#           (TODO confirm the offset against the columns of vg2500_krs)
# Tit     : index into list_titleVariables (1 = hidden title, 2 = visible)
# Leg     : index into list_legend_Variables (1 = none, 2 = right)
#
# Also relies on globals: vg2500_krs, timePeriod, namelist_RCMs_total,
# testPaired, list_legend_Variables, list_titleVariables, and the loop index l.
plot_variables <- function(dataSet, timeP, paired, Var, Tit, Leg) {
  ggplot(dataSet) +
    geom_sf(data = vg2500_krs, colour = "white", fill = "black") +
    # Bin the p-values into significance classes; `drop = FALSE` below keeps
    # empty classes so the legend is stable across maps.
    # (Removed the stray `m = 0` argument, which cut() silently ignored via `...`.)
    geom_sf(aes(fill = cut(dataSet[[5 + Var]], c(-0.1, 0.05, 0.1, 1)))) +
    # BUG FIX: the title used the undefined name `RCMs`; use the loop index `l`,
    # which matches the RCM used for filtering and for the exported file names.
    ggtitle(paste(timePeriod[[timeP]], ": ", namelist_RCMs_total[[l]], " - ", testPaired[[paired]], sep = "")) +
    scale_fill_brewer(type = "seq", palette = "Blues", direction = -1, drop = FALSE,
                      labels = c("< 0.05", "< 0.1", "> 0.1")) +
    guides(fill = guide_legend(title = "p-values")) +
    theme_bw() +
    theme(legend.position = list_legend_Variables[Leg]) +
    theme(plot.title = list_titleVariables[[Tit]])
}
# - \nH0: no shift in mean
#### Start of loop through the five RCMs (plus the all-RCM average) ####
for (l in seq_along(namelist_RCMs_total)) {
  print(namelist_RCMs_total[[l]])

  ## Per-county (comId) Wilcoxon tests: anomalies 1971-2000 vs 2070-2099 ##
  test_data_grouped_2070_anomaly_list[[l]] <-
    test_data %>%
    filter(RCM == namelist_RCMs_total[[l]]) %>%
    group_by(comId) %>%
    summarise(
      test_sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean =
        wilcox.test(sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_2070)$p.value,
      test_sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_paired =
        wilcox.test(sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_2070, paired = TRUE)$p.value,
      test_sMA_lm.fit_SMI_6_Jul_anomaly_demean =
        wilcox.test(sMA_lm.fit_SMI_6_Jul_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jul_anomaly_demean_2070)$p.value,
      # BUG FIX: the paired July test previously compared July 1971 against the
      # Jun-Aug 2070 series (copy-paste slip); it now compares July vs July.
      test_sMA_lm.fit_SMI_6_Jul_anomaly_demean_paired =
        wilcox.test(sMA_lm.fit_SMI_6_Jul_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jul_anomaly_demean_2070, paired = TRUE)$p.value,
      test_sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean =
        wilcox.test(sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_1971,
                    sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_2070)$p.value,
      test_sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_paired =
        wilcox.test(sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_1971,
                    sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_2070, paired = TRUE)$p.value,
      test_sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean =
        wilcox.test(sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_2070)$p.value,
      test_sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_paired =
        wilcox.test(sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_2070, paired = TRUE)$p.value
    ) %>%
    collect()

  ## Per-county Wilcoxon tests: anomalies 1971-2000 vs 2021-2050 ##
  test_data_grouped_2021_anomaly_list[[l]] <-
    test_data %>%
    filter(RCM == namelist_RCMs_total[[l]]) %>%
    group_by(comId) %>%
    summarise(
      test_sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean =
        wilcox.test(sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_2021)$p.value,
      test_sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_paired =
        wilcox.test(sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jun_Aug_anomaly_demean_2021, paired = TRUE)$p.value,
      test_sMA_lm.fit_SMI_6_Jul_anomaly_demean =
        wilcox.test(sMA_lm.fit_SMI_6_Jul_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jul_anomaly_demean_2021)$p.value,
      # BUG FIX: same July-vs-Jun-Aug copy-paste slip as in the 2070 block above.
      test_sMA_lm.fit_SMI_6_Jul_anomaly_demean_paired =
        wilcox.test(sMA_lm.fit_SMI_6_Jul_anomaly_demean_1971,
                    sMA_lm.fit_SMI_6_Jul_anomaly_demean_2021, paired = TRUE)$p.value,
      test_sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean =
        wilcox.test(sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_1971,
                    sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_2021)$p.value,
      test_sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_paired =
        wilcox.test(sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_1971,
                    sMA_mgcv_bestEARTH_noInteraction_T_anomaly_demean_2021, paired = TRUE)$p.value,
      test_sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean =
        wilcox.test(sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_2021)$p.value,
      test_sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_paired =
        wilcox.test(sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_1971,
                    sMA_mgcv_SMI_6_Jun_Aug_anomaly_demean_2021, paired = TRUE)$p.value
    )

  #############################
  #### Join the p-values onto the county polygons ####
  test_data_grouped_2021_anomaly_spatial <- inner_join(vg2500_krs, test_data_grouped_2021_anomaly_list[[l]], by = "comId")
  test_data_grouped_2070_anomaly_spatial <- inner_join(vg2500_krs, test_data_grouped_2070_anomaly_list[[l]], by = "comId")

  ##############
  #### Maps ####
  ## Switch conventions (see plot_variables):
  ##   Var    1 = non-paired p-value column, 2 = paired column (position
  ##          5 + Var in the joined sf object -- TODO confirm vs vg2500_krs)
  ##   paired index into testPaired (title label only)
  ##   Tit    1 = hidden title, 2 = visible title
  ##   Leg    1 = no legend,    2 = legend on the right

  #### non paired, title + legend ####
  Var <- 1
  paired <- 1
  Tit <- 2
  Leg <- 2
  # BUG FIX: this debug print previously ran before `Var` was assigned, which
  # errors on the first iteration; it now runs after `Var` is set.
  print(names(test_data_grouped_2021_anomaly_spatial[5 + Var]))
  timeP <- 1
  test_data_grouped_2021_anomaly_plots_list[[l]] <- plot_variables(test_data_grouped_2021_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  timeP <- 2
  test_data_grouped_2070_anomaly_plots_list[[l]] <- plot_variables(test_data_grouped_2070_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  ggsave(paste("./figures/figures_exploratory/Proj/Wilcoxon/", nameList_climate[[s]],"/Wilcoxon_2070_anomaly_",namelist_RCMs_total[[l]],".pdf", sep="") ,
         test_data_grouped_2070_anomaly_plots_list[[l]] , width=16, height=9)
  ggsave(paste("./figures/figures_exploratory/Proj/Wilcoxon/", nameList_climate[[s]],"/Wilcoxon_2021_anomaly_",namelist_RCMs_total[[l]],".pdf", sep="") ,
         test_data_grouped_2021_anomaly_plots_list[[l]] , width=16, height=9)

  #### non paired - no title ####
  Var <- 1
  paired <- 1
  Tit <- 1
  Leg <- 2
  timeP <- 1
  test_data_grouped_2021_anomaly_plots_list_noTitle[[l]] <- plot_variables(test_data_grouped_2021_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  timeP <- 2
  test_data_grouped_2070_anomaly_plots_list_noTitle[[l]] <- plot_variables(test_data_grouped_2070_anomaly_spatial, timeP, paired, Var, Tit, Leg)

  #### non paired - no title, no legend ####
  Var <- 1
  paired <- 1
  Tit <- 1
  Leg <- 1
  timeP <- 1
  test_data_grouped_2021_anomaly_plots_list_noTitle_noLegend[[l]] <- plot_variables(test_data_grouped_2021_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  timeP <- 2
  test_data_grouped_2070_anomaly_plots_list_noTitle_noLegend[[l]] <- plot_variables(test_data_grouped_2070_anomaly_spatial, timeP, paired, Var, Tit, Leg)

  #### paired, title + legend ####
  Var <- 2
  # BUG FIX: `paired` was never switched to 2 here (the original assigned
  # `timeP <- 2` twice instead, immediately overwritten), so the paired maps
  # carried a "non paired" title.
  paired <- 2
  Tit <- 2
  Leg <- 2
  timeP <- 1
  test_data_grouped_2021_anomaly_plots_paired_list[[l]] <- plot_variables(test_data_grouped_2021_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  timeP <- 2
  test_data_grouped_2070_anomaly_plots_paired_list[[l]] <- plot_variables(test_data_grouped_2070_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  ggsave(paste("./figures/figures_exploratory/Proj/Wilcoxon/", nameList_climate[[s]],"/Wilcoxon_2070_anomaly_paired_",namelist_RCMs_total[[l]],".pdf", sep="") ,
         test_data_grouped_2070_anomaly_plots_paired_list[[l]], width=16, height=9)
  ggsave(paste("./figures/figures_exploratory/Proj/Wilcoxon/", nameList_climate[[s]],"/Wilcoxon_2021_anomaly_paired_",namelist_RCMs_total[[l]],".pdf", sep="") ,
         test_data_grouped_2021_anomaly_plots_paired_list[[l]], width=16, height=9)

  #### paired - no title ####
  Var <- 2
  paired <- 2
  Tit <- 1
  Leg <- 2
  timeP <- 1
  test_data_grouped_2021_anomaly_plots_paired_list_noTitle[[l]] <- plot_variables(test_data_grouped_2021_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  timeP <- 2
  test_data_grouped_2070_anomaly_plots_paired_list_noTitle[[l]] <- plot_variables(test_data_grouped_2070_anomaly_spatial, timeP, paired, Var, Tit, Leg)

  #### paired - no title, no legend ####
  Var <- 2
  paired <- 2
  Tit <- 1
  Leg <- 1
  timeP <- 1
  test_data_grouped_2021_anomaly_plots_paired_list_noTitle_noLegend[[l]] <- plot_variables(test_data_grouped_2021_anomaly_spatial, timeP, paired, Var, Tit, Leg)
  timeP <- 2
  test_data_grouped_2070_anomaly_plots_paired_list_noTitle_noLegend[[l]] <- plot_variables(test_data_grouped_2070_anomaly_spatial, timeP, paired, Var, Tit, Leg)
}
# }
# rm(list=ls())
#### Arrange the paired-test maps (2021 on top, 2070 below) per RCM and export ####
## NOTE(review): the plot lists are filled by position from namelist_RCMs_total
## (defined in BaseModel.R), while the panel titles below assume the order
## DMI, ICTP, KNMI, MPI, SMHI -- and the container template above was named
## MPI, DMI, KNMI, ICTP, SMI. Confirm that indices 1..5 match these names.
## NOTE(review): family = " Arial" carries a leading space in the font name.
DMI_annotated <- annotate_figure( ggarrange(test_data_grouped_2021_anomaly_plots_paired_list_noTitle_noLegend[[1]], test_data_grouped_2070_anomaly_plots_paired_list_noTitle_noLegend[[1]], labels = c("a1)", "a2)"),
ncol=1, nrow=2) , top = text_grob("DMI", color = "black", face = "bold", size = 20, family= " Arial"))
ICTP_annotated <- annotate_figure( ggarrange(test_data_grouped_2021_anomaly_plots_paired_list_noTitle_noLegend[[2]], test_data_grouped_2070_anomaly_plots_paired_list_noTitle_noLegend[[2]], labels = c("b1)", "b2)"),
ncol=1, nrow=2) , top = text_grob("ICTP", color = "black", face = "bold", size = 20, family= " Arial"))
KNMI_annotated <- annotate_figure( ggarrange(test_data_grouped_2021_anomaly_plots_paired_list_noTitle_noLegend[[3]], test_data_grouped_2070_anomaly_plots_paired_list_noTitle_noLegend[[3]], labels = c("c1)", "c2)"),
ncol=1, nrow=2) , top = text_grob("KNMI", color = "black", face = "bold", size = 20, family= " Arial"))
MPI_annotated <- annotate_figure( ggarrange(test_data_grouped_2021_anomaly_plots_paired_list_noTitle_noLegend[[4]], test_data_grouped_2070_anomaly_plots_paired_list_noTitle_noLegend[[4]], labels = c("d1)", "d2)"),
ncol=1, nrow=2) , top = text_grob("MPI", color = "black", face = "bold", size = 20, family= " Arial"))
SMHI_annotated <- annotate_figure( ggarrange(test_data_grouped_2021_anomaly_plots_paired_list_noTitle_noLegend[[5]], test_data_grouped_2070_anomaly_plots_paired_list_noTitle_noLegend[[5]], labels = c("e1)", "e2)"),
ncol=1, nrow=2
# , common.legend = TRUE, legend = "right"
) , top = text_grob("SMHI", color = "black", face = "bold", size = 20, family= " Arial"))
## Slot 6 is the averaged-RCM panel; it keeps its legend (shared on the right).
AllRCMs_annotated <- annotate_figure( ggarrange(test_data_grouped_2021_anomaly_plots_paired_list_noTitle[[6]],
test_data_grouped_2070_anomaly_plots_paired_list_noTitle[[6]],
labels = c("f1)", "f2)"),
ncol=1, nrow=2, common.legend = TRUE, legend = "right") , top = text_grob("Avg. of RCMs", color = "black", face = "bold", size = 20, family= " Arial"))
## Combine the six annotated panels into one row and export as a single PNG.
test_data_grouped_2021_anomaly_paired_list_allPlots <-
ggarrange( DMI_annotated, ICTP_annotated, KNMI_annotated, MPI_annotated, SMHI_annotated, AllRCMs_annotated , ncol=6, nrow = 1,
common.legend = TRUE, legend = "right", align ="hv")
test_data_grouped_2021_anomaly_paired_list_allPlots %>%
ggexport(filename = paste("./figures/figures_exploratory/Proj/Wilcoxon/", nameList_climate[[1]],"/Wilcoxon_AllRCMs.png", sep=""),
width=1500, height=500)
|
# Exploratory analysis of the US EPA 2017 air-quality data (course portfolio).
# NOTE(review): setwd() with an absolute user path makes the script
# non-portable; consider an RStudio project or relative paths instead.
setwd( "C:/Users/janja/Desktop/HarrisBurg/Semester 2/506 - Exploratory Data Analytics/Code Portfolio")
library(readr)
# Load the raw CSV file into a tibble
ozone <- read_csv("US EPA data 2017.csv")
View(ozone)
# Rewrite the column names so they are syntactic (spaces replaced by dots)
names(ozone) <- make.names(names(ozone))
# Number of rows
nrow(ozone)
# Number of columns
ncol(ozone)
# Data structure via str()
str(ozone)
# First and last rows of the data set: head() and tail()
head(ozone)
tail(ozone)
# View selected columns only
head(ozone[, c(6:7, 10)])
tail(ozone[, c(6:7, 10)])
# Frequency table of records per state code
table(ozone$`State.Code`)
library(dplyr)
# Filter one monitoring site (state 36 / county 033 / site 10) and display its keys
filter(ozone, State.Code == "36"
& County.Code == "033"
& Site.Num == "10") %>%
select( State.Code, County.Code,
Site.Num) %>%
as.data.frame
# Count and view the distinct states in the data
select(ozone, State.Name) %>% unique %>% nrow
unique(ozone$State.Name)
# Summarizing Observation.Percent
summary(ozone$Observation.Percent)
# Additional breakdown by deciles
quantile(ozone$Observation.Percent, seq(0, 1, 0.1))
# Rank state/county combinations by mean Observation.Percent, highest first.
# NOTE(review): the summary column is named `ozone` but it averages
# Observation.Percent, not an ozone concentration -- confirm intent.
ranking <- group_by(ozone, State.Name, County.Name) %>%
summarise(ozone = mean(Observation.Percent)) %>%
as.data.frame %>%
arrange(desc(ozone))
ranking
# Top 10
head(ranking, 10)
# Bottom 10
tail(ranking, 10)
# Number of observations for Mariposa County, California
filter(ozone, State.Name == "California" & County.Name == "Mariposa") %>% nrow
# Derive a Date column from the first-maximum timestamp
ozone <- mutate(ozone, Date.Local = as.Date(X1st.Max.DateTime))
# Monthly split for Mariposa, CA (original comment said "hourly", but the
# pipeline below groups by month)
filter(ozone, State.Name == "California" & County.Name == "Mariposa") %>%
mutate(month = factor(months(X1st.Max.DateTime), levels = month.name)) %>%
group_by(month) %>%
summarize(ozone = mean(Sample.Duration)) | /Code Portfolio/Week3/week3.R | no_license | njanjam1/EDA | R | false | false | 1,953 | r | setwd( "C:/Users/janja/Desktop/HarrisBurg/Semester 2/506 - Exploratory Data Analytics/Code Portfolio")
library(readr)
# To load CSV file
ozone <- read_csv("US EPA data 2017.csv")
View(ozone)
#rewrite the names of the columns to remove any spaces.
names(ozone) <- make.names(names(ozone))
# To check the number of rows
nrow(ozone)
# To check the number of columns
ncol(ozone)
# To check the data structures using str() function
str(ozone)
# start and end of dataset head() and tail() function
head(ozone)
tail(ozone)
#selected data viewing
head(ozone[, c(6:7, 10)])
tail(ozone[, c(6:7, 10)])
#variable to see what time measurements
table(ozone$`State.Code`)
library(dplyr)
#saving files as datframes or displaying using filter function
filter(ozone, State.Code == "36"
& County.Code == "033"
& Site.Num == "10") %>%
select( State.Code, County.Code,
Site.Num) %>%
as.data.frame
#counting and viewing unique data
select(ozone, State.Name) %>% unique %>% nrow
unique(ozone$State.Name)
#Sumarizing data
summary(ozone$Observation.Percent)
#additional breakdown
quantile(ozone$Observation.Percent, seq(0, 1, 0.1))
# Ranking the state with highest value
ranking <- group_by(ozone, State.Name, County.Name) %>%
summarise(ozone = mean(Observation.Percent)) %>%
as.data.frame %>%
arrange(desc(ozone))
ranking
#seeing top 10
head(ranking, 10)
#bottom 10
tail(ranking, 10)
#checking number of observations
filter(ozone, State.Name == "California" & County.Name == "Mariposa") %>% nrow
ozone <- mutate(ozone, Date.Local = as.Date(X1st.Max.DateTime))
#splitting at hourly levels
filter(ozone, State.Name == "California" & County.Name == "Mariposa") %>%
mutate(month = factor(months(X1st.Max.DateTime), levels = month.name)) %>%
group_by(month) %>%
summarize(ozone = mean(Sample.Duration)) |
anual(rgb(0,0,1), rgb(0.6156862745098039,0.7333333333333333,1))
rm(list = ls())
ENC<- cargaMasiva("matrimonios/matrimonios")
g1<- graficaLinea(ENC$"Hoja2", inicio = 60, rotar = "h")
exportarLatex("graficas/matrimonios/1_01.tex", g1)
g1<- graficaColCategorias(ENC$"Hoja3", etiquetasCategorias = "A",ancho = 0.55,
ruta = "graficas/matrimonios/1_02.tex", etiquetas = "h")
g1<- graficaColCategorias(ENC$"Hoja6", etiquetasCategorias = "A",ancho = 0.55,
ruta = "graficas/matrimonios/1_03.tex", etiquetas = "h")
g11<- graficaBar(ENC$"Hoja4",ancho = .45, ordenar = FALSE)
g11 <- etiquetasBarras(g11)
exportarLatex("graficas/matrimonios/1_04.tex", g11)
g11<- graficaBar(ENC$"Hoja5",ancho = .45, ordenar = FALSE)
g11 <- etiquetasBarras(g11)
exportarLatex("graficas/matrimonios/1_05.tex", g11)
g1<- graficaColCategorias(ENC$"Hoja7", etiquetasCategorias = "A",ancho = 0.55,ejeX = "v",
ruta = "graficas/matrimonios/1_06.tex", etiquetas = "h")
g1<- graficaColCategorias(ENC$"Hoja8", etiquetasCategorias = "A",ancho = 0.55,ejeX = "v",
ruta = "graficas/matrimonios/1_07.tex", etiquetas = "h")
| /MATRIMONIOS.R | no_license | hugoallan9/UNFACOMPENDIO | R | false | false | 1,254 | r |
anual(rgb(0,0,1), rgb(0.6156862745098039,0.7333333333333333,1))
rm(list = ls())
ENC<- cargaMasiva("matrimonios/matrimonios")
g1<- graficaLinea(ENC$"Hoja2", inicio = 60, rotar = "h")
exportarLatex("graficas/matrimonios/1_01.tex", g1)
g1<- graficaColCategorias(ENC$"Hoja3", etiquetasCategorias = "A",ancho = 0.55,
ruta = "graficas/matrimonios/1_02.tex", etiquetas = "h")
g1<- graficaColCategorias(ENC$"Hoja6", etiquetasCategorias = "A",ancho = 0.55,
ruta = "graficas/matrimonios/1_03.tex", etiquetas = "h")
g11<- graficaBar(ENC$"Hoja4",ancho = .45, ordenar = FALSE)
g11 <- etiquetasBarras(g11)
exportarLatex("graficas/matrimonios/1_04.tex", g11)
g11<- graficaBar(ENC$"Hoja5",ancho = .45, ordenar = FALSE)
g11 <- etiquetasBarras(g11)
exportarLatex("graficas/matrimonios/1_05.tex", g11)
g1<- graficaColCategorias(ENC$"Hoja7", etiquetasCategorias = "A",ancho = 0.55,ejeX = "v",
ruta = "graficas/matrimonios/1_06.tex", etiquetas = "h")
g1<- graficaColCategorias(ENC$"Hoja8", etiquetasCategorias = "A",ancho = 0.55,ejeX = "v",
ruta = "graficas/matrimonios/1_07.tex", etiquetas = "h")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{df_ex}
\alias{df_ex}
\title{Pivot table in data frame with thousands indicator and decimal numbers}
\format{
A data frame.
}
\usage{
df_ex
}
\description{
Pivot table in data frame with thousands indicator and decimal numbers.
}
\seealso{
\code{\link{pt_ex}}
Other pivot table in data frame:
\code{\link{df_ex_compact}},
\code{\link{df_pivottabler}}
}
\concept{pivot table in data frame}
\keyword{datasets}
| /man/df_ex.Rd | permissive | josesamos/flattabler | R | false | true | 525 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{df_ex}
\alias{df_ex}
\title{Pivot table in data frame with with thousands indicator and decimal numbers}
\format{
A data frame.
}
\usage{
df_ex
}
\description{
Pivot table in data frame with with thousands indicator and decimal numbers.
}
\seealso{
\code{\link{pt_ex}}
Other pivot table in data frame:
\code{\link{df_ex_compact}},
\code{\link{df_pivottabler}}
}
\concept{pivot table in data frame}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{dm6_NcoI_10000}
\alias{dm6_NcoI_10000}
\title{Genomic features for dm6 genome and NcoI restriction enzyme at 10 Kbp}
\format{A data frame with 13758 rows and 5 variables:
\describe{
\item{chr:}{chromosome}
\item{map:}{mappability as computed by gem}
\item{res:}{restriction enzyme density per 1 Kbp computed by Biostrings::matchPattern()}
\item{cg:}{cg content as computed by bedtools}
\item{bin:}{genomic bin with the format chromosome:start_position}
\item{pos:}{start position of the genomic bin}
}}
\usage{
dm6_NcoI_10000
}
\description{
A \code{data.frame} containing the mappability, restriction
enzyme density and CG proportion of the dm6 genome and
NcoI restriction enzyme in 10 Kbp bins
}
\keyword{datasets}
| /man/dm6_NcoI_10000.Rd | no_license | 4DGenome/hicfeatures | R | false | true | 842 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{dm6_NcoI_10000}
\alias{dm6_NcoI_10000}
\title{Genomic features for dm6 genome and NcoI restriction enzyme at 10 Kbp}
\format{A data frame with 13758 rows and 5 variables:
\describe{
\item{chr:}{chromosome}
\item{map:}{mappability as computed by gem}
\item{res:}{restriction enzyme density per 1 Kbp computed by Biostrings::matchPattern()}
\item{cg:}{cg content as computed by bedtools}
\item{bin:}{genomic bin with the format chromosome:start_position}
\item{pos:}{start postion of the genomic bin}
}}
\usage{
dm6_NcoI_10000
}
\description{
A \code{data.frame} containing the mappability, restriction
enzyme density and CG proportion of the dm6 genome and
NcoI restriction enzyme in 10 Kbp bins
}
\keyword{datasets}
|
#' @title unique.array
#'
#' @description Prank override of \code{base::unique.array()}: on every call
#'   it flips a coin and either returns one randomly chosen rap lyric or the
#'   genuine result of \code{base::unique.array()}.
#'
#' @param params object forwarded unchanged to \code{base::unique.array()}
#'
#' @return A single lyric string, or \code{base::unique.array(params)}.
#'
#' @export
unique.array <- function(params) {
  lyrics <- c(
    "Czesc czesc tu Sebol nawija, Mordo nie ma gandy a ja wbijam klina",
    "Tutaj start, mega bujanka. Zaczynamy tutaj strefe jaranka",
    "Odwiedzam czlowieka, mlody chlop kaleka. Ktos tu z nim steka,jest krecona beka",
    "Przy piwerku boski chillout Gruba toczy sie rozkmina",
    "Wez ziomalku sie nie spinaj DJ Werset znow zabija"
  )
  # Draw the lyric before flipping the coin: this preserves the original
  # order of RNG consumption (sample() first, then runif()).
  chosen_line <- sample(lyrics, 1)
  if (runif(1, 0, 1) < 0.5) {
    chosen_line
  } else {
    base::unique.array(params)
  }
}
| /R/unique.array.R | no_license | granatb/RapeR | R | false | false | 679 | r |
#' @title unique.array
#'
#' @description Prank override of base::unique.array(): half of the time a
#'   call returns a random rap lyric instead of the real result. (The
#'   original placeholder docs said, in Polish, "another function swapped".)
#'
#' @param params object forwarded unchanged to base::unique.array()
#'
#' @return A single lyric string, or base::unique.array(params).
#'
#' @export
unique.array<- function(params){
  # Candidate lyrics; one is drawn uniformly at random on every call.
  rap <- c("Czesc czesc tu Sebol nawija, Mordo nie ma gandy a ja wbijam klina",
           "Tutaj start, mega bujanka. Zaczynamy tutaj strefe jaranka",
           "Odwiedzam czlowieka, mlody chlop kaleka. Ktos tu z nim steka,jest krecona beka",
           "Przy piwerku boski chillout Gruba toczy sie rozkmina",
           "Wez ziomalku sie nie spinaj DJ Werset znow zabija")
  rapek <- sample(rap, 1)
  # Coin flip: below 0.5 return the lyric, otherwise defer to base R.
  if(runif(1,0,1) < 0.5){
    rapek
  }else{base::unique.array(params)
  }
}
|
## ----cache=TRUE----------------------------------------------------------
storm <- read.csv(bzfile("repdata_data_StormData.csv.bz2"))
## ------------------------------------------------------------------------
# number of unique event types
length(unique(storm$EVTYPE))
# translate all letters to lowercase
event_types <- tolower(storm$EVTYPE)
# replace all punct. characters with a space
event_types <- gsub("[[:blank:][:punct:]+]", " ", event_types)
length(unique(event_types))
# update the data frame
storm$EVTYPE <- event_types
## ------------------------------------------------------------------------
library(plyr)
casualties <- ddply(storm, .(EVTYPE), summarize,
fatalities = sum(FATALITIES),
injuries = sum(INJURIES))
# Find events that caused most death and injury
fatal_events <- head(casualties[order(casualties$fatalities, decreasing = T), ], 10)
injury_events <- head(casualties[order(casualties$injuries, decreasing = T), ], 10)
## ------------------------------------------------------------------------
fatal_events[, c("EVTYPE", "fatalities")]
## ------------------------------------------------------------------------
injury_events[, c("EVTYPE", "injuries")]
## ------------------------------------------------------------------------
# Convert a damage-exponent code from the NOAA storm data to a power of ten.
#
# The raw data encodes damage magnitudes with a separate exponent column:
# h/H = hundred, k/K = thousand, m/M = million, b/B = billion, a bare digit
# is the exponent itself, and '', '-', '?', '+' mean "no exponent" (0).
#
# Args:
#   e: a single exponent code (character scalar or factor level).
# Returns:
#   The numeric exponent, so damage = DMG * 10^exp_transform(e).
# Raises:
#   An error for any unrecognised code.
exp_transform <- function(e) {
    # Coerce up front: as.numeric() applied to a factor returns the level
    # *code*, not the label, which would silently corrupt digit exponents
    # (PROPDMGEXP/CROPDMGEXP are factors under pre-R4 read.csv defaults).
    e <- as.character(e)
    if (e %in% c('h', 'H'))
        return(2)
    if (e %in% c('k', 'K'))
        return(3)
    if (e %in% c('m', 'M'))
        return(6)
    if (e %in% c('b', 'B'))
        return(9)
    # Compute the coercion once and silence the "NAs introduced by
    # coercion" warning; non-numeric codes fall through to the checks below.
    num <- suppressWarnings(as.numeric(e))
    if (!is.na(num)) # if a digit
        return(num)
    if (e %in% c('', '-', '?', '+'))
        return(0)
    stop("Invalid exponent value.")
}
## ----cache=TRUE----------------------------------------------------------
prop_dmg_exp <- sapply(storm$PROPDMGEXP, FUN=exp_transform)
storm$prop_dmg <- storm$PROPDMG * (10 ** prop_dmg_exp)
crop_dmg_exp <- sapply(storm$CROPDMGEXP, FUN=exp_transform)
storm$crop_dmg <- storm$CROPDMG * (10 ** crop_dmg_exp)
## ------------------------------------------------------------------------
# Compute the economic loss by event type
library(plyr)
econ_loss <- ddply(storm, .(EVTYPE), summarize,
prop_dmg = sum(prop_dmg),
crop_dmg = sum(crop_dmg))
# filter out events that caused no economic loss
econ_loss <- econ_loss[(econ_loss$prop_dmg > 0 | econ_loss$crop_dmg > 0), ]
prop_dmg_events <- head(econ_loss[order(econ_loss$prop_dmg, decreasing = T), ], 10)
crop_dmg_events <- head(econ_loss[order(econ_loss$crop_dmg, decreasing = T), ], 10)
## ------------------------------------------------------------------------
prop_dmg_events[, c("EVTYPE", "prop_dmg")]
## ------------------------------------------------------------------------
crop_dmg_events[, c("EVTYPE", "crop_dmg")]
## ------------------------------------------------------------------------
library(ggplot2)
library(gridExtra)
# Set the levels in order
p1 <- ggplot(data=fatal_events,
aes(x=reorder(EVTYPE, fatalities), y=fatalities, fill=fatalities)) +
geom_bar(stat="identity") +
coord_flip() +
ylab("Total number of fatalities") +
xlab("Event type") +
theme(legend.position="none")
p2 <- ggplot(data=injury_events,
aes(x=reorder(EVTYPE, injuries), y=injuries, fill=injuries)) +
geom_bar(stat="identity") +
coord_flip() +
ylab("Total number of injuries") +
xlab("Event type") +
theme(legend.position="none")
grid.arrange(p1, p2, main="Top deadly weather events in the US (1950-2011)")
## ------------------------------------------------------------------------
library(ggplot2)
library(gridExtra)
# Set the levels in order
p1 <- ggplot(data=prop_dmg_events,
aes(x=reorder(EVTYPE, prop_dmg), y=log10(prop_dmg), fill=prop_dmg )) +
geom_bar(stat="identity") +
coord_flip() +
xlab("Event type") +
ylab("Property damage in dollars (log-scale)") +
theme(legend.position="none")
p2 <- ggplot(data=crop_dmg_events,
aes(x=reorder(EVTYPE, crop_dmg), y=crop_dmg, fill=crop_dmg)) +
geom_bar(stat="identity") +
coord_flip() +
xlab("Event type") +
ylab("Crop damage in dollars") +
theme(legend.position="none")
grid.arrange(p1, p2, main="Weather costs to the US economy (1950-2011)") | /storm_analysis.R | no_license | vikramvishal/datasharing | R | false | false | 4,454 | r | ## ----cache=TRUE----------------------------------------------------------
storm <- read.csv(bzfile("repdata_data_StormData.csv.bz2"))
## ------------------------------------------------------------------------
# number of unique event types
length(unique(storm$EVTYPE))
# translate all letters to lowercase
event_types <- tolower(storm$EVTYPE)
# replace all punct. characters with a space
event_types <- gsub("[[:blank:][:punct:]+]", " ", event_types)
length(unique(event_types))
# update the data frame
storm$EVTYPE <- event_types
## ------------------------------------------------------------------------
library(plyr)
casualties <- ddply(storm, .(EVTYPE), summarize,
fatalities = sum(FATALITIES),
injuries = sum(INJURIES))
# Find events that caused most death and injury
fatal_events <- head(casualties[order(casualties$fatalities, decreasing = T), ], 10)
injury_events <- head(casualties[order(casualties$injuries, decreasing = T), ], 10)
## ------------------------------------------------------------------------
fatal_events[, c("EVTYPE", "fatalities")]
## ------------------------------------------------------------------------
injury_events[, c("EVTYPE", "injuries")]
## ------------------------------------------------------------------------
# Convert a damage-exponent code from the NOAA storm data to a power of ten.
#
# The raw data encodes damage magnitudes with a separate exponent column:
# h/H = hundred, k/K = thousand, m/M = million, b/B = billion, a bare digit
# is the exponent itself, and '', '-', '?', '+' mean "no exponent" (0).
#
# Args:
#   e: a single exponent code (character scalar or factor level).
# Returns:
#   The numeric exponent, so damage = DMG * 10^exp_transform(e).
# Raises:
#   An error for any unrecognised code.
exp_transform <- function(e) {
    # Coerce up front: as.numeric() applied to a factor returns the level
    # *code*, not the label, which would silently corrupt digit exponents
    # (PROPDMGEXP/CROPDMGEXP are factors under pre-R4 read.csv defaults).
    e <- as.character(e)
    if (e %in% c('h', 'H'))
        return(2)
    if (e %in% c('k', 'K'))
        return(3)
    if (e %in% c('m', 'M'))
        return(6)
    if (e %in% c('b', 'B'))
        return(9)
    # Compute the coercion once and silence the "NAs introduced by
    # coercion" warning; non-numeric codes fall through to the checks below.
    num <- suppressWarnings(as.numeric(e))
    if (!is.na(num)) # if a digit
        return(num)
    if (e %in% c('', '-', '?', '+'))
        return(0)
    stop("Invalid exponent value.")
}
## ----cache=TRUE----------------------------------------------------------
prop_dmg_exp <- sapply(storm$PROPDMGEXP, FUN=exp_transform)
storm$prop_dmg <- storm$PROPDMG * (10 ** prop_dmg_exp)
crop_dmg_exp <- sapply(storm$CROPDMGEXP, FUN=exp_transform)
storm$crop_dmg <- storm$CROPDMG * (10 ** crop_dmg_exp)
## ------------------------------------------------------------------------
# Compute the economic loss by event type
library(plyr)
econ_loss <- ddply(storm, .(EVTYPE), summarize,
prop_dmg = sum(prop_dmg),
crop_dmg = sum(crop_dmg))
# filter out events that caused no economic loss
econ_loss <- econ_loss[(econ_loss$prop_dmg > 0 | econ_loss$crop_dmg > 0), ]
prop_dmg_events <- head(econ_loss[order(econ_loss$prop_dmg, decreasing = T), ], 10)
crop_dmg_events <- head(econ_loss[order(econ_loss$crop_dmg, decreasing = T), ], 10)
## ------------------------------------------------------------------------
prop_dmg_events[, c("EVTYPE", "prop_dmg")]
## ------------------------------------------------------------------------
crop_dmg_events[, c("EVTYPE", "crop_dmg")]
## ------------------------------------------------------------------------
library(ggplot2)
library(gridExtra)
# Set the levels in order
p1 <- ggplot(data=fatal_events,
aes(x=reorder(EVTYPE, fatalities), y=fatalities, fill=fatalities)) +
geom_bar(stat="identity") +
coord_flip() +
ylab("Total number of fatalities") +
xlab("Event type") +
theme(legend.position="none")
p2 <- ggplot(data=injury_events,
aes(x=reorder(EVTYPE, injuries), y=injuries, fill=injuries)) +
geom_bar(stat="identity") +
coord_flip() +
ylab("Total number of injuries") +
xlab("Event type") +
theme(legend.position="none")
grid.arrange(p1, p2, main="Top deadly weather events in the US (1950-2011)")
## ------------------------------------------------------------------------
library(ggplot2)
library(gridExtra)
# Set the levels in order
p1 <- ggplot(data=prop_dmg_events,
aes(x=reorder(EVTYPE, prop_dmg), y=log10(prop_dmg), fill=prop_dmg )) +
geom_bar(stat="identity") +
coord_flip() +
xlab("Event type") +
ylab("Property damage in dollars (log-scale)") +
theme(legend.position="none")
p2 <- ggplot(data=crop_dmg_events,
aes(x=reorder(EVTYPE, crop_dmg), y=crop_dmg, fill=crop_dmg)) +
geom_bar(stat="identity") +
coord_flip() +
xlab("Event type") +
ylab("Crop damage in dollars") +
theme(legend.position="none")
grid.arrange(p1, p2, main="Weather costs to the US economy (1950-2011)") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudwatch_operations.R
\name{cloudwatch_put_dashboard}
\alias{cloudwatch_put_dashboard}
\title{Creates a dashboard if it does not already exist, or updates an existing
dashboard}
\usage{
cloudwatch_put_dashboard(DashboardName, DashboardBody)
}
\arguments{
\item{DashboardName}{[required] The name of the dashboard. If a dashboard with this name already exists,
this call modifies that dashboard, replacing its current contents.
Otherwise, a new dashboard is created. The maximum length is 255, and
valid characters are A-Z, a-z, 0-9, "-", and "_". This parameter is
required.}
\item{DashboardBody}{[required] The detailed information about the dashboard in JSON format, including
the widgets to include and their location on the dashboard. This
parameter is required.
For more information about the syntax, see \href{https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html}{Dashboard Body Structure and Syntax}.}
}
\description{
Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.
See \url{https://www.paws-r-sdk.com/docs/cloudwatch_put_dashboard/} for full documentation.
}
\keyword{internal}
| /cran/paws.management/man/cloudwatch_put_dashboard.Rd | permissive | paws-r/paws | R | false | true | 1,346 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudwatch_operations.R
\name{cloudwatch_put_dashboard}
\alias{cloudwatch_put_dashboard}
\title{Creates a dashboard if it does not already exist, or updates an existing
dashboard}
\usage{
cloudwatch_put_dashboard(DashboardName, DashboardBody)
}
\arguments{
\item{DashboardName}{[required] The name of the dashboard. If a dashboard with this name already exists,
this call modifies that dashboard, replacing its current contents.
Otherwise, a new dashboard is created. The maximum length is 255, and
valid characters are A-Z, a-z, 0-9, "-", and "_". This parameter is
required.}
\item{DashboardBody}{[required] The detailed information about the dashboard in JSON format, including
the widgets to include and their location on the dashboard. This
parameter is required.
For more information about the syntax, see \href{https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html}{Dashboard Body Structure and Syntax}.}
}
\description{
Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.
See \url{https://www.paws-r-sdk.com/docs/cloudwatch_put_dashboard/} for full documentation.
}
\keyword{internal}
|
rm(list=ls())
##colnames(north_05) = c("Time","Pressure (mH2O)")
data = readLines("data/T3_thermistor_array.csv")
data = data[c(-1)]
data = read.csv(textConnection(data),stringsAsFactors=FALSE,header=FALSE)
data[[1]] = strptime(data[[1]],format="%m/%d/%Y %H:%M",tz="GMT")
thermistor = data.frame(data)
colnames(thermistor) = c("Time","TM1","TM2","TM3","TM4","TM5","TM6")
data = readLines("data/T3_Therm.csv")
data = data[c(-1)]
data = read.csv(textConnection(data),stringsAsFactors=FALSE,header=FALSE)
data[[1]] = strptime(data[[1]],format="%Y-%m-%d %H:%M:%S",tz="GMT")
data = data.frame(data)
colnames(data)=colnames(thermistor)
thermistor = rbind(thermistor,data)
data = readLines("data/RG3_T3_T3_Temps.dat")
data = data[-(1:4)]
data = read.csv(textConnection(data),stringsAsFactors=FALSE,header=FALSE)
data[[1]] = strptime(data[[1]],format="%Y-%m-%d %H:%M:%S",tz="GMT")
data = data.frame(data[[1]],data[[3]],data[[4]],data[[5]],data[[6]],data[[7]],data[[8]])
colnames(data)=colnames(thermistor)
thermistor = rbind(thermistor,data)
stop()
RG3 = read.csv("RG3.csv",stringsAsFactors=FALSE)
RG3[[1]] = strptime(RG3[[1]],format="%Y-%m-%d %H:%M:%S",tz="GMT")
start.time = range(north_river[,1])[1]
start.time = as.POSIXct(start.time,format="%Y-%m-%d %H:%M:%S",tz="GMT")
end.time = range(north_river[,1])[2]
end.time = as.POSIXct(end.time,format="%Y-%m-%d %H:%M:%S",tz="GMT")
time.ticks = seq(start.time-3600*24,end.time,3600*24*5)
jpeg(filename="ERT_north.jpeg",width=10,height=8,units='in',res=200,quality=100)
plot(north_river[,1],north_river[,2],type="l",ylim=c(0,3),col="blue",lwd=2,
xlim=range(start.time,end.time),
axes = FALSE,
xlab = NA,
ylab = NA,
main="ERT_north"
)
lines(north_05[,1],north_05[,2],col="green",lwd=2)
lines(north_2[,1],north_2[,2],col="red",lwd=2)
axis(side=1,at=time.ticks,label=format(time.ticks,format="%m/%d/%y"))
mtext(side=1,text="Time (day)",line=3)
axis(side=2,las=2,line=0.5)
mtext(side=2,text="Pressure (m)",line=3)
legend("bottom",c("river","shallow pressure","deep pressure"),lty=1,lwd=2,
col=c("blue","green","red"),
bty='n'
)
par(new=T)
plot(RG3[[1]],RG3[[2]],
ylim=c(103,106),
xlim=range(start.time,end.time),
type='l',col='black',lwd=2,
axes = FALSE,
xlab = NA,
ylab = NA,
)
axis(side=4,las=2,line=-2,col="blue")
mtext(side=4,text="River level(m)",line=1,col="blue")
legend("top",c("RG3 river level"),lty=1,lwd=2,
col=c("black"),bty="n")
dev.off()
jpeg(filename="ERT_south.jpeg",width=10,height=8,units='in',res=200,quality=100)
plot(south_river[,1],south_river[,2],type="l",ylim=c(0,3),col="blue",lwd=2,
xlim=range(start.time,end.time),
axes = FALSE,
xlab = NA,
ylab = NA,
)
lines(south_05[,1],south_05[,2],col="green",lwd=2)
lines(south_2[,1],south_2[,2],col="red",lwd=2)
axis(side=1,at=time.ticks,label=format(time.ticks,format="%m/%d/%y"))
mtext(side=1,text="Time (day)",line=3)
axis(side=2,las=2,line=0.5)
mtext(side=2,text="Pressure (m)",line=3)
legend("bottom",c("river","shallow pressure","deep pressure"),lty=1,lwd=2,
col=c("blue","green","red"),
bty='n'
)
par(new=T)
plot(RG3[[1]],RG3[[2]],
ylim=c(103,106),
xlim=range(start.time,end.time),
type='l',col='black',lwd=2,
axes = FALSE,
xlab = NA,
ylab = NA,
main="ERT_south"
)
axis(side=4,las=2,line=-2,col="blue")
mtext(side=4,text="River level(m)",line=1,col="blue")
legend("top",c("RG3 river level"),lty=1,lwd=2,
col=c("black"),bty="n")
dev.off()
| /sensitivity/themistor/plot_pressure.data.R | no_license | mrubayet/archived_codes_for_sfa_modeling | R | false | false | 3,583 | r | rm(list=ls())
##colnames(north_05) = c("Time","Pressure (mH2O)")
data = readLines("data/T3_thermistor_array.csv")
data = data[c(-1)]
data = read.csv(textConnection(data),stringsAsFactors=FALSE,header=FALSE)
data[[1]] = strptime(data[[1]],format="%m/%d/%Y %H:%M",tz="GMT")
thermistor = data.frame(data)
colnames(thermistor) = c("Time","TM1","TM2","TM3","TM4","TM5","TM6")
data = readLines("data/T3_Therm.csv")
data = data[c(-1)]
data = read.csv(textConnection(data),stringsAsFactors=FALSE,header=FALSE)
data[[1]] = strptime(data[[1]],format="%Y-%m-%d %H:%M:%S",tz="GMT")
data = data.frame(data)
colnames(data)=colnames(thermistor)
thermistor = rbind(thermistor,data)
data = readLines("data/RG3_T3_T3_Temps.dat")
data = data[-(1:4)]
data = read.csv(textConnection(data),stringsAsFactors=FALSE,header=FALSE)
data[[1]] = strptime(data[[1]],format="%Y-%m-%d %H:%M:%S",tz="GMT")
data = data.frame(data[[1]],data[[3]],data[[4]],data[[5]],data[[6]],data[[7]],data[[8]])
colnames(data)=colnames(thermistor)
thermistor = rbind(thermistor,data)
stop()
RG3 = read.csv("RG3.csv",stringsAsFactors=FALSE)
RG3[[1]] = strptime(RG3[[1]],format="%Y-%m-%d %H:%M:%S",tz="GMT")
start.time = range(north_river[,1])[1]
start.time = as.POSIXct(start.time,format="%Y-%m-%d %H:%M:%S",tz="GMT")
end.time = range(north_river[,1])[2]
end.time = as.POSIXct(end.time,format="%Y-%m-%d %H:%M:%S",tz="GMT")
time.ticks = seq(start.time-3600*24,end.time,3600*24*5)
jpeg(filename="ERT_north.jpeg",width=10,height=8,units='in',res=200,quality=100)
plot(north_river[,1],north_river[,2],type="l",ylim=c(0,3),col="blue",lwd=2,
xlim=range(start.time,end.time),
axes = FALSE,
xlab = NA,
ylab = NA,
main="ERT_north"
)
lines(north_05[,1],north_05[,2],col="green",lwd=2)
lines(north_2[,1],north_2[,2],col="red",lwd=2)
axis(side=1,at=time.ticks,label=format(time.ticks,format="%m/%d/%y"))
mtext(side=1,text="Time (day)",line=3)
axis(side=2,las=2,line=0.5)
mtext(side=2,text="Pressure (m)",line=3)
legend("bottom",c("river","shallow pressure","deep pressure"),lty=1,lwd=2,
col=c("blue","green","red"),
bty='n'
)
par(new=T)
plot(RG3[[1]],RG3[[2]],
ylim=c(103,106),
xlim=range(start.time,end.time),
type='l',col='black',lwd=2,
axes = FALSE,
xlab = NA,
ylab = NA,
)
axis(side=4,las=2,line=-2,col="blue")
mtext(side=4,text="River level(m)",line=1,col="blue")
legend("top",c("RG3 river level"),lty=1,lwd=2,
col=c("black"),bty="n")
dev.off()
jpeg(filename="ERT_south.jpeg",width=10,height=8,units='in',res=200,quality=100)
plot(south_river[,1],south_river[,2],type="l",ylim=c(0,3),col="blue",lwd=2,
xlim=range(start.time,end.time),
axes = FALSE,
xlab = NA,
ylab = NA,
)
lines(south_05[,1],south_05[,2],col="green",lwd=2)
lines(south_2[,1],south_2[,2],col="red",lwd=2)
axis(side=1,at=time.ticks,label=format(time.ticks,format="%m/%d/%y"))
mtext(side=1,text="Time (day)",line=3)
axis(side=2,las=2,line=0.5)
mtext(side=2,text="Pressure (m)",line=3)
legend("bottom",c("river","shallow pressure","deep pressure"),lty=1,lwd=2,
col=c("blue","green","red"),
bty='n'
)
par(new=T)
plot(RG3[[1]],RG3[[2]],
ylim=c(103,106),
xlim=range(start.time,end.time),
type='l',col='black',lwd=2,
axes = FALSE,
xlab = NA,
ylab = NA,
main="ERT_south"
)
axis(side=4,las=2,line=-2,col="blue")
mtext(side=4,text="River level(m)",line=1,col="blue")
legend("top",c("RG3 river level"),lty=1,lwd=2,
col=c("black"),bty="n")
dev.off()
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# set locale to english on windows platform so date handling is consistent
Sys.setlocale(category = "LC_ALL", locale = "English_United States.1252")
# 1.read data; the raw file marks missing readings with "?", so declare them
#   as NA up front instead of letting as.numeric() coerce them (with
#   warnings) later on
hpc <- read.csv("household_power_consumption.txt", sep=";",
                stringsAsFactors=FALSE, na.strings="?")
# 2.subset by dates 2007-02-01 and 2007-02-02 (stored as d/m/yyyy strings)
hpc_sub <- subset(hpc, Date %in% c("1/2/2007", "2/2/2007"))
# 3.open a png device
png(file="plot1.png")
hist(as.numeric(hpc_sub$Global_active_power),
    col="red", main="Global Active Power",
    xlab="Global Active Power (kilowatts)")
# 4.close device
dev.off() | /plot1.R | no_license | jhcheng/ExData_Plotting1 | R | false | false | 533 | r | # set locale to english on windows platform
Sys.setlocale(category = "LC_ALL", locale = "English_United States.1252")
# 1.read data
hpc <- read.csv("household_power_consumption.txt", sep=";", stringsAsFactors=FALSE)
# 2.subset by dates 2007-02-01 and 2007-02-02
hpc_sub <- subset(hpc, Date == "1/2/2007"| Date == "2/2/2007")
# 3.open a png device
png(file="plot1.png")
hist(as.numeric(hpc_sub$Global_active_power),
col="red", main="Global Active Power",
xlab="Global Active Power (kilowatts)")
# 4.close device
dev.off() |
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{hcr_set_recErrors}
\alias{hcr_set_recErrors}
\title{HCR: Setup of recruitment error structure}
\usage{
hcr_set_recErrors(d, ctr)
}
\arguments{
\item{d}{XXX}
\item{ctr}{XXX}
}
\description{
XXX
}
| /man/hcr_set_recErrors.Rd | no_license | einarhjorleifsson/fishvise | R | false | false | 256 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{hcr_set_recErrors}
\alias{hcr_set_recErrors}
\title{HCR: Setup of recruitment error structure}
\usage{
hcr_set_recErrors(d, ctr)
}
\arguments{
\item{d}{XXX}
\item{ctr}{XXX}
}
\description{
XXX
}
|
# --- Load, patch, and re-save the drug synonym table ------------------------
# Records "MJ-III-65" as the canonical match name for LMP744 (row 145).
## library(rcellminerUtilsCDB)
load("./data/drugSynonymTab.RData")
syn_tab <- drugSynonymTab
dim(syn_tab)       # expect 738 x 7
syn_tab[[145, 1]]  # "LMP744" "LMP-744" "MJ-III-65" "INDENOISOQUINOLINE"
syn_tab[[145, 4]] <- "MJ-III-65"
drugSynonymTab <- syn_tab
# Persist the corrected synonym table.
save(drugSynonymTab, file = "data/drugSynonymTab.RData")
## end | /inst/drugMatching/UpdateDrugMatchTab_LMP744_GDSC.R | no_license | CBIIT/rcellminerUtilsCDB | R | false | false | 589 | r | #--------------------------------------------------------------------------------------------------
# LOAD DATA
#--------------------------------------------------------------------------------------------------
## library(rcellminerUtilsCDB)
load("./data/drugSynonymTab.RData")
temptable=drugSynonymTab
dim(temptable)
# 738 7
temptable[[145,1]]
## [1] "LMP744" "LMP-744" "MJ-III-65" "INDENOISOQUINOLINE"
temptable[[145,4]] <- "MJ-III-65"
drugSynonymTab=temptable
# save new drug synonyms
save(drugSynonymTab, file = "data/drugSynonymTab.RData")
## end |
# ....###....##....##....###....##.......##....##..######..####..######.
# ...##.##...###...##...##.##...##........##..##..##....##..##..##....##
# ..##...##..####..##..##...##..##.........####...##........##..##......
# .##.....##.##.##.##.##.....##.##..........##.....######...##...######.
# .#########.##..####.#########.##..........##..........##..##........##
# .##.....##.##...###.##.....##.##..........##....##....##..##..##....##
# .##.....##.##....##.##.....##.########....##.....######..####..######.
#
# .########..##........#######...######..##....##.....#######.
# .##.....##.##.......##.....##.##....##.##...##.....##.....##
# .##.....##.##.......##.....##.##.......##..##.............##
# .########..##.......##.....##.##.......#####........#######.
# .##.....##.##.......##.....##.##.......##..##......##.......
# .##.....##.##.......##.....##.##....##.##...##.....##.......
# .########..########..#######...######..##....##....#########
require(sss)
require(Biobase)
require(ggplot2)
require(breastCancerTRANSBIG)
require(ggplot2)
require(ROCR)
require(hgu133a.db)
require(synapseClient)
## BINARY MODEL OF 'ER Status' USING SSS
sssERFit <- sss(trainScore ~ t(trainExpress))
# EVALUATE AND VISUALIZE TRAINING Y-HAT
trainScoreHat <- predict(sssERFit, newdata = t(trainExpress))
trainScoreDF <- as.data.frame(cbind(trainScore, trainScoreHat))
colnames(trainScoreDF) <- c("yTrain", "yTrainHat")
trainBoxPlot <- ggplot(trainScoreDF, aes(factor(yTrain), yTrainHat)) +
geom_boxplot() +
geom_jitter(aes(colour = as.factor(yTrain)), size = 4) +
opts(title = "ER SSS Model Training Set Hat") +
ylab("Training Set ER Prediction") +
xlab("True ER Status") +
opts(plot.title = theme_text(size = 14))
png(file = "trainBoxPlot.png", bg = "transparent", width = 1024,
height = 768)
trainBoxPlot
dev.off() | /analysisBlock2.R | no_license | Sage-Bionetworks/synapsify-demo | R | false | false | 1,830 | r | # ....###....##....##....###....##.......##....##..######..####..######.
# ...##.##...###...##...##.##...##........##..##..##....##..##..##....##
# ..##...##..####..##..##...##..##.........####...##........##..##......
# .##.....##.##.##.##.##.....##.##..........##.....######...##...######.
# .#########.##..####.#########.##..........##..........##..##........##
# .##.....##.##...###.##.....##.##..........##....##....##..##..##....##
# .##.....##.##....##.##.....##.########....##.....######..####..######.
#
# .########..##........#######...######..##....##.....#######.
# .##.....##.##.......##.....##.##....##.##...##.....##.....##
# .##.....##.##.......##.....##.##.......##..##.............##
# .########..##.......##.....##.##.......#####........#######.
# .##.....##.##.......##.....##.##.......##..##......##.......
# .##.....##.##.......##.....##.##....##.##...##.....##.......
# .########..########..#######...######..##....##....#########
require(sss)
require(Biobase)
require(ggplot2)
require(breastCancerTRANSBIG)
require(ggplot2)
require(ROCR)
require(hgu133a.db)
require(synapseClient)
## BINARY MODEL OF 'ER Status' USING SSS
sssERFit <- sss(trainScore ~ t(trainExpress))
# EVALUATE AND VISUALIZE TRAINING Y-HAT
trainScoreHat <- predict(sssERFit, newdata = t(trainExpress))
trainScoreDF <- as.data.frame(cbind(trainScore, trainScoreHat))
colnames(trainScoreDF) <- c("yTrain", "yTrainHat")
trainBoxPlot <- ggplot(trainScoreDF, aes(factor(yTrain), yTrainHat)) +
geom_boxplot() +
geom_jitter(aes(colour = as.factor(yTrain)), size = 4) +
opts(title = "ER SSS Model Training Set Hat") +
ylab("Training Set ER Prediction") +
xlab("True ER Status") +
opts(plot.title = theme_text(size = 14))
png(file = "trainBoxPlot.png", bg = "transparent", width = 1024,
height = 768)
trainBoxPlot
dev.off() |
library(shiny)
library(tidyverse)
library(magrittr)
library(gapminder)
gapminder %<>% mutate_at(c("year", "country"), as.factor)
gapminder_years = levels(gapminder$year) %>% str_sort()
gapminder_countries = levels(gapminder$country)
dataPanel <- tabPanel("Data", tableOutput("data"))
plotPanel <- tabPanel("Plot",
fluidRow(
column(width = 8,
plotOutput("plot",
hover = hoverOpts(id = "plot_hover", delayType = "throttle"),
)),
column(width = 4,
verbatimTextOutput("plot_hoverinfo")
)
) #fluidRow
) # tabPanel
myHeader <- div(
selectInput(
inputId = "selYear",
label = "Select the Year",
multiple = TRUE,
choices = gapminder_years,
selected = c(gapminder_years[1])
),
selectInput(
inputId = "selCountry",
label = "Select the Country",
multiple = TRUE,
choices = gapminder_countries,
selected = c(gapminder_countries[1])
)
)
# Define UI for application that draws a histogram
ui <- navbarPage("shiny App",
dataPanel,
plotPanel,
header = myHeader
)
# Define server logic required to draw a histogram
# Shiny server: filters gapminder to the selected years/countries and
# renders both the data table and the grouped population bar chart.
server <- function(input, output) {
# Reactive subset of gapminder driven by the two selectInput widgets
# (selYear / selCountry) defined in myHeader.
gapminder_year <- reactive({gapminder %>% filter(year %in% input$selYear, country %in% input$selCountry)})
# "Data" tab: show the filtered rows as a table.
output$data <- renderTable(gapminder_year())
#output$info <- renderPrint(toString(gapminder_years))
# "Plot" tab: population per selected country, one dodged bar per year.
output$plot <- renderPlot(
ggplot(data=gapminder_year(), aes(x=country, y=pop, fill=year))
+ geom_bar(stat="identity", position=position_dodge())
)
}
# Run the application
shinyApp(ui = ui, server = server) | /app.R | no_license | bernardo-dauria/2021-rshiny-case-study | R | false | false | 1,833 | r | library(shiny)
library(tidyverse)
library(magrittr)
library(gapminder)
gapminder %<>% mutate_at(c("year", "country"), as.factor)
gapminder_years = levels(gapminder$year) %>% str_sort()
gapminder_countries = levels(gapminder$country)
dataPanel <- tabPanel("Data", tableOutput("data"))
plotPanel <- tabPanel("Plot",
fluidRow(
column(width = 8,
plotOutput("plot",
hover = hoverOpts(id = "plot_hover", delayType = "throttle"),
)),
column(width = 4,
verbatimTextOutput("plot_hoverinfo")
)
) #fluidRow
) # tabPanel
myHeader <- div(
selectInput(
inputId = "selYear",
label = "Select the Year",
multiple = TRUE,
choices = gapminder_years,
selected = c(gapminder_years[1])
),
selectInput(
inputId = "selCountry",
label = "Select the Country",
multiple = TRUE,
choices = gapminder_countries,
selected = c(gapminder_countries[1])
)
)
# Define UI for application that draws a histogram
ui <- navbarPage("shiny App",
dataPanel,
plotPanel,
header = myHeader
)
# Define server logic required to draw a histogram
server <- function(input, output) {
gapminder_year <- reactive({gapminder %>% filter(year %in% input$selYear, country %in% input$selCountry)})
output$data <- renderTable(gapminder_year())
#output$info <- renderPrint(toString(gapminder_years))
output$plot <- renderPlot(
ggplot(data=gapminder_year(), aes(x=country, y=pop, fill=year))
+ geom_bar(stat="identity", position=position_dodge())
)
}
# Run the application
shinyApp(ui = ui, server = server) |
# plot3: energy sub-metering for 1-3 Feb 2007 from the UCI household power
# consumption data set (semicolon-separated, '?' marks missing values).

# Extract the data for the required dates. Read the raw lines ONCE and
# reuse them for both date searches (the original called readLines() twice
# and read.table() a third time just for the header).
dataFile <- paste0(getwd(), "/household_power_consumption.txt")
rawLines <- readLines(dataFile)
startIndex <- grep("1/2/2007", rawLines)[1]
endIndex <- grep("3/2/2007", rawLines)[1]
data <- read.table(dataFile,
                   header = FALSE, sep = ";", na.strings = "?",
                   stringsAsFactors = FALSE,
                   skip = startIndex - 1, nrows = endIndex - startIndex)
# Column names come from the file's first (header) line as a character
# vector; the original assigned a one-row data.frame to the names
# attribute, which only worked through implicit coercion.
names(data) <- strsplit(rawLines[1], ";", fixed = TRUE)[[1]]
# Create png file for third plot
png(filename = paste0(getwd(), "/ExData_Plotting1/plot3.png"))
# Empty frame first (pch = NA), then one line per sub-meter; the x axis is
# labelled by day (1440 minutes per day).
plot(data$Global_active_power, pch = NA_integer_, xaxt = "n",
     ylab = "Energy sub metering", xlab = "", ylim = c(0, 38))
lines(data$Sub_metering_2, col = "red")
lines(data$Sub_metering_3, col = "blue")
lines(data$Sub_metering_1, col = "black")
axis(1, at = c(1, 1440, 2880), labels = c("Thu", "Fri", "Sat"))
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = rep(1, 3))
dev.off() | /plot3.R | no_license | frenzyfortune/ExData_Plotting1 | R | false | false | 1,061 | r | # Extract the data for the required dates
dataFile <- paste0(getwd(),"/household_power_consumption.txt")
startIndex <- grep("1/2/2007", readLines(dataFile))[1]
endIndex <- grep("3/2/2007", readLines(dataFile))[1]
data <- read.table(dataFile,
header=FALSE,sep=";",na.strings="?",stringsAsFactors=FALSE,
skip=startIndex-1,nrows=endIndex-startIndex)
attr(data,"names") <- read.table(dataFile,
header=FALSE,sep=";",na.strings="?",stringsAsFactors=FALSE,
nrows=1)
# Create png file for third plot
png(filename=paste0(getwd(),"/ExData_Plotting1/plot3.png"))
plot(data$Global_active_power,pch=NA_integer_,xaxt="n",ylab="Energy sub metering"
,xlab="",ylim=c(0,38))
lines(data$Sub_metering_2,col="red")
lines(data$Sub_metering_3,col="blue")
lines(data$Sub_metering_1,col="black")
axis(1, at=c(1,1440,2880), labels=c("Thu","Fri","Sat"))
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
col=c("black","red","blue"),lty=rep(1,3))
dev.off() |
########## Week 1 - Lecture Notes ##########
# Interactive scratch file from the course lectures: functions, missing
# values, data frames, names, reading data, and subsetting.
myfunction <- function() {
x <- rnorm(100)
print(x)
mean(x)
}
second <- function(x) {
x + rnorm(length(x))
}
## Missing Values - is.na(NaN) is TRUE, but is.nan(NA) is FALSE. NA's can have classes
x <- c(1, 2, NA, 20, 3)
is.na(x)
is.nan(x)
x <- c(1, 2, NaN, NA, 4)
is.na(x)
is.nan(x)
## Data frame - stores tabular data. Every element of the list has to be of the same length. Unlike
## a matrix, each column (element) can be a different class. Special attributes, row.names - each
## row has a name. Created by calling read.table() or read.csv(). data.frame() builds a dataframe
## from scratch.
x <- data.frame(foo = 1:4, bar = c(T, T, F, F))
x
nrow(x)
ncol(x)
## Names - R objects can have names! No name by default but can assign.
x <- 1:3
names(x)
names(x) <- c("foo", "bar", "norf")
x
names(x)
# Lists can also have names!
x <- list(a = 1, b = 2, c = 3)
x
# matrices can have names too - dimnames()
m <- matrix(1:4, nrow = 2, ncol = 2)
dimnames(m) <- list(c("a", "b"), c("c", "d"))
m
## Reading Tabular Data - text files; both return a data frame
# read.table()
# read.csv()
# Note: read.table reads in characters as factor as a default (R < 4.0). This could be problematic
# for text analytics, so setting stringsAsFactors = FALSE may be desirable.
# Note read.table() is the same as read.csv() except that read.csv is comma delimited by default
## Reading in Larger Tables
# READ THE HELP PAGE FOR read.table()!!
# Make sure enough RAM is allocated to store dataset
# Use the colClasses argument. Setting class to "numeric" makes R assume that all columns are
# numeric. This makes R run MUCH faster, often by x2!
# A quick and dirty way to figure out the classes of each column is the following:
initial <- read.table("datatable.txt", nrows = 100)
classes <- sapply(initial, class)
tabAll <- read.table("datatable.txt", colClasses = classes)
# setting nrows will not help R run faster but will help with memory usage.
## Textual Data formats
# dput - can only be used on a single R object
y <- data.frame(a = 1, b = "a")
dput(y)
dput(y, file = "y.R")
new.y <- dget("y.R")
new.y
# dump - similar to dput but can be applied to several objects
x <- "foo"
y <- data.frame(a = 1, b = "a")
dump(c("x", "y"), file = "data.R")
rm(x, y)
source("data.R")
y
x
# Reading lines from a connection - readLines() (requires network access here)
con <- url("http://jhsph.edu", "r")
x <- readLines(con)
head(x)
##Subsetting objects in R
# [] - always returns an object of the same class as the original, can be used to select more than
# one element (there is one exception)
# [[]] - is used to extract elements of a list or a data frame, it can only be used to extract a
# single element and the class of the returned object will not necessarily be a list or
# data frame
# $ - is used to extract elements of a list or data frame by name, semantics are similar to that
# of [[]]
# Examples, numeric indexes
x <- c("a", "b", "c", "c", "d", "a")
x[1] # extracts the first element of x
x[2] # extracts the second element of x
x[1:4] # extracts the first four elements of x
# examples of logical index
x[x > "a"]
u <- x > "a"
x[u]
##Subsetting lists
x <- list(foo = 1:4, bar = 0.6)
x[1] #list that contains 1 through 4
x[[1]] #sequence of 1 through 4
x$foo
x$bar #gives the element that is associated with the name "bar"
x[["bar"]]
x["bar"]
#Note using the name is nice because you do not have to remember the numeric index
#extract multiple elements from a list
x <- list(foo = 1:4, ba = 0.6, baz = "hello")
x[c(1 ,3)] # returns list of foo and baz. Can't use [[]]
# but we can use [[]] for other things:
name <- "foo"
x[[name]]
x$name # element 'name' doesn't exist! ($ does not evaluate the variable)
x$foo # element 'foo' does exist
# Subsetting nested elements of a list. The [[]] can take an integer sequence
x <- list(a = list(10, 12, 14), b = c(3.14, 2.81))
x[[c(1, 3)]]
x[[1]][[3]]
x[[c(2, 1)]]
## Subsetting Matrices
x <- matrix(1:6, 2, 3)
x[1,2]
x[2,1]
# Indices can also be missing
x[1, ] #first row of matrix
x[, 2] #second column of matrix
# NOTE: by default, when a single element of a matrix is retrieved, it is returned as a vector of
#length 1 rather than a 1x1 matrix. This behavior can be altered by setting drop = FALSE
x[1, 2]
x[1, 2, drop = FALSE]
x[1, ]
x[1, , drop = FALSE]
##Removing NA values
x <- c(1, 2, NA, 4, NA, 5)
bad <- is.na(x)
x[!bad]
# more complicated case: NAs spread across multiple parallel vectors
x <- c(1, 2, NA, 4, NA, 5, 6)
y <- c("a", "b", NA, "d", NA, "f", NA)
good <- complete.cases(x, y)
good
x[good]
y[good]
# example with the built-in airquality data set
airquality[1:6, ]
good <- complete.cases(airquality)
airquality[good, ][1:6, ]
########## Week 1 - Quiz ##########
# Scratch work for the week-1 quiz: coercion, recycling, logical
# subsetting, then questions on the hw1_data.csv air-quality set.
x <- 4
class(x)                          # "numeric"
x <- c(4, TRUE)
class(x)                          # TRUE is coerced to numeric 1
x <- 1:4
y <- 2:3
x+y                               # y is recycled to length 4
x <- c(3, 5, 1, 10, 12, 6)
x[x < 6] <- 0
x[x <= 5] <- 0
x[x %in% 1:5] <- 0
main_data <- read.csv("hw1_data.csv", header = TRUE)
main_data
names(main_data)
head(main_data, 2)
tail(main_data, 2)
View(main_data)
# How many Ozone values are missing / mean of the observed ones.
missing_ozone <- sum(is.na(main_data$Ozone))
mean_ozone <- mean(main_data$Ozone[!is.na(main_data$Ozone)])
# Mean Solar.R over complete rows with Ozone > 31 and Temp > 90.
# (comparisons already return logicals, so the as.logical() wrappers
# in the original were redundant)
complete_main <- complete.cases(main_data)
main <- main_data[complete_main, ]
main <- main[main$Ozone > 31, ]
main <- main[main$Temp > 90, ]
mean(main$Solar.R)
# Mean temperature in June.
main2 <- main_data[main_data$Month == 6, ]
mean(main2$Temp)
# Maximum non-missing Ozone value in May.
main3 <- main_data[main_data$Month == 5 & !is.na(main_data$Ozone), ]
max(main3$Ozone)
ozone <- main_data[,1]
# Fix: row subsetting on a data frame needs the trailing comma; the
# original `main_data[is.na(main_data$Ozone)]` selected *columns*, and
# `main_data[Ozone > 31]` referenced an undefined bare `Ozone`.
complete_data <- main_data[is.na(main_data$Ozone), ]
View(complete_data)
filter_data <- main_data[main_data$Ozone > 31, ]
| /hw1.R | no_license | zarastria/Coursera---Programming-with-R | R | false | false | 5,662 | r | ########## Week 1 - Lecture Notes ##########
myfunction <- function() {
x <- rnorm(100)
print(x)
mean(x)
}
second <- function(x) {
x + rnorm(length(x))
}
## Missing Values - NaN is NA but NA is not NaN. NA's can have classes
x <- c(1, 2, NA, 20, 3)
is.na(x)
is.nan(x)
x <- c(1, 2, NaN, NA, 4)
is.na(x)
is.nan(x)
## Data frame - stores tabular data. Every element of the list has to be of the same length. Unlike
## a matrix, each column (element) can be a different class. Special attributes, row.names - each
## row has a name. Created by calling read.table() or read.csv(). data.frame() builds a dataframe
## from scratch.
x <- data.frame(foo = 1:4, bar = c(T, T, F, F))
x
nrow(x)
ncol(x)
## Names - R objects can have names! No name by default but can assign.
x <- 1:3
names(x)
names(x) <- c("foo", "bar", "norf")
x
names(x)
# Lists can also have names!
x <- list(a = 1, b = 2, c = 3)
x
#matrices can have names - dimnames()
m <- matrix(1:4, nrow = 2, ncol = 2)
dimnames(m) <- list(c("a", "b"), c("c", "d"))
m
## Reading Tabular Data - text files that returns a
# read.table()
# read.csv()
# Note: read.table reads in characters as factor as a default. This could be problematic for text
# analytics, so setting stringasfactor = FALSE may be desirable. Read.table returns a data frame
# Note read.table() is the same as read.csv() except that read.csv is comma deliminated by default
## Reading in Larger Tables
# READ THE HELP PAGE FOR read.table()!!
# Make sure enough RAM is allocated to store dataset
# Use the colClasses argument. set class to "numeric" makes R assume that all columns are numeric.
# This makes R run MUCH faster, often by x2!
# A quick and ditry way to figure out the classes of wach column is the following:
initial <- read.table("datatable.txt", nrows = 100)
classes <- sapply(initial, class)
tabAll <- read.table("datatable.txt", colClasses = classes)
#set nrows will not help R run faster but will help with memorty usage.
## Textual Data formats
#dput - can only be used on a single R object
y <- data.frame(a = 1, b = "a")
dput(y)
dput(y, file = "y.R")
new.y <- dget("y.R")
new.y
#dunp - similar to dput but can be applied to several objects
x <- "foo"
y <- data.frame(a = 1, b = "a")
dump(c("x", "y"), file = "data.R")
rm(x, y)
source("data.R")
y
x
#Reading lines of a text file - readLines()
con <- url("http://jhsph.edu", "r")
x <- readLines(con)
head(x)
##Subsetting objects in R
# [] - always returns an object of the same class as the original, can be used to select more than
# one element (there is one excption)
# [[]] - is used to extract elements of a list or a data frame, it can only be used to extract a
# single element and the class of the returned object will not necessarily be a list or
# data frame
# $ - is used to extract elements of a list or data frame by name, semantics are similar to that
# of [[]]
# Examples, numeric indexes
x <- c("a", "b", "c", "c", "d", "a")
x[1] # exctracts the first element of x
x[2] # extracts the second element of x
x[1:4] # extracts the first four elements of x
#examples of logical index
x[x > "a"]
u <- x > "a"
x[u]
##Subsetting lists
x <- list(foo = 1:4, bar = 0.6)
x[1] #list that contains 1 through 4
x[[1]] #sequence of 1 through 4
x$foo
x$bar #give element that is associate with the name "bar"
x[["bar"]]
x["bar"]
#Note using the name is nice because you do not have to remember the numeric index
#extract multiple elements form a list
x <- list(foo = 1:4, ba = 0.6, baz = "hello")
x[c(1 ,3)] # returns list of foo and baz. Can't use [[]]
# but we can use[[]] for other things:
name <- "foo"
x[[name]]
x$name # element 'name' doesn't exist!
x$foo # element 'foo' does exist
# Subsetting nested elements of a list. The [[]] can take an integer sequence
x <- list(a = list(10, 12, 14), b = c(3.14, 2.81))
x[[c(1, 3)]]
x[[1]][[3]]
x[[c(2, 1)]]
## Subestting Matrices
x <- matrix(1:6, 2, 3)
x[1,2]
x[2,1]
# Indices can also be missing
x[1, ] #first row of matrix
x[, 2] #second column of matrix
# NOTE: by default, when a single element of a matrix is retrieved, it is returned as a vector of
#length 1 rather than a 1x1 matrix. This behavior can be altered by setting drop = FALSE
x[1, 2]
x[1, 2, drop = FALSE]
x[1, ]
x[1, , drop = FALSE]
##Removing NA values
x <- c(1, 2, NA, 4, NA, 5)
bad <- is.na(x)
x[!bad]
#more complicated case from multiple elements
x <- c(1, 2, NA, 4, NA, 5, 6)
y <- c("a", "b", NA, "d", NA, "f", NA)
good <- complete.cases(x, y)
good
x[good]
y[good]
#example
airquality[1:6, ]
good <- complete.cases(airquality)
airquality[good, ][1:6, ]
########## Week 1 - Quiz ##########
x <- 4
class(x)
x <- c(4, TRUE)
class(x)
x <- 1:4
y <- 2:3
x+y
x <- c(3, 5, 1, 10, 12, 6)
x[x < 6] <- 0
x[x <= 5] <- 0
x[x %in% 1:5] <- 0
main_data <- read.csv("hw1_data.csv", header = TRUE)
main_data
names(main_data)
head(main_data, 2)
tail(main_data, 2)
View(main_data)
missing_ozone <- sum(is.na(main_data$Ozone))
mean_ozone <- mean(main_data$Ozone[!is.na(main_data$Ozone)])
complete_main <- complete.cases(main_data)
main <- main_data[complete_main,]
filter_ozone <- as.logical(main$Ozone > 31)
main <- main[filter_ozone,]
filter_temp <- as.logical(main$Temp > 90)
main<- main[filter_temp,]
mean(main$Solar.R)
filter_month <- as.logical(main_data$Month == 6)
main2 <- main_data[filter_month,]
mean(main2$Temp)
filter_month <- as.logical(main_data$Month == 5 & !is.na(main_data$Ozone))
main3 <- main_data[filter_month,]
max(main3$Ozone)
ozone <- main_data[,1]
complete_data <- main_data[is.na(main_data$Ozone)]
View(complete_data)
filter_data <- main_data[Ozone > 31]
|
## Install every package this project depends on, in one call.
## Fixes from review: CRAN package names are case-sensitive --
## 'visnetwork' is published as 'visNetwork' -- and 'knittr' was a
## typo for 'knitr'.
install.packages(c(
  "tidyverse",
  "visNetwork",   # was 'visnetwork' (not a CRAN name)
  "sqldf",
  "stringi",
  "stringr",
  "RSelenium",
  "rvest",
  "reshape2",
  "readxl",
  "rattle",
  "lubridate",
  "magrittr",
  "knitr",        # was 'knittr' (typo)
  "jsonlite",
  "datapasta",
  "data.table"
))
## NOTE(review): 'rjq' is not on CRAN -- possibly 'jqr' (jq bindings)
## or 'rjson' was intended. Kept as-is pending confirmation.
install.packages('rjq')
| /R/installPackages.R | no_license | hpiedcoq/dobuke | R | false | false | 482 | r | install.packages('tidyverse')
install.packages('visnetwork')
install.packages('sqldf')
install.packages('stringi')
install.packages('stringr')
install.packages('RSelenium')
install.packages('rvest')
install.packages('reshape2')
install.packages('readxl')
install.packages('rattle')
install.packages('lubridate')
install.packages('magrittr')
install.packages('knittr')
install.packages('jsonlite')
install.packages('datapasta')
install.packages('data.table')
install.packages('rjq')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getCBP.R
\name{getCBP}
\alias{getCBP}
\title{Prepare CBP data}
\usage{
getCBP(
years = 2017,
location = "national",
industry = 0,
LFO = "-",
input_path,
output_path
)
}
\arguments{
\item{years}{(integer) any integer between 2000 and 2017 is supported.}
\item{location}{(character) options are "county", "state", "national".}
\item{industry}{(integer) options are 0, 2, 3, 4, 6.}
\item{LFO}{(character) legal form of organization.}
\item{input_path}{(character) path from which the raw CBP input files are read.}
\item{output_path}{(character) path to which the prepared CBP data is written.}
}
\description{
Prepare CBP data
}
| /EconData/man/getCBP.Rd | permissive | setzler/EconData | R | false | true | 559 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getCBP.R
\name{getCBP}
\alias{getCBP}
\title{Prepare CBP data}
\usage{
getCBP(
years = 2017,
location = "national",
industry = 0,
LFO = "-",
input_path,
output_path
)
}
\arguments{
\item{years}{(integer) any integer between 2000 and 2017 is supported.}
\item{location}{(character) options are "county", "state", "national".}
\item{industry}{(integer) options are 0, 2, 3, 4, 6.}
\item{LFO}{(character) legal form of organization.}
}
\description{
Prepare CBP data
}
|
## Below are a pair of functions that cache the inverse of a matrix

## makeCacheMatrix creates a special "matrix" object that can cache its
## inverse. It returns a list of four accessor functions closing over the
## matrix `x` and its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until computed

  # Replace the stored matrix and invalidate any stale cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }

  # Return the stored matrix.
  get <- function() x

  # Store a freshly computed inverse in the cache.
  setMatrix <- function(inverse) inv <<- inverse

  # Return the cached inverse (NULL when nothing has been cached yet).
  getInverse <- function() inv

  list(set = set, get = get,
       setMatrix = setMatrix,
       getInverse = getInverse)
}
## cacheSolve returns the inverse of the special "matrix" built by
## makeCacheMatrix, computing it at most once: the first call solves and
## caches the inverse, later calls return the cached copy.
##
## Fixes from review: the original discarded the values returned by its
## tryCatch handlers, so on a singular/non-square matrix it returned NULL
## rather than the intended NA, and its `finally` clause then cached that
## NULL. Here the handler value is kept, and only a successful inverse is
## cached so a failed solve is simply retried on the next call.
cacheSolve <- function(x, ...) {
  ## attempt to get the inverse of the matrix stored in cache
  cache <- x$getInverse()
  if (!is.null(cache)) {
    message("getting cached data")
    return(cache)
  }
  ## cache miss: fetch the matrix and invert it, trapping failures
  ## (e.g. singular or non-square input) cleanly
  matrix <- x$get()
  cache <- tryCatch(
    solve(matrix, ...),
    error = function(e) {
      message("Error:")
      message(conditionMessage(e))
      NA
    },
    warning = function(w) {
      message("Warning:")
      message(conditionMessage(w))
      NA
    }
  )
  ## only cache a real inverse; never poison the cache with NA
  if (is.matrix(cache)) {
    x$setMatrix(cache)
  }
  ## Return a matrix that is the inverse of 'x' (NA if it could not be inverted)
  cache
}
| /cachematrix.R | no_license | ashtearty/ProgrammingAssignment2 | R | false | false | 2,049 | r | ## Below are a pair of functions that cache the inverse of a matrix
## makeCacheMatrix creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
## stores the cached value
## initialise to NULL
cache<-NULL
set<-function(y){
x<<-y
cache<<-NULL
}
# get the value of the matrix
get<-function()x
# invert the matrix and store in cache
setMatrix<-function(inverse)cache<<-inverse
# get the inverted matrix from cache
getInverse<-function()cache
# return the created functions to the working environment
list(set=set,get=get,
setMatrix=setMatrix,
getInverse=getInverse)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## attempt to get the inverse of the matrix stored in cache
cache<-x$getInverse()
## return inverted matrix from cache if it exists
## else create the matrix in working environment
if(!is.null(cache)){
message("getting cached data")
# display matrix in console
return(cache)
}
# create matrix since it does not exist
matrix <-x$get()
# make sure matrix is square and invertible
# if not, handle exception cleanly
tryCatch({
# set and return inverse of matrix
cache<-solve(matrix, ...)
},
error=function(e){
message("Error:")
message(e)
return(NA)
},
warning=function(e){
message("Warning:")
message(e)
return(NA)
},
finally={
# set inverted matrix in cache
x$setMatrix(cache)
})
## Return a matrix that is the inverse of 'x'
return(cache)
}
|
########### Calculate pairwise base pair distance ##########
#
# MF 8/20/2018
#
# For each scaffold in the alignment-position table, compute the pairwise
# distance (bp) between locus start positions, write the result to csv,
# then plot the distribution of distances.
############################################################
# Set Working Directory / Load Packages -----------------------------------
# NOTE(review): hard-coded absolute Windows path -- run from this
# directory (or switch to a project-relative path) on other machines.
setwd("D:/Pacific cod/DataAnalysis/PCod-Korea-repo/analyses/LD")
library(dplyr)
library(ggplot2)
# Import Data -------------------------------------------------------------
# Columns used below: Locus, Scaffold, Start_bp, MQ.
samdat <- read.csv("batch_8_verif_alignment_positions.csv")
head(samdat)
pairwise_dist <- function(data, scaffold){
## subset the data frame to include only loci for that scaffold, and arrange by base pair position
mydat <- data %>%
filter(Scaffold == scaffold) %>%
arrange(Start_bp)
## initiate empty final data frame
final_df <- data.frame(Locus1 = as.character(),
Locus2 = as.character(),
Scaffold = as.character(),
Distance = as.numeric(),
MQ = as.numeric())
## for "i" in a list of numbers 1: length of data
for(i in seq(1,length(mydat$Locus))){
### save new locus at position "i"
tmp_locus <- mydat$Locus[i]
### save position of that locus as the tmp_start
tmp_start <- mydat$Start_bp[i]
### select all rows below tmp_locus (already did calculations for rows above)
to_calc <- slice(mydat, i+1:n())
### rename the locus in the first column as the first locus
colnames(to_calc)[1] <- "Locus1"
colnames(to_calc)[3] <- "Locus1_Start"
### add columns with tmp_locus name / start position, and the distance between tmp_locus / each row's locus
dist_df <- to_calc %>%
mutate(Locus2 = tmp_locus) %>%
mutate(Locus2_Start = tmp_start) %>%
mutate(Distance = abs(tmp_start - Locus1_Start))
### reorder columns
dist_df <- select(dist_df, c(Locus1, Locus2, Scaffold, Locus1_Start, Locus2_Start, Distance, MQ))
final_df <- rbind(final_df, dist_df)
}
return(final_df)
}
# Apply function to each scaffold -----------------------------------------
# Collect the per-scaffold pair tables in a list and bind once. This
# avoids the O(n^2) rbind-in-a-loop and the column mismatch between the
# old 5-column accumulator and the 7-column chunks it was appended with.
outdat <- bind_rows(
  lapply(unique(samdat$Scaffold),
         function(s) pairwise_dist(data = samdat, scaffold = s))
)
# Write out data frame ----------------------------------------------------
write.csv(file="batch_8_verif_alignment_pairwise_dist.csv", x=outdat, row.names=FALSE)
# Distribution of distances -----------------------------------------------
# Map the column by name inside aes() rather than via `outdat$Distance`.
ggplot(outdat, aes(Distance)) +
  geom_bar()
| /analyses/LD/calc_pairwise_bp_dist.R | no_license | mfisher5/PCod-Korea-repo | R | false | false | 2,759 | r | ########### Calculate pairwise base pair distance ##########
#
# MF 8/20/2018
#
############################################################
# Set Working Directory / Load Packages -----------------------------------
setwd("D:/Pacific cod/DataAnalysis/PCod-Korea-repo/analyses/LD")
library(dplyr)
library(ggplot2)
# Import Data -------------------------------------------------------------
samdat <- read.csv("batch_8_verif_alignment_positions.csv")
head(samdat)
# Create Function ------------------------------------------------------
pairwise_dist <- function(data, scaffold){
## subset the data frame to include only loci for that scaffold, and arrange by base pair position
mydat <- data %>%
filter(Scaffold == scaffold) %>%
arrange(Start_bp)
## initiate empty final data frame
final_df <- data.frame(Locus1 = as.character(),
Locus2 = as.character(),
Scaffold = as.character(),
Distance = as.numeric(),
MQ = as.numeric())
## for "i" in a list of numbers 1: length of data
for(i in seq(1,length(mydat$Locus))){
### save new locus at position "i"
tmp_locus <- mydat$Locus[i]
### save position of that locus as the tmp_start
tmp_start <- mydat$Start_bp[i]
### select all rows below tmp_locus (already did calculations for rows above)
to_calc <- slice(mydat, i+1:n())
### rename the locus in the first column as the first locus
colnames(to_calc)[1] <- "Locus1"
colnames(to_calc)[3] <- "Locus1_Start"
### add columns with tmp_locus name / start position, and the distance between tmp_locus / each row's locus
dist_df <- to_calc %>%
mutate(Locus2 = tmp_locus) %>%
mutate(Locus2_Start = tmp_start) %>%
mutate(Distance = abs(tmp_start - Locus1_Start))
### reorder columns
dist_df <- select(dist_df, c(Locus1, Locus2, Scaffold, Locus1_Start, Locus2_Start, Distance, MQ))
final_df <- rbind(final_df, dist_df)
}
return(final_df)
}
# Apply function to each scaffold -----------------------------------------
outdat <- data.frame(Locus1 = as.character(),
Locus2 = as.character(),
Scaffold = as.character(),
Distance = as.numeric(),
MQ = as.numeric())
for(s in unique(samdat$Scaffold)){
newdat <- pairwise_dist(data = samdat, scaffold = s)
outdat <- rbind(outdat,newdat)
}
# Write out data frame ----------------------------------------------------
write.csv(file="batch_8_verif_alignment_pairwise_dist.csv", x=outdat, row.names=FALSE)
# Distribution of distances -----------------------------------------------
ggplot(outdat, aes(outdat$Distance)) +
geom_bar()
|
# Header of the Deriv package's symbolic-differentiation test suite.
context(paste("Symbolic differentiation rules v", packageVersion("Deriv"), sep=""))
# Force the C collation locale so string comparisons of deparsed
# expressions are deterministic; the original locale is saved in
# lc_orig so it can be restored later.
lc_orig=Sys.getlocale(category = "LC_COLLATE")
Sys.setlocale(category = "LC_COLLATE", locale = "C")
num_test_deriv <- function(fun, larg, narg=1, h=1.e-5, tolerance=2000*h^2) {
# test the first derivative of a function fun() (given as a character
# string) by Deriv() and central difference.
# larg is a named list of parameters to pass to fun
# narg indicates by which of fun's arguments the differentiation must be made
# h is the small perturbation in the central differentiation: x-h and x+h
# Parameter tolerance is used in comparison test.
if (length(names(larg)) == 0)
stop(sprintf("No argument for function %s() to differentiate. There must be at leat one argument.", fun))
if (h <= 0)
stop("Parameter h must be positive")
# numeric derivative: central difference (f(x+h) - f(x-h)) / (2h)
# applied to the narg-th argument only
larg_ph=larg_mh=larg
larg_ph[[narg]]=larg_ph[[narg]]+h
larg_mh[[narg]]=larg_mh[[narg]]-h
f_ph=do.call(fun, larg_ph)
f_mh=do.call(fun, larg_mh)
dnum=(f_ph-f_mh)/(2*h)
# symbolic derivative: build the call fun(..., <symbol>, ...) with the
# narg-th argument replaced by its own name as a symbol, differentiate
# with Deriv(), wrap the result into a function and evaluate it at larg
sym_larg=larg
nm_x=names(larg)[narg]
sym_larg[[narg]]=as.symbol(nm_x)
flang=as.symbol(fun)
dsym=do.call(as.function(c(sym_larg, Deriv(as.call(c(flang, sym_larg)), nm_x))), larg, quote=TRUE)
#cat(sprintf("comparing %s by %s\n", format1(as.call(c(flang, larg))), nm_x))
# the two derivatives must agree within the requested tolerance
expect_equal(dnum, dsym, tolerance=tolerance, info=sprintf("%s by %s", format1(as.call(c(flang, larg))), nm_x))
}
f=function(x) {} # placeholder function whose body is swapped in below
# expect_equal_deriv: check that Deriv(t, nmvar) produces the reference
# expression r for every supported input form (language object, character
# string, formula, expression, function), then cross-check against a
# numeric central difference on x in [0.1, 1].
expect_equal_deriv <- function(t, r, nmvar="x") {
test=substitute(t)
ref=substitute(r)
# compare as language
ans=Deriv(test, nmvar, cache.exp=FALSE)
#print(deparse(ans))
eval(bquote(expect_equal(format1(quote(.(ans))), format1(quote(.(ref))))))
# compare as string
ans=Deriv(format1(test), nmvar, cache.exp=FALSE)
#print(ans)
eval(bquote(expect_equal(.(ans), format1(quote(.(ref))))))
# compare as formula
ans=Deriv(call("~", test), nmvar, cache.exp=FALSE)
#print(deparse(ans))
eval(bquote(expect_equal(format1(quote(.(ans))), format1(quote(.(ref))))))
# compare as expression
ans=Deriv(as.expression(test), nmvar, cache.exp=FALSE)
#print(deparse(ans))
eval(bquote(expect_equal(format1(.(ans)), format1(expression(.(ref))))))
# compare as function: give the placeholder f the test body, derive it,
# then give f the reference body and compare whole functions
body(f)=test
ans=Deriv(f, nmvar, cache.exp=FALSE)
body(f)=ref
#cat("\nf deriv=", format1(ans), "\n", sep="")
#cat("\nsimplify=", format1(Simplify(ans)), "\n", sep="")
#cat("f ref=", format1(f), "\n", sep="")
eval(bquote(expect_equal(quote(.(ans)), quote(.(f)))))
# compare with central differences (silently skipped when the test
# expression cannot be evaluated on the grid, e.g. needs other variables)
x=seq(0.1, 1, len=10)
h=1.e-7
suppressWarnings(f1 <- try(sapply(x-h, function(val) eval(test, list(x=val))), silent=TRUE))
suppressWarnings(f2 <- try(sapply(x+h, function(val) eval(test, list(x=val))), silent=TRUE))
if (!inherits(f1, "try-error") && !inherits(f2, "try-error")) {
numder=(f2-f1)/h/2
refder=sapply(x, function(val) eval(ref, list(x=val)))
# only compare where both the numeric and reference values are finite
i=is.finite(refder) & is.finite(numder)
expect_more_than(sum(i), 0, label=sprintf("length of central diff for %s", format1(test)))
expect_equal(numder[i], refder[i], tolerance=5.e-8, label=sprintf("Central diff. of '%s'", format1(test)), expected.label=sprintf("'%s'", format1(ref)))
}
}
expect_equal_format1 <- function(t, r) {
  # Assert that two objects render to the same single-string deparse
  # (format1) form; bquote() splices the evaluated arguments into the
  # expect_equal call so failure messages show the compared values.
  cmp_call <- bquote(expect_equal(format1(.(t)), format1(.(r))))
  eval(cmp_call)
}
test_that("elementary functions", {
expect_equal(Deriv("x", "x"), "1")
expect_equal(Deriv(quote(x), "x"), 1)
expect_equal(Deriv(quote((x)), "x"), 1)
expect_equal_deriv(x**2, 2*x)
expect_equal_deriv(x**n, n*x^(n-1))
expect_equal_deriv(2**x, 0.693147180559945 * 2^x)
expect_equal_deriv(sin(x), cos(x))
expect_equal_deriv(cos(x), -sin(x))
expect_equal_deriv(tan(x), 1/cos(x)^2)
expect_equal_deriv(asin(x), 1/sqrt(1 - x^2))
expect_equal_deriv(acos(x), -(1/sqrt(1 - x^2)))
expect_equal_deriv(atan(x), 1/(1+x^2))
expect_equal_deriv(atan2(x, y), y/(x^2+y^2))
expect_equal_deriv(atan2(0.5, x), -(0.5/(0.25 + x^2)))
expect_equal_deriv(exp(x), exp(x))
expect_equal_deriv(expm1(x), exp(x))
expect_equal_deriv(log(x), 1/x)
expect_equal_deriv(log1p(x), 1/(1+x))
expect_equal_deriv(abs(x), sign(x))
expect_equal_deriv(sign(x), 0)
expect_equal_deriv(sinh(x), cosh(x))
expect_equal_deriv(cosh(x), sinh(x))
expect_equal_deriv(tanh(x), 1-tanh(x)^2)
})
if (getRversion() >= "3.1.0") {
test_that("trigonometric functions with pi", {
expect_equal_deriv(sinpi(x), pi*cospi(x))
expect_equal_deriv(cospi(x), -(pi*sinpi(x)))
expect_equal_deriv(tanpi(x), pi/cospi(x)**2)
})
}
test_that("special functions", {
expect_equal_deriv(beta(x, y), beta(x, y) * (digamma(x) - digamma(x + y)))
expect_equal_deriv(beta(x, y), beta(x, y) * (digamma(y) - digamma(x + y)), "y")
expect_equal_deriv(besselI(x, 0), besselI(x, 1))
expect_equal_deriv(besselI(x, 0, FALSE), besselI(x, 1))
expect_equal_deriv(besselI(x, 0, TRUE), besselI(x, 1, TRUE)-besselI(x, 0, TRUE))
expect_equal_deriv(besselI(x, 1), 0.5 * (besselI(x, 0) + besselI(x, 2)))
expect_equal_deriv(besselI(x, 1, FALSE), 0.5 * (besselI(x, 0) + besselI(x, 2)))
expect_equal_deriv(besselI(x, 1, TRUE), 0.5 * (besselI(x, 0, TRUE) + besselI(x, 2, TRUE))-besselI(x, 1, TRUE))
expect_equal_deriv(besselI(x, n), if (n == 0) besselI(x, 1) else 0.5 * (besselI(x, 1 + n) + besselI(x, n - 1)))
expect_equal_deriv(besselI(x, n, TRUE), (if (n == 0) besselI(x, 1, TRUE) else 0.5 * (besselI(x, 1 + n, TRUE) + besselI(x, n - 1, TRUE)))-besselI(x, n, TRUE))
expect_equal_deriv(besselK(x, 0), -besselK(x, 1))
expect_equal_deriv(besselK(x, 0, FALSE), -besselK(x, 1))
expect_equal_deriv(besselK(x, 0, TRUE), besselK(x, 0, TRUE)-besselK(x, 1, TRUE))
expect_equal_deriv(besselK(x, 1), -(0.5 * (besselK(x, 0) + besselK(x, 2))))
expect_equal_deriv(besselK(x, 1, FALSE), -(0.5 * (besselK(x, 0) + besselK(x, 2))))
expect_equal_deriv(besselK(x, 1, TRUE), besselK(x, 1, TRUE)-0.5 * (besselK(x, 0, TRUE) + besselK(x, 2, TRUE)))
expect_equal_deriv(besselK(x, n), if (n == 0) -besselK(x, 1) else -(0.5 * (besselK(x, 1 + n) + besselK(x, n - 1))))
expect_equal_deriv(besselK(x, n, FALSE), if (n == 0) -besselK(x, 1) else -(0.5 * (besselK(x, 1 + n) + besselK(x, n - 1))))
expect_equal_deriv(besselK(x, n, TRUE), besselK(x, n, TRUE)+if (n == 0) -besselK(x, 1, TRUE) else -(0.5 * (besselK(x, 1 + n, TRUE) + besselK(x, n - 1, TRUE))))
expect_equal_deriv(besselJ(x, 0), -besselJ(x, 1))
expect_equal_deriv(besselJ(x, 1), 0.5 * (besselJ(x, 0) - besselJ(x, 2)))
expect_equal_deriv(besselJ(x, n), if (n == 0) -besselJ(x, 1) else 0.5 * (besselJ(x, n - 1) - besselJ(x, 1 + n)))
expect_equal_deriv(besselY(x, 0), -besselY(x, 1))
expect_equal_deriv(besselY(x, 1), 0.5 * (besselY(x, 0) - besselY(x, 2)))
expect_equal_deriv(besselY(x, n), if (n == 0) -besselY(x, 1) else 0.5 * (besselY(x, n - 1) - besselY(x, 1 + n)))
expect_equal_deriv(gamma(x), digamma(x) * gamma(x))
expect_equal_deriv(lgamma(x), digamma(x))
expect_equal_deriv(digamma(x), trigamma(x))
expect_equal_deriv(trigamma(x), psigamma(x, 2L))
expect_equal_deriv(psigamma(x), psigamma(x, 1L))
expect_equal_deriv(psigamma(x, n), psigamma(x, 1L+n))
expect_equal_deriv(beta(x, y), beta(x, y) * (digamma(x) - digamma(x + y)))
expect_equal_deriv(beta(x, y), beta(x, y) * (digamma(y) - digamma(x + y)), "y")
expect_equal_deriv(lbeta(x, y), digamma(x) - digamma(x + y))
expect_equal_deriv(lbeta(x, y), digamma(y) - digamma(x + y), "y")
})
test_that("probability densities", {
expect_equal_deriv(dbinom(5,3,x), 3 * ((3 - 5 * x) * dbinom(5, 2, x)/(1 - x)^2))
expect_equal_deriv(dnorm(x, m=0.5), -(dnorm(x, 0.5, 1) * (x - 0.5)))
})
test_that("chain rule: multiply by a const", {
expect_equal_deriv(a*x, a)
expect_equal_deriv(a[1]*x, a[1])
expect_equal_deriv(a[[1]]*x, a[[1]])
expect_equal_deriv(a$b*x, a$b)
expect_equal_deriv((a*x)**2, 2*(a^2*x))
expect_equal_deriv((a*x)**n, a*n*(a*x)^(n-1))
expect_equal_deriv(sin(a*x), a*cos(a*x))
expect_equal_deriv(cos(a*x), -(a*sin(a*x)))
expect_equal_deriv(tan(a*x), a/cos(a*x)^2)
expect_equal_deriv(exp(a*x), a*exp(a*x))
expect_equal_deriv(log(a*x), 1/x)
})
test_that("particular cases", {
expect_equal_deriv(log(x, x), 0)
expect_equal_deriv(x^n+sin(n*x), n * (cos(n * x) + x^(n - 1)))
expect_equal_deriv(x*(1-x), 1-2*x)
expect_equal_deriv(x^x, x^x+x^x*log(x))
})
# test AD and caching
# gaussian function
g <- function(x, m=0, s=1) exp(-0.5*(x-m)^2/s^2)/s/sqrt(2*pi)
g1c <- Deriv(g, "x") # cache enabled by default
g1n <- Deriv(g, "x", cache.exp=FALSE) # cache disabled
g2c <- Deriv(g1c, "x") # cache enabled by default
g2n <- Deriv(g1n, "x", cache.exp=FALSE) # cache disabled
m <- 0.5
s <- 3.
x=seq(-2, 2, len=11)
f <- function(a) (1+a)^(1/a)
f1c <- Deriv(f)
f2c <- Deriv(f1c)
f3c <- Deriv(f2c)
f1 <- Deriv(f, cache.exp=FALSE)
f2 <- Deriv(f1, cache.exp=FALSE)
f3 <- Deriv(f2, cache.exp=FALSE)
a=seq(0.01, 2, len=11)
test_that("expression cache test", {
expect_equal_deriv(exp(-0.5*(x-m)^2/s^2)/s/sqrt(2*pi), -(exp(-(0.5 * ((x - m)^2/s^2))) * (x - m)/(s^3 * sqrt(2 * pi))))
expect_equal(g2n(x, m, s), g2c(x, m, s))
expect_equal(f3(a), f3c(a))
})
# composite function differentiation/caching (issue #6)
f<-function(x){ t<-x^2; log(t) }
g<-function(x) cos(f(x))
test_that("composite function", {
expect_equal(Deriv(g,"x"), function (x) -(2 * (sin(f(x))/x)))
})
# user function with non diff arguments
ifel<-ifelse
drule[["ifel"]]<-alist(test=NULL, yes=(test)*1, no=(!test)*1)
suppressWarnings(rm(t))
expect_equal(Deriv(~ifel(abs(t)<0.1, t**2, abs(t)), "t"), quote({
.e2 <- abs(t) < 0.1
(!.e2) * sign(t) + 2 * (t * .e2)
}))
drule[["ifel"]]<-NULL
# test error reporting
test_that("error reporting", {
expect_error(Deriv(rnorm), "is not in derivative table", fixed=TRUE)
expect_error(Deriv(~rnorm(x), "x"), "is not in derivative table", fixed=TRUE)
expect_error(Deriv(~x+rnorm(x), "x"), "is not in derivative table", fixed=TRUE)
})
# systematic central difference tests
set.seed(7)
test_that("central differences", {
for (nm_f in ls(drule)) {
rule <- drule[[nm_f]]
larg <- rule
narg <- length(larg)
larg[] <- runif(narg)
# possible logical parameters are swithed on/off
fargs=formals(nm_f)
ilo=sapply(fargs, is.logical)
if (any(ilo))
logrid=do.call(expand.grid, rep(list(c(TRUE, FALSE)), sum(ilo)))
for (iarg in seq_len(narg)) {
if (is.null(rule[[iarg]]))
next
if (is.null(fargs) || !any(ilo)) {
suppressWarnings(num_test_deriv(nm_f, larg, narg=iarg))
} else {
apply(logrid, 1, function(lv) {
lolarg=larg
lolarg[ilo]=lv
suppressWarnings(num_test_deriv(nm_f, lolarg, narg=iarg))
})
}
}
}
})
tmp <- Deriv(Deriv(quote(dnorm(x ** 2 - x)), "x"), "x")
test_that("dsym cleaning after nested call", {
expect_identical(Deriv(quote(.e1*x), "x"), quote(.e1)) # was issue #2
})
# doc examples
fsq <- function(x) x^2
fsc <- function(x, y) sin(x) * cos(y)
f_ <- Deriv(fsc)
fc <- function(x, h=0.1) if (abs(x) < h) 0.5*h*(x/h)**2 else abs(x)-0.5*h
myfun <- function(x, y=TRUE) NULL # do something usefull
dmyfun <- function(x, y=TRUE) NULL # myfun derivative by x.
drule[["myfun"]] <- alist(x=dmyfun(x, y), y=NULL) # y is just a logical
#cat("Deriv(myfun)=", format1(Deriv(myfun)), "\n")
theta <- list(m=0.1, sd=2.)
x <- names(theta)
names(x)=rep("theta", length(theta))
test_that("doc examples", {
expect_equal_format1(Deriv(fsq), function (x) 2 * x)
expect_equal_format1(Deriv(fsc), function (x, y) c(x = cos(x) * cos(y), y = -(sin(x) * sin(y))))
expect_equal(f_(3, 4), c(x=0.6471023, y=0.1068000), tolerance = 1.e-7)
expect_equal(Deriv(~ fsc(x, y^2), "y"), quote(-(2 * (y * sin(x) * sin(y^2)))))
expect_equal(Deriv(quote(fsc(x, y^2)), c("x", "y"), cache.exp=FALSE), quote(c(x = cos(x) * cos(y^2), y = -(2 * (y * sin(x) * sin(y^2))))))
expect_equal(Deriv(expression(sin(x^2) * y), "x"), expression(2 * (x * y * cos(x^2))))
expect_equal(Deriv("sin(x^2) * y", "x"), "2 * (x * y * cos(x^2))")
expect_equal(Deriv(fc, "x", cache=FALSE), function(x, h=0.1) if (abs(x) < h) x/h else sign(x))
expect_equal(Deriv(myfun(z^2, FALSE), "z"), quote(2 * (z * dmyfun(z^2, FALSE))))
expect_equal(Deriv(~exp(-(x-theta$m)**2/(2*theta$sd)), x, cache.exp=FALSE),
quote(c(theta_m = exp(-((x - theta$m)^2/(2 * theta$sd))) * (x - theta$m)/theta$sd,
theta_sd = 2 * (exp(-((x - theta$m)^2/(2 * theta$sd))) *
(x - theta$m)^2/(2 * theta$sd)^2))))
})
drule[["myfun"]] <- NULL
Sys.setlocale(category = "LC_COLLATE", locale = lc_orig)
| /Deriv/tests/testthat/test_Deriv.R | no_license | ingted/R-Examples | R | false | false | 12,779 | r | context(paste("Symbolic differentiation rules v", packageVersion("Deriv"), sep=""))
lc_orig=Sys.getlocale(category = "LC_COLLATE")
Sys.setlocale(category = "LC_COLLATE", locale = "C")
num_test_deriv <- function(fun, larg, narg=1, h=1.e-5, tolerance=2000*h^2) {
 # test the first derivative of a function fun() (given as a character
 # string) by Deriv() and central difference.
 # larg is a named list of parameters to pass to fun
 # narg indicates by which of fun's arguments the differentiation must be made
 # h is the small perturbation in the central differentiation: x-h and x+h
 # Parameter tolerance is used in comparison test.
 # Side effect: runs one testthat expectation (expect_equal) comparing the
 # numeric and symbolic derivatives.
 if (length(names(larg)) == 0)
 stop(sprintf("No argument for function %s() to differentiate. There must be at leat one argument.", fun))
 if (h <= 0)
 stop("Parameter h must be positive")
 # central-difference estimate: (f(x+h) - f(x-h)) / (2h)
 larg_ph=larg_mh=larg
 larg_ph[[narg]]=larg_ph[[narg]]+h
 larg_mh[[narg]]=larg_mh[[narg]]-h
 f_ph=do.call(fun, larg_ph)
 f_mh=do.call(fun, larg_mh)
 dnum=(f_ph-f_mh)/(2*h)
 # symbolic derivative: replace the narg-th argument by a symbol, build the
 # call fun(sym_args...), differentiate it by that symbol's name, and wrap
 # the result into a function whose formals are the original arguments
 sym_larg=larg
 nm_x=names(larg)[narg]
 sym_larg[[narg]]=as.symbol(nm_x)
 flang=as.symbol(fun)
 # quote=TRUE keeps larg values from being re-evaluated as language objects
 dsym=do.call(as.function(c(sym_larg, Deriv(as.call(c(flang, sym_larg)), nm_x))), larg, quote=TRUE)
 #cat(sprintf("comparing %s by %s\n", format1(as.call(c(flang, larg))), nm_x))
 expect_equal(dnum, dsym, tolerance=tolerance, info=sprintf("%s by %s", format1(as.call(c(flang, larg))), nm_x))
}
f=function(x) {} # empty place holder
expect_equal_deriv <- function(t, r, nmvar="x") {
test=substitute(t)
ref=substitute(r)
# compare as language
ans=Deriv(test, nmvar, cache.exp=FALSE)
#print(deparse(ans))
eval(bquote(expect_equal(format1(quote(.(ans))), format1(quote(.(ref))))))
# compare as string
ans=Deriv(format1(test), nmvar, cache.exp=FALSE)
#print(ans)
eval(bquote(expect_equal(.(ans), format1(quote(.(ref))))))
# compare as formula
ans=Deriv(call("~", test), nmvar, cache.exp=FALSE)
#print(deparse(ans))
eval(bquote(expect_equal(format1(quote(.(ans))), format1(quote(.(ref))))))
# compare as expression
ans=Deriv(as.expression(test), nmvar, cache.exp=FALSE)
#print(deparse(ans))
eval(bquote(expect_equal(format1(.(ans)), format1(expression(.(ref))))))
# compare as function
body(f)=test
ans=Deriv(f, nmvar, cache.exp=FALSE)
body(f)=ref
#cat("\nf deriv=", format1(ans), "\n", sep="")
#cat("\nsimplify=", format1(Simplify(ans)), "\n", sep="")
#cat("f ref=", format1(f), "\n", sep="")
eval(bquote(expect_equal(quote(.(ans)), quote(.(f)))))
# compare with central differences
x=seq(0.1, 1, len=10)
h=1.e-7
suppressWarnings(f1 <- try(sapply(x-h, function(val) eval(test, list(x=val))), silent=TRUE))
suppressWarnings(f2 <- try(sapply(x+h, function(val) eval(test, list(x=val))), silent=TRUE))
if (!inherits(f1, "try-error") && !inherits(f2, "try-error")) {
numder=(f2-f1)/h/2
refder=sapply(x, function(val) eval(ref, list(x=val)))
i=is.finite(refder) & is.finite(numder)
expect_more_than(sum(i), 0, label=sprintf("length of central diff for %s", format1(test)))
expect_equal(numder[i], refder[i], tolerance=5.e-8, label=sprintf("Central diff. of '%s'", format1(test)), expected.label=sprintf("'%s'", format1(ref)))
}
}
expect_equal_format1 <- function(t, r) {
eval(bquote(expect_equal(format1(.(t)), format1(.(r)))))
}
test_that("elementary functions", {
expect_equal(Deriv("x", "x"), "1")
expect_equal(Deriv(quote(x), "x"), 1)
expect_equal(Deriv(quote((x)), "x"), 1)
expect_equal_deriv(x**2, 2*x)
expect_equal_deriv(x**n, n*x^(n-1))
expect_equal_deriv(2**x, 0.693147180559945 * 2^x)
expect_equal_deriv(sin(x), cos(x))
expect_equal_deriv(cos(x), -sin(x))
expect_equal_deriv(tan(x), 1/cos(x)^2)
expect_equal_deriv(asin(x), 1/sqrt(1 - x^2))
expect_equal_deriv(acos(x), -(1/sqrt(1 - x^2)))
expect_equal_deriv(atan(x), 1/(1+x^2))
expect_equal_deriv(atan2(x, y), y/(x^2+y^2))
expect_equal_deriv(atan2(0.5, x), -(0.5/(0.25 + x^2)))
expect_equal_deriv(exp(x), exp(x))
expect_equal_deriv(expm1(x), exp(x))
expect_equal_deriv(log(x), 1/x)
expect_equal_deriv(log1p(x), 1/(1+x))
expect_equal_deriv(abs(x), sign(x))
expect_equal_deriv(sign(x), 0)
expect_equal_deriv(sinh(x), cosh(x))
expect_equal_deriv(cosh(x), sinh(x))
expect_equal_deriv(tanh(x), 1-tanh(x)^2)
})
if (getRversion() >= "3.1.0") {
test_that("trigonometric functions with pi", {
expect_equal_deriv(sinpi(x), pi*cospi(x))
expect_equal_deriv(cospi(x), -(pi*sinpi(x)))
expect_equal_deriv(tanpi(x), pi/cospi(x)**2)
})
}
test_that("special functions", {
expect_equal_deriv(beta(x, y), beta(x, y) * (digamma(x) - digamma(x + y)))
expect_equal_deriv(beta(x, y), beta(x, y) * (digamma(y) - digamma(x + y)), "y")
expect_equal_deriv(besselI(x, 0), besselI(x, 1))
expect_equal_deriv(besselI(x, 0, FALSE), besselI(x, 1))
expect_equal_deriv(besselI(x, 0, TRUE), besselI(x, 1, TRUE)-besselI(x, 0, TRUE))
expect_equal_deriv(besselI(x, 1), 0.5 * (besselI(x, 0) + besselI(x, 2)))
expect_equal_deriv(besselI(x, 1, FALSE), 0.5 * (besselI(x, 0) + besselI(x, 2)))
expect_equal_deriv(besselI(x, 1, TRUE), 0.5 * (besselI(x, 0, TRUE) + besselI(x, 2, TRUE))-besselI(x, 1, TRUE))
expect_equal_deriv(besselI(x, n), if (n == 0) besselI(x, 1) else 0.5 * (besselI(x, 1 + n) + besselI(x, n - 1)))
expect_equal_deriv(besselI(x, n, TRUE), (if (n == 0) besselI(x, 1, TRUE) else 0.5 * (besselI(x, 1 + n, TRUE) + besselI(x, n - 1, TRUE)))-besselI(x, n, TRUE))
expect_equal_deriv(besselK(x, 0), -besselK(x, 1))
expect_equal_deriv(besselK(x, 0, FALSE), -besselK(x, 1))
expect_equal_deriv(besselK(x, 0, TRUE), besselK(x, 0, TRUE)-besselK(x, 1, TRUE))
expect_equal_deriv(besselK(x, 1), -(0.5 * (besselK(x, 0) + besselK(x, 2))))
expect_equal_deriv(besselK(x, 1, FALSE), -(0.5 * (besselK(x, 0) + besselK(x, 2))))
expect_equal_deriv(besselK(x, 1, TRUE), besselK(x, 1, TRUE)-0.5 * (besselK(x, 0, TRUE) + besselK(x, 2, TRUE)))
expect_equal_deriv(besselK(x, n), if (n == 0) -besselK(x, 1) else -(0.5 * (besselK(x, 1 + n) + besselK(x, n - 1))))
expect_equal_deriv(besselK(x, n, FALSE), if (n == 0) -besselK(x, 1) else -(0.5 * (besselK(x, 1 + n) + besselK(x, n - 1))))
expect_equal_deriv(besselK(x, n, TRUE), besselK(x, n, TRUE)+if (n == 0) -besselK(x, 1, TRUE) else -(0.5 * (besselK(x, 1 + n, TRUE) + besselK(x, n - 1, TRUE))))
expect_equal_deriv(besselJ(x, 0), -besselJ(x, 1))
expect_equal_deriv(besselJ(x, 1), 0.5 * (besselJ(x, 0) - besselJ(x, 2)))
expect_equal_deriv(besselJ(x, n), if (n == 0) -besselJ(x, 1) else 0.5 * (besselJ(x, n - 1) - besselJ(x, 1 + n)))
expect_equal_deriv(besselY(x, 0), -besselY(x, 1))
expect_equal_deriv(besselY(x, 1), 0.5 * (besselY(x, 0) - besselY(x, 2)))
expect_equal_deriv(besselY(x, n), if (n == 0) -besselY(x, 1) else 0.5 * (besselY(x, n - 1) - besselY(x, 1 + n)))
expect_equal_deriv(gamma(x), digamma(x) * gamma(x))
expect_equal_deriv(lgamma(x), digamma(x))
expect_equal_deriv(digamma(x), trigamma(x))
expect_equal_deriv(trigamma(x), psigamma(x, 2L))
expect_equal_deriv(psigamma(x), psigamma(x, 1L))
expect_equal_deriv(psigamma(x, n), psigamma(x, 1L+n))
expect_equal_deriv(beta(x, y), beta(x, y) * (digamma(x) - digamma(x + y)))
expect_equal_deriv(beta(x, y), beta(x, y) * (digamma(y) - digamma(x + y)), "y")
expect_equal_deriv(lbeta(x, y), digamma(x) - digamma(x + y))
expect_equal_deriv(lbeta(x, y), digamma(y) - digamma(x + y), "y")
})
test_that("probability densities", {
expect_equal_deriv(dbinom(5,3,x), 3 * ((3 - 5 * x) * dbinom(5, 2, x)/(1 - x)^2))
expect_equal_deriv(dnorm(x, m=0.5), -(dnorm(x, 0.5, 1) * (x - 0.5)))
})
test_that("chain rule: multiply by a const", {
expect_equal_deriv(a*x, a)
expect_equal_deriv(a[1]*x, a[1])
expect_equal_deriv(a[[1]]*x, a[[1]])
expect_equal_deriv(a$b*x, a$b)
expect_equal_deriv((a*x)**2, 2*(a^2*x))
expect_equal_deriv((a*x)**n, a*n*(a*x)^(n-1))
expect_equal_deriv(sin(a*x), a*cos(a*x))
expect_equal_deriv(cos(a*x), -(a*sin(a*x)))
expect_equal_deriv(tan(a*x), a/cos(a*x)^2)
expect_equal_deriv(exp(a*x), a*exp(a*x))
expect_equal_deriv(log(a*x), 1/x)
})
test_that("particular cases", {
expect_equal_deriv(log(x, x), 0)
expect_equal_deriv(x^n+sin(n*x), n * (cos(n * x) + x^(n - 1)))
expect_equal_deriv(x*(1-x), 1-2*x)
expect_equal_deriv(x^x, x^x+x^x*log(x))
})
# test AD and caching
# gaussian function
g <- function(x, m=0, s=1) exp(-0.5*(x-m)^2/s^2)/s/sqrt(2*pi)
g1c <- Deriv(g, "x") # cache enabled by default
g1n <- Deriv(g, "x", cache.exp=FALSE) # cache disabled
g2c <- Deriv(g1c, "x") # cache enabled by default
g2n <- Deriv(g1n, "x", cache.exp=FALSE) # cache disabled
m <- 0.5
s <- 3.
x=seq(-2, 2, len=11)
f <- function(a) (1+a)^(1/a)
f1c <- Deriv(f)
f2c <- Deriv(f1c)
f3c <- Deriv(f2c)
f1 <- Deriv(f, cache.exp=FALSE)
f2 <- Deriv(f1, cache.exp=FALSE)
f3 <- Deriv(f2, cache.exp=FALSE)
a=seq(0.01, 2, len=11)
test_that("expression cache test", {
expect_equal_deriv(exp(-0.5*(x-m)^2/s^2)/s/sqrt(2*pi), -(exp(-(0.5 * ((x - m)^2/s^2))) * (x - m)/(s^3 * sqrt(2 * pi))))
expect_equal(g2n(x, m, s), g2c(x, m, s))
expect_equal(f3(a), f3c(a))
})
# composite function differentiation/caching (issue #6)
f<-function(x){ t<-x^2; log(t) }
g<-function(x) cos(f(x))
test_that("composite function", {
expect_equal(Deriv(g,"x"), function (x) -(2 * (sin(f(x))/x)))
})
# user function with non diff arguments
ifel<-ifelse
drule[["ifel"]]<-alist(test=NULL, yes=(test)*1, no=(!test)*1)
suppressWarnings(rm(t))
expect_equal(Deriv(~ifel(abs(t)<0.1, t**2, abs(t)), "t"), quote({
.e2 <- abs(t) < 0.1
(!.e2) * sign(t) + 2 * (t * .e2)
}))
drule[["ifel"]]<-NULL
# test error reporting
test_that("error reporting", {
expect_error(Deriv(rnorm), "is not in derivative table", fixed=TRUE)
expect_error(Deriv(~rnorm(x), "x"), "is not in derivative table", fixed=TRUE)
expect_error(Deriv(~x+rnorm(x), "x"), "is not in derivative table", fixed=TRUE)
})
# systematic central difference tests
set.seed(7)
test_that("central differences", {
for (nm_f in ls(drule)) {
rule <- drule[[nm_f]]
larg <- rule
narg <- length(larg)
larg[] <- runif(narg)
# possible logical parameters are swithed on/off
fargs=formals(nm_f)
ilo=sapply(fargs, is.logical)
if (any(ilo))
logrid=do.call(expand.grid, rep(list(c(TRUE, FALSE)), sum(ilo)))
for (iarg in seq_len(narg)) {
if (is.null(rule[[iarg]]))
next
if (is.null(fargs) || !any(ilo)) {
suppressWarnings(num_test_deriv(nm_f, larg, narg=iarg))
} else {
apply(logrid, 1, function(lv) {
lolarg=larg
lolarg[ilo]=lv
suppressWarnings(num_test_deriv(nm_f, lolarg, narg=iarg))
})
}
}
}
})
tmp <- Deriv(Deriv(quote(dnorm(x ** 2 - x)), "x"), "x")
test_that("dsym cleaning after nested call", {
expect_identical(Deriv(quote(.e1*x), "x"), quote(.e1)) # was issue #2
})
# doc examples
fsq <- function(x) x^2
fsc <- function(x, y) sin(x) * cos(y)
f_ <- Deriv(fsc)
fc <- function(x, h=0.1) if (abs(x) < h) 0.5*h*(x/h)**2 else abs(x)-0.5*h
myfun <- function(x, y=TRUE) NULL # do something usefull
dmyfun <- function(x, y=TRUE) NULL # myfun derivative by x.
drule[["myfun"]] <- alist(x=dmyfun(x, y), y=NULL) # y is just a logical
#cat("Deriv(myfun)=", format1(Deriv(myfun)), "\n")
theta <- list(m=0.1, sd=2.)
x <- names(theta)
names(x)=rep("theta", length(theta))
test_that("doc examples", {
expect_equal_format1(Deriv(fsq), function (x) 2 * x)
expect_equal_format1(Deriv(fsc), function (x, y) c(x = cos(x) * cos(y), y = -(sin(x) * sin(y))))
expect_equal(f_(3, 4), c(x=0.6471023, y=0.1068000), tolerance = 1.e-7)
expect_equal(Deriv(~ fsc(x, y^2), "y"), quote(-(2 * (y * sin(x) * sin(y^2)))))
expect_equal(Deriv(quote(fsc(x, y^2)), c("x", "y"), cache.exp=FALSE), quote(c(x = cos(x) * cos(y^2), y = -(2 * (y * sin(x) * sin(y^2))))))
expect_equal(Deriv(expression(sin(x^2) * y), "x"), expression(2 * (x * y * cos(x^2))))
expect_equal(Deriv("sin(x^2) * y", "x"), "2 * (x * y * cos(x^2))")
expect_equal(Deriv(fc, "x", cache=FALSE), function(x, h=0.1) if (abs(x) < h) x/h else sign(x))
expect_equal(Deriv(myfun(z^2, FALSE), "z"), quote(2 * (z * dmyfun(z^2, FALSE))))
expect_equal(Deriv(~exp(-(x-theta$m)**2/(2*theta$sd)), x, cache.exp=FALSE),
quote(c(theta_m = exp(-((x - theta$m)^2/(2 * theta$sd))) * (x - theta$m)/theta$sd,
theta_sd = 2 * (exp(-((x - theta$m)^2/(2 * theta$sd))) *
(x - theta$m)^2/(2 * theta$sd)^2))))
})
drule[["myfun"]] <- NULL
Sys.setlocale(category = "LC_COLLATE", locale = lc_orig)
|
library(openxlsx)
library(officer)
library(ReporteRs)
save_to_word <- function(table_object,
                         table_title,
                         docx_path = "tables.docx",
                         overwrite = FALSE) {
  # Append a zebra-striped table with a title paragraph to a Word document,
  # creating (or recreating, when overwrite = TRUE) the file first.
  #
  # Args:
  #   table_object: data.frame/matrix to render via ReporteRs::vanilla.table.
  #   table_title:  text placed in a paragraph above the table.
  #   docx_path:    path of the .docx file to create/append to.
  #   overwrite:    if TRUE, start from a fresh empty document.
  #
  # Fixes vs. original: TRUE/FALSE instead of the reassignable T/F aliases,
  # scalar `||` instead of vectorized `|`, and isTRUE() instead of `== T`
  # (which would be NA-prone for non-logical input).
  # Create Word file if the specified one is not found (or overwriting)
  if (!file.exists(docx_path) || isTRUE(overwrite)) {
    doc <- docx()
    writeDoc(doc, file = docx_path)
  }
  tab <- vanilla.table(table_object)
  tab <- setZebraStyle(tab, even = '#eeeeee', odd = 'white')
  # Re-open the (possibly pre-existing) document as a template and append
  doc <- docx(template = docx_path)
  doc <- addParagraph(doc, value = table_title)
  #doc <- addTitle(doc, table_title)
  doc <- addFlexTable(doc, tab)
  doc <- addParagraph(doc, "")  # blank line separating consecutive tables
  writeDoc(doc, file = docx_path)
}
save_to_excel <- function(table_object,
                          sheet_name,
                          xlsx_path = "tables.xlsx",
                          overwrite = FALSE) {
  # Write a table to a new worksheet of an xlsx workbook, reusing the
  # existing file unless overwrite = TRUE.
  #
  # Args:
  #   table_object: data.frame to write via openxlsx::writeDataTable.
  #   sheet_name:   name of the worksheet to add (must not already exist,
  #                 otherwise openxlsx::addWorksheet errors).
  #   xlsx_path:    path of the .xlsx workbook.
  #   overwrite:    if TRUE, start from a fresh workbook.
  #
  # Fixes vs. original: TRUE/FALSE instead of T/F aliases, scalar `&&`
  # instead of vectorized `&`, and no `== F` comparison.
  # Reuse an existing workbook only when present and not overwriting
  if (file.exists(xlsx_path) && !isTRUE(overwrite)) {
    wb <- loadWorkbook(file = xlsx_path)
  } else {
    wb <- createWorkbook()
  }
  addWorksheet(wb, sheet_name)
  writeDataTable(wb, x = table_object, sheet = sheet_name,
                 colNames = TRUE, rowNames = FALSE, withFilter = FALSE,
                 tableStyle = "TableStyleLight1",
                 firstColumn = FALSE, bandedRows = FALSE)
  saveWorkbook(wb, xlsx_path, overwrite = TRUE)
}
| /table_export.R | no_license | alexeyknorre/stir | R | false | false | 1,405 | r | library(openxlsx)
library(officer)
library(ReporteRs)
save_to_word <- function(table_object,
table_title,
docx_path = "tables.docx",
overwrite = F) {
# Create Word file if specified one is not found
if (!file.exists(docx_path) |
overwrite == T) {
doc <- docx()
writeDoc(doc, file = docx_path)
}
tab <- vanilla.table(table_object)
tab <- setZebraStyle(tab, even = '#eeeeee', odd = 'white')
doc <- docx(template = docx_path)
doc <- addParagraph(doc, value = table_title)
#doc <- addTitle(doc, table_title)
doc <- addFlexTable( doc, tab)
doc <- addParagraph(doc, "")
writeDoc(doc, file = docx_path)
}
save_to_excel <- function(table_object,
sheet_name,
xlsx_path = "tables.xlsx",
overwrite = F) {
# Check if there is an xlsx output file
if (file.exists(xlsx_path) & overwrite == F) {
wb <- loadWorkbook(file = xlsx_path)}
else {
wb <- createWorkbook()
}
addWorksheet(wb, sheet_name)
writeDataTable(wb, x = table_object, sheet = sheet_name,
colNames = TRUE, rowNames = F, withFilter = F,
tableStyle = "TableStyleLight1",
firstColumn = F, bandedRows = F)
#"TableStyleLight1")
saveWorkbook(wb, xlsx_path, overwrite = T)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_nifti.R
\name{read_nifti}
\alias{read_nifti}
\title{Read OCTExplorer-ready NIFTI file}
\usage{
read_nifti(nifti_file)
}
\description{
Read OCTExplorer-ready NIFTI file in the manner of read_vol
}
| /man/read_nifti.Rd | permissive | barefootbiology/heyexr | R | false | true | 279 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_nifti.R
\name{read_nifti}
\alias{read_nifti}
\title{Read OCTExplorer-ready NIFTI file}
\usage{
read_nifti(nifti_file)
}
\description{
Read OCTExplorer-ready NIFTI file in the manner of read_vol
}
|
# packages
library(tidyverse)
# read in data
dat <- read.csv("data.csv")
# create plot
O2_plot <- quickplot(data = dat,
x = O2_uM,
y = Depth_m,
color = Season) +
xlab("Oxygen")
# Save plot
ggsave("O2_plot.png") | /plot.R | no_license | holloxob/reproducible_research_files | R | false | false | 247 | r | # packages
library(tidyverse)
# read in data
dat <- read.csv("data.csv")
# create plot
O2_plot <- quickplot(data = dat,
x = O2_uM,
y = Depth_m,
color = Season) +
xlab("Oxygen")
# Save plot
ggsave("O2_plot.png") |
# Data preprocessing template: import, impute, encode, split, and scale.

# Import the dataset
dataset <- read.csv('Data.csv')

# Impute missing numeric values with the column mean
dataset$Age <-
  ifelse(
    is.na(dataset$Age),
    ave(dataset$Age, FUN = function(x) mean(x, na.rm = TRUE)),
    dataset$Age)
dataset$Salary <-
  ifelse(
    is.na(dataset$Salary),
    ave(dataset$Salary, FUN = function(x) mean(x, na.rm = TRUE)),
    dataset$Salary)

# Encode the categorical variables.
# BUG FIX: labels were c(1, 3, 3), which collapsed "Spain" and "Germany"
# into a single factor level; each country now gets a distinct code.
dataset$Country <-
  factor(
    dataset$Country,
    levels = c("France", "Spain", "Germany"),
    labels = c(1, 2, 3))
dataset$Purchased <-
  factor(
    dataset$Purchased,
    levels = c("No", "Yes"),
    labels = c(0, 1))

# Split the data into training and test sets (80/20)
# install.packages("caTools")
library(caTools)
set.seed(123)  # fixed seed for a reproducible split
split <- sample.split(dataset$Purchased, SplitRatio = 0.8)
trainingSet <- subset(dataset, split == TRUE)
testingSet <- subset(dataset, split == FALSE)

# Feature scaling (columns 2:3 = Age, Salary) so Euclidean distances are not
# dominated by the variable with the larger numeric range
trainingSet[, 2:3] <- scale(trainingSet[, 2:3])
testingSet[, 2:3] <- scale(testingSet[, 2:3])
| /datasets/Part 1 - Data Preprocessing/Section 2 -------------------- Part 1 - Data Preprocessing --------------------/data_preprocessing.R | permissive | canteroferron/machinelearning-az | R | false | false | 1,062 | r | #Importar el data set
dataset = read.csv('Data.csv')
#Tratamiento de los dataset
dataset$Age =
ifelse(
is.na(dataset$Age),
ave(dataset$Age, FUN = function(x) mean(x, na.rm = TRUE)),
dataset$Age)
dataset$Salary =
ifelse(
is.na(dataset$Salary),
ave(dataset$Salary, FUN = function(x) mean(x, na.rm = TRUE)),
dataset$Salary)
# Codificar las variables categoricas
dataset$Country =
factor(
dataset$Country,
levels = c("France", "Spain", "Germany"),
labels = c(1, 3, 3))
dataset$Purchased =
factor(
dataset$Purchased,
levels = c("No", "Yes"),
labels = c(0, 1))
# Dividir los datos en conjunto de entrenamiento y test
#install.packages("caTools")
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.8)
trainingSet = subset(dataset, split == TRUE)
testingSet = subset(dataset, split == FALSE)
# Escalar los datos, para tener el mimo ranfod e valores. La distancia de Euclides
trainingSet[, 2:3] = scale(trainingSet[, 2:3])
testingSet[, 2:3] = scale(testingSet[, 2:3])
|
# running all scripts in demo folder
demo(basic_walkthrough)
demo(custom_objective)
demo(boost_from_prediction)
demo(predict_first_ntree)
demo(generalized_linear_model)
demo(cross_validation)
demo(create_sparse_matrix)
demo(predict_leaf_indices)
demo(early_stopping)
demo(poisson_regression)
| /R-package/demo/runall.R | permissive | saurav111/xgboost | R | false | false | 292 | r | # running all scripts in demo folder
demo(basic_walkthrough)
demo(custom_objective)
demo(boost_from_prediction)
demo(predict_first_ntree)
demo(generalized_linear_model)
demo(cross_validation)
demo(create_sparse_matrix)
demo(predict_leaf_indices)
demo(early_stopping)
demo(poisson_regression)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/duplicates_check.R
\name{duplicates_check}
\alias{duplicates_check}
\title{Check and remove duplicate ids}
\usage{
duplicates_check(
x,
id = "Subject",
unique = c("SessionDate", "SessionTime"),
n = 1,
remove = TRUE,
keep = "none",
save_as = NULL
)
}
\arguments{
\item{x}{dataframe}
\item{id}{Subject ID variable name.}
\item{unique}{Column names that are unique and should be used to
check for duplicate id's}
\item{n}{Number of unique id's expected (default: 1)}
\item{remove}{logical. Remove duplicate ids from data? (default: TRUE)}
\item{keep}{If remove = TRUE, should one or more of the duplicate id's be kept?
options: "none", "first by date"}
\item{save_as}{Folder path and file name to output the duplicate ID's}
}
\description{
This function checks and removes duplicate ids
}
| /man/duplicates_check.Rd | no_license | dr-JT/datawrangling | R | false | true | 884 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/duplicates_check.R
\name{duplicates_check}
\alias{duplicates_check}
\title{Check and remove duplicate ids}
\usage{
duplicates_check(
x,
id = "Subject",
unique = c("SessionDate", "SessionTime"),
n = 1,
remove = TRUE,
keep = "none",
save_as = NULL
)
}
\arguments{
\item{x}{dataframe}
\item{id}{Subject ID variable name.}
\item{unique}{Column names that are unique and should be used to
check for duplicate id's}
\item{n}{Number of unique id's expected (default: 1)}
\item{remove}{logical. Remove duplicate ids from data? (default: TRUE)}
\item{keep}{If remove = TRUE, should one or more of the duplicate id's be kept?
options: "none", "first by date"}
\item{save_as}{Folder path and file name to output the duplicate ID's}
}
\description{
This function checks and removes duplicate ids
}
|
library(shinytest)
recordTest(test_path("apps", "table_module"))
| /tests/testthat/apps/table_module/tests/table_module-test.R | permissive | MartinSchobben/oceanexplorer | R | false | false | 65 | r | library(shinytest)
recordTest(test_path("apps", "table_module"))
|
# Loading packages
library(funHDDC)
library(R.matlab)
library(dplyr)
# Simulation Scenario
nSim = 50
Group_size = 20
var_random1 = 50
var_random2 = 200
var_random3 = 100
var_noise = 1
njobs = 20
random_seed <- c(0, 100*(1:(njobs-1)))
True.nSim = nSim*njobs
# High SNR, Group_size = 20
# basisSNR = 7
# orderSNR = 3
# Low SNR, Group_size = 20
basisSNR = 7
orderSNR = 3
# Low SNR, Group_size = 100
# basisSNR = 7
# orderSNR = 2
# Data I/O
path_data <- "Y:/Users/Jialin Yi/output/paper simulation/VaryClusters/data/"
path_out_data <- "Y:/Users/Jialin Yi/output/paper simulation/FunHDDC/data/"
path_out_plot <- "Y:/Users/Jialin Yi/output/paper simulation/FunHDDC/plot/"
name_file <- paste(toString(nSim), toString(Group_size),
toString(var_random1), toString(var_random2), toString(var_random3),
toString(var_noise), sep = "-")
True.name_file <- paste(toString(True.nSim), toString(Group_size),
toString(var_random1), toString(var_random2), toString(var_random3),
toString(var_noise), sep = "-")
# Functions
EncapFunHDDC <- function(dataset, n_cl, n_b, n_o, modeltype, init_cl){
  # Smooth each subject's series (columns of `dataset`) with a B-spline
  # basis of n_b functions and order n_o, then cluster the resulting
  # functional data into n_cl groups with funHDDC.
  # Returns a two-element list: the funHDDC fit and the fd object.
  n_time <- nrow(dataset)
  bspline_basis <- create.bspline.basis(c(0, n_time), nbasis = n_b, norder = n_o)
  fd_obj <- smooth.basis(1:n_time, dataset, bspline_basis,
                         fdnames = list("Time", "Subject", "Score"))$fd
  fit <- funHDDC(fd_obj, n_cl, model = modeltype, init = init_cl, thd = 0.01)
  return(list(fit, fd_obj))
}
CRate <- function(ClusterMatrix){
  # Classification rate: the mean, over columns, of the proportion of rows
  # in each column that carry that column's most frequent cluster label.
  #
  # Args:
  #   ClusterMatrix: matrix with one column per true group; entries are the
  #     cluster labels assigned to that group's members.
  # Returns: a scalar in (0, 1]; 1 means every group received a single label.
  #
  # Replaces the original accumulator loop and the
  # tail(names(sort(table(...)))) round-trip (find modal label, then recount
  # it) with a direct max(table(...)) per column — numerically identical.
  if (ncol(ClusterMatrix) == 0) {
    return(0)  # match original: empty loop left the accumulator at 0
  }
  modal_share <- apply(ClusterMatrix, 2,
                       function(labels) max(table(labels)) / length(labels))
  mean(modal_share)
}
FixSimulation <- function(data_nSim, nbasis = 18, norder = 3){
 # Run funHDDC on each simulated replicate (one column of data_nSim) and
 # return the per-replicate classification rate.
 # Assumes each column reshapes into a 60-column matrix (subjects x time)
 # and that the global Group_size matches the per-cluster size -- TODO
 # confirm against the simulation setup in the calling script.
 CR = 1:ncol(data_nSim)
 for(i in 1:ncol(data_nSim)){
 # dplyr::pull extracts column i; byrow = TRUE lays out one subject per row
 dataset <- matrix(pull(data_nSim, i), ncol = 60, byrow = TRUE)
 modeltype='ABQkDk'
 # 3 clusters, k-means initialisation; out = list(funHDDC fit, fd object)
 out <- EncapFunHDDC(dataset, 3, nbasis, norder, modeltype, 'kmeans')
 res <- out[[1]]
 #fdobj <- out[[2]]
 # reshape cluster labels so each column holds one true group's members
 mat_cl <- matrix(res$cls, nrow = Group_size)
 CR[i] <- CRate(mat_cl)
 }
 return(CR)
}
###################################################################
######################## Simulation
###################################################################
# CRate File to save all simulation
# Accumulates one row per (method, simulation) across all jobs.
Cluster.Compara <- data.frame(Method=character(),
                              CRate=double())
colnames(Cluster.Compara) <- c("Method", "CRate")
for(job in random_seed){
  # Loading data
  job_file = paste(name_file, toString(job), sep = "-")
  All <- readMat(paste(path_data, job_file, ".mat", sep = ""))
  # Split the flat data vector into nSim equally-sized datasets (one per
  # simulation), then bind them column-wise into one data frame.
  data_set <- split(All$data,
                    as.factor(rep(1:nSim, each = length(All$data)/nSim)))
  data_set <- bind_rows(data_set)
  # FunHDDC on simulated data
  CRFunHDDC <- FixSimulation(data_set, nbasis = basisSNR, norder = orderSNR)
  # FTSC on simulation data (precomputed rates stored in the .mat file)
  CRFTSC <- as.vector(All$FTSC.CRate)
  # K-means on simulation data (precomputed rates stored in the .mat file)
  CRKmeans <- as.vector(All$kmeans.CRate)
  # Save classification rate
  CRates.Data <- data.frame(rep(c("FTSC", "FunHDDC", "Kmeans"), each=nSim),
                            c(CRFTSC, CRFunHDDC, CRKmeans))
  colnames(CRates.Data) <- c("Method", "CRate")
  Cluster.Compara <- rbind(Cluster.Compara, CRates.Data)
}
save(Cluster.Compara, file = paste(path_out_data, True.name_file, ".Rdata", sep = ""))
# Plots
pdf(paste(path_out_plot, True.name_file, ".pdf", sep = ""),
    width = 8.05, height = 5.76)
#par(mfrow = c(1,2), oma = c(0, 0, 2, 0))
# NOTE(review): yRange is computed but never used below.
yRange = c(min(Cluster.Compara$CRate), max(Cluster.Compara$CRate))
# box plot
boxplot(CRate ~ Method, data = Cluster.Compara)
mtext(paste("Var of noise =", toString(var_noise), ",",
            "Group size =", toString(Group_size)), outer = TRUE, cex = 1.5)
dev.off()
| /funHDDC/multi_simu_VaryfunHDDC.R | no_license | jialinyi94/FTSC | R | false | false | 3,923 | r | # Loading packages
# Loading packages
library(funHDDC)   # functional-data clustering (funHDDC)
library(R.matlab)  # readMat() for the simulation .mat inputs
library(dplyr)     # pull(), bind_rows()
# Simulation Scenario
# nSim curves per job file; njobs job files in total, each identified by a
# seed offset that is also used as the file-name suffix.
nSim = 50
Group_size = 20    # subjects per true cluster (used to reshape cluster labels)
var_random1 = 50
var_random2 = 200
var_random3 = 100
var_noise = 1
njobs = 20
random_seed <- c(0, 100*(1:(njobs-1)))
True.nSim = nSim*njobs   # total simulations across all jobs
# B-spline settings for the chosen SNR scenario; the commented alternatives
# correspond to the other scenarios listed below.
# High SNR, Group_size = 20
# basisSNR = 7
# orderSNR = 3
# Low SNR, Group_size = 20
basisSNR = 7
orderSNR = 3
# Low SNR, Group_size = 100
# basisSNR = 7
# orderSNR = 2
# Data I/O
path_data <- "Y:/Users/Jialin Yi/output/paper simulation/VaryClusters/data/"
path_out_data <- "Y:/Users/Jialin Yi/output/paper simulation/FunHDDC/data/"
path_out_plot <- "Y:/Users/Jialin Yi/output/paper simulation/FunHDDC/plot/"
# File-name stems encode the scenario parameters, separated by "-".
name_file <- paste(toString(nSim), toString(Group_size),
                   toString(var_random1), toString(var_random2), toString(var_random3),
                   toString(var_noise), sep = "-")
True.name_file <- paste(toString(True.nSim), toString(Group_size),
                        toString(var_random1), toString(var_random2), toString(var_random3),
                        toString(var_noise), sep = "-")
# Functions
# Smooth each column of `dataset` (one curve per column, observed at
# time points 1..nrow) with a B-spline basis, then run funHDDC clustering
# on the resulting functional data object.
#
# Args:
#   dataset:   numeric matrix, one subject per column.
#   n_cl:      number of clusters requested from funHDDC.
#   n_b, n_o:  B-spline basis size and order.
#   modeltype: funHDDC model string (e.g. "ABQkDk").
#   init_cl:   initialisation method (e.g. "kmeans").
# Returns:
#   list(funHDDC result, smoothed fd object).
EncapFunHDDC <- function(dataset, n_cl, n_b, n_o, modeltype, init_cl) {
  n_time <- nrow(dataset)
  spline_basis <- create.bspline.basis(c(0, n_time), nbasis = n_b, norder = n_o)
  smoothed <- smooth.basis(1:n_time, dataset, spline_basis,
                           fdnames = list("Time", "Subject", "Score"))
  fdobj <- smoothed$fd
  res <- funHDDC(fdobj, n_cl, model = modeltype, init = init_cl, thd = 0.01)
  list(res, fdobj)
}
# Mean per-group classification rate of a cluster-assignment matrix.
#
# Each column of `ClusterMatrix` holds the cluster labels assigned to the
# members of one true group (one column per group, one row per subject).
# A column's rate is the fraction of its subjects carrying the column's
# most frequent label; the overall rate is the mean over columns.
#
# Args:
#   ClusterMatrix: matrix of cluster labels, one column per true group.
# Returns:
#   Single numeric in (0, 1]: the mean per-group classification rate.
CRate <- function(ClusterMatrix) {
  n_row <- nrow(ClusterMatrix)
  # max(table(...)) is the count of the modal label directly; this also works
  # for non-numeric labels, unlike the original as.numeric() round-trip.
  # seq_len() is safe for zero-column input (1:ncol() was not).
  col_rates <- vapply(
    seq_len(ncol(ClusterMatrix)),
    function(j) max(table(ClusterMatrix[, j])) / n_row,
    numeric(1)
  )
  mean(col_rates)
}
# Run EncapFunHDDC on every simulated dataset (one per column of `data_nSim`)
# and return the classification rate of each run.
#
# Args:
#   data_nSim: data frame; each column is one simulated dataset, stored as a
#              flattened matrix re-folded to 60 columns below.
#              NOTE(review): ncol = 60 and n_cl = 3 are hard-coded -- confirm
#              they match the simulation scenario.
#   nbasis, norder: B-spline basis dimension and order forwarded to funHDDC.
# Returns:
#   Numeric vector of classification rates, one per simulation.
# Relies on the global `Group_size` to reshape cluster labels into one
# column per true group before scoring with CRate().
FixSimulation <- function(data_nSim, nbasis = 18, norder = 3){
  CR = 1:ncol(data_nSim)   # preallocation only; every slot is overwritten
  for(i in 1:ncol(data_nSim)){
    dataset <- matrix(pull(data_nSim, i), ncol = 60, byrow = TRUE)
    modeltype='ABQkDk'
    out <- EncapFunHDDC(dataset, 3, nbasis, norder, modeltype, 'kmeans')
    res <- out[[1]]
    #fdobj <- out[[2]]
    # One column of labels per true group, Group_size subjects per column.
    mat_cl <- matrix(res$cls, nrow = Group_size)
    CR[i] <- CRate(mat_cl)
  }
  return(CR)
}
###################################################################
######################## Simulation
###################################################################
# CRate File to save all simulation
# Accumulates one row per (method, simulation) across all jobs.
Cluster.Compara <- data.frame(Method=character(),
                              CRate=double())
colnames(Cluster.Compara) <- c("Method", "CRate")
for(job in random_seed){
  # Loading data
  job_file = paste(name_file, toString(job), sep = "-")
  All <- readMat(paste(path_data, job_file, ".mat", sep = ""))
  # Split the flat data vector into nSim equally-sized datasets (one per
  # simulation), then bind them column-wise into one data frame.
  data_set <- split(All$data,
                    as.factor(rep(1:nSim, each = length(All$data)/nSim)))
  data_set <- bind_rows(data_set)
  # FunHDDC on simulated data
  CRFunHDDC <- FixSimulation(data_set, nbasis = basisSNR, norder = orderSNR)
  # FTSC on simulation data (precomputed rates stored in the .mat file)
  CRFTSC <- as.vector(All$FTSC.CRate)
  # K-means on simulation data (precomputed rates stored in the .mat file)
  CRKmeans <- as.vector(All$kmeans.CRate)
  # Save classification rate
  CRates.Data <- data.frame(rep(c("FTSC", "FunHDDC", "Kmeans"), each=nSim),
                            c(CRFTSC, CRFunHDDC, CRKmeans))
  colnames(CRates.Data) <- c("Method", "CRate")
  Cluster.Compara <- rbind(Cluster.Compara, CRates.Data)
}
save(Cluster.Compara, file = paste(path_out_data, True.name_file, ".Rdata", sep = ""))
# Plots
pdf(paste(path_out_plot, True.name_file, ".pdf", sep = ""),
    width = 8.05, height = 5.76)
#par(mfrow = c(1,2), oma = c(0, 0, 2, 0))
# NOTE(review): yRange is computed but never used below.
yRange = c(min(Cluster.Compara$CRate), max(Cluster.Compara$CRate))
# box plot
boxplot(CRate ~ Method, data = Cluster.Compara)
mtext(paste("Var of noise =", toString(var_noise), ",",
            "Group size =", toString(Group_size)), outer = TRUE, cex = 1.5)
dev.off()
|
library(tidyverse)
# Create output directory ------------------------------------------------------
fs::dir_create(here::here("lib"))
# Create empty data frame ------------------------------------------------------
# Skeleton for the analysis specification: one row per analysis, 25 columns.
# Rows are appended below with positional c(...) vectors, so the column order
# declared here is load-bearing.
df <- data.frame(active = logical(),
                 outcome = character(),
                 outcome_group = character(),
                 outcome_variable = character(),
                 covariates = character(),
                 model = character(),
                 main = character(),
                 covid_pheno_hospitalised = character(),
                 covid_pheno_non_hospitalised = character(),
                 agegp_18_39 = character(),
                 agegp_40_59 = character(),
                 agegp_60_79 = character(),
                 agegp_80_110 = character(),
                 sex_Male = character(),
                 sex_Female = character(),
                 ethnicity_White = character(),
                 ethnicity_Mixed = character(),
                 ethnicity_South_Asian = character(),
                 ethnicity_Black = character(),
                 ethnicity_Other = character(),
                 ethnicity_Missing = character(),
                 prior_history_TRUE = character(),
                 prior_history_FALSE = character(),
                 prior_history_var = character(),
                 venn = character(),
                 stringsAsFactors = FALSE)
# ------------------------------------------------------------------------------
# Add diabetes outcomes --------------------------------------------------------
# ------------------------------------------------------------------------------
# One row per diabetes outcome; outcome_venn flags which outcomes feed the
# Venn-diagram output.
outcomes <- c("type 1 diabetes",
              "type 2 diabetes",
              "type 2 diabetes - pre diabetes",
              "type 2 diabetes - no pre diabetes",
              "type 2 diabetes - obesity",
              "type 2 diabetes - no obesity",
              "other or non-specific diabetes",
              "gestational diabetes")
outcome_group <- "diabetes"
outcomes_short <- c("t1dm","t2dm", "t2dm_pd","t2dm_pd_no", "t2dm_obes","t2dm_obes_no", "otherdm","gestationaldm")
outcome_venn <- c(TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE)
# Append one row per outcome. The positional values must line up with the 25
# columns of df: active, outcome, outcome_group, outcome_variable, covariates,
# model ("all"), then main + 2 covid phenotypes (TRUE x3), then the 12
# subgroup flags + 2 prior-history flags (FALSE x14), prior_history_var (""),
# and venn.
for (i in 1:length(outcomes)) {
  df[nrow(df)+1,] <- c(FALSE,
                       outcomes[i],
                       outcome_group,
                       paste0("out_date_",outcomes_short[i]),
                       "cov_cat_sex;cov_num_age;cov_cat_ethnicity;cov_cat_deprivation;cov_cat_region;cov_num_consulation_rate;cov_cat_smoking_status;cov_bin_ami;cov_bin_all_stroke;cov_bin_other_arterial_embolism;cov_bin_vte;cov_bin_hf;cov_bin_angina;cov_bin_dementia;cov_bin_liver_disease;cov_bin_chronic_kidney_disease;cov_bin_cancer;cov_bin_hypertension;cov_bin_depression;cov_bin_chronic_obstructive_pulmonary_disease;cov_bin_healthcare_worker;cov_bin_carehome_status;cov_num_tc_hdl_ratio;cov_cat_bmi_groups;cov_bin_prediabetes;cov_bin_diabetes_gestational",
                       rep("all",1),
                       rep(TRUE,3),
                       rep(FALSE,14),
                       "",
                       outcome_venn[i])
}
# change outcome group so that gestational diabetes has its own group
df <- df %>% mutate(outcome_group = case_when(outcome_variable == "out_date_gestationaldm" ~ "diabetes_gestational",
                                              TRUE ~ as.character(outcome_group)))
# turn off t2dm main analysis to save time
# (row 2 = the "type 2 diabetes" row added above; column 7 = "main")
df[2,7] <- FALSE
# change outcome group for pre diabetes and obesity analysis
df <- df %>% mutate(outcome_group = case_when(outcome == "type 2 diabetes - pre diabetes" ~ "diabetes_prediabetes",
                                              TRUE ~ as.character(outcome_group)),
                    outcome_group = case_when(outcome == "type 2 diabetes - no pre diabetes" ~ "diabetes_no_prediabetes",
                                              TRUE ~ as.character(outcome_group)),
                    outcome_group = case_when(outcome == "type 2 diabetes - obesity" ~ "diabetes_obesity",
                                              TRUE ~ as.character(outcome_group)),
                    outcome_group = case_when(outcome == "type 2 diabetes - no obesity" ~ "diabetes_no_obesity",
                                              TRUE ~ as.character(outcome_group)))
# turn on subgroups for main t2dm analyses
# df[2,c(10:21)] <- TRUE
# turn on t2dm
# (row 2, column 1 = "active")
df[2,1] <- TRUE
# Remove sex as a covariate for gestational diabetes analysis
df <- df %>% mutate(covariates = case_when(outcome_variable == "out_date_gestationaldm" ~ "cov_num_age;cov_cat_ethnicity;cov_cat_deprivation;cov_cat_region;cov_num_consulation_rate;cov_cat_smoking_status;cov_bin_ami;cov_bin_all_stroke;cov_bin_other_arterial_embolism;cov_bin_vte;cov_bin_hf;cov_bin_angina;cov_bin_dementia;cov_bin_liver_disease;cov_bin_chronic_kidney_disease;cov_bin_cancer;cov_bin_hypertension;cov_bin_depression;cov_bin_chronic_obstructive_pulmonary_disease;cov_bin_healthcare_worker;cov_bin_carehome_status;cov_num_tc_hdl_ratio;cov_cat_bmi_groups;cov_bin_prediabetes;cov_bin_diabetes_gestational",
                                           TRUE ~ as.character(covariates)))
# remove BMI for obesity subgroup analysis
df <- df %>% mutate(covariates = case_when(outcome_variable == "out_date_t2dm_obes" ~ "cov_cat_sex;cov_num_age;cov_cat_ethnicity;cov_cat_deprivation;cov_cat_region;cov_num_consulation_rate;cov_cat_smoking_status;cov_bin_ami;cov_bin_all_stroke;cov_bin_other_arterial_embolism;cov_bin_vte;cov_bin_hf;cov_bin_angina;cov_bin_dementia;cov_bin_liver_disease;cov_bin_chronic_kidney_disease;cov_bin_cancer;cov_bin_hypertension;cov_bin_depression;cov_bin_chronic_obstructive_pulmonary_disease;cov_bin_healthcare_worker;cov_bin_carehome_status;cov_num_tc_hdl_ratio;cov_bin_prediabetes;cov_bin_diabetes_gestational",
                                           TRUE ~ as.character(covariates)))
df <- df %>% mutate(covariates = case_when(outcome_variable == "out_date_t2dm_obes_no" ~ "cov_cat_sex;cov_num_age;cov_cat_ethnicity;cov_cat_deprivation;cov_cat_region;cov_num_consulation_rate;cov_cat_smoking_status;cov_bin_ami;cov_bin_all_stroke;cov_bin_other_arterial_embolism;cov_bin_vte;cov_bin_hf;cov_bin_angina;cov_bin_dementia;cov_bin_liver_disease;cov_bin_chronic_kidney_disease;cov_bin_cancer;cov_bin_hypertension;cov_bin_depression;cov_bin_chronic_obstructive_pulmonary_disease;cov_bin_healthcare_worker;cov_bin_carehome_status;cov_num_tc_hdl_ratio;cov_bin_prediabetes;cov_bin_diabetes_gestational",
                                           TRUE ~ as.character(covariates)))
# remove pre-diabetes for pre-diabetes subgroup analysis
df <- df %>% mutate(covariates = case_when(outcome_variable == "out_date_t2dm_pd" ~ "cov_cat_sex;cov_num_age;cov_cat_ethnicity;cov_cat_deprivation;cov_cat_region;cov_num_consulation_rate;cov_cat_smoking_status;cov_bin_ami;cov_bin_all_stroke;cov_bin_other_arterial_embolism;cov_bin_vte;cov_bin_hf;cov_bin_angina;cov_bin_dementia;cov_bin_liver_disease;cov_bin_chronic_kidney_disease;cov_bin_cancer;cov_bin_hypertension;cov_bin_depression;cov_bin_chronic_obstructive_pulmonary_disease;cov_bin_healthcare_worker;cov_bin_carehome_status;cov_num_tc_hdl_ratio;cov_cat_bmi_groups;cov_bin_diabetes_gestational",
                                           TRUE ~ as.character(covariates)))
df <- df %>% mutate(covariates = case_when(outcome_variable == "out_date_t2dm_pd_no" ~ "cov_cat_sex;cov_num_age;cov_cat_ethnicity;cov_cat_deprivation;cov_cat_region;cov_num_consulation_rate;cov_cat_smoking_status;cov_bin_ami;cov_bin_all_stroke;cov_bin_other_arterial_embolism;cov_bin_vte;cov_bin_hf;cov_bin_angina;cov_bin_dementia;cov_bin_liver_disease;cov_bin_chronic_kidney_disease;cov_bin_cancer;cov_bin_hypertension;cov_bin_depression;cov_bin_chronic_obstructive_pulmonary_disease;cov_bin_healthcare_worker;cov_bin_carehome_status;cov_num_tc_hdl_ratio;cov_cat_bmi_groups;cov_bin_diabetes_gestational",
                                           TRUE ~ as.character(covariates)))
# add pre diabetes subgroup analysis
# df$prior_history_var <- ifelse(df$outcome=="type 2 diabetes" ,"cov_bin_prediabetes",df$prior_history_var)
# df$prior_history_TRUE <- ifelse(df$outcome=="type 2 diabetes" ,TRUE,df$prior_history_TRUE)
# df$prior_history_FALSE <- ifelse(df$outcome=="type 2 diabetes" ,TRUE,df$prior_history_FALSE)
# ------------------------------------------------------------------------------
# Add mental health outcomes --------------------------------------------------------
# ------------------------------------------------------------------------------
outcomes <- c("Depression",
              "Anxiety - general",
              "Anxiety - obsessive compulsive disorder",
              "Anxiety - post traumatic stress disorder",
              "Eating disorders",
              "Serious mental illness",
              "Self harm, aged >=10",
              "Self harm, aged >=15",
              "Suicide",
              "Addiction")
outcome_group <- "mental_health"
outcomes_short <- c("depression",
                    "anxiety_general",
                    "anxiety_ocd",
                    "anxiety_ptsd",
                    "eating_disorders",
                    "serious_mental_illness",
                    "self_harm_10plus",
                    "self_harm_15plus",
                    "suicide",
                    "addiction")
out_venn <- c(TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE)
# Append one row per outcome. Positional layout differs from the diabetes
# section: here only "main" is TRUE (rep(TRUE,1)) and the 2 covid phenotype
# flags + 12 subgroup flags + 2 prior-history flags are all FALSE
# (rep(FALSE,16)).
for (i in 1:length(outcomes)) {
  df[nrow(df)+1,] <- c(FALSE,
                       outcomes[i],
                       outcome_group,
                       paste0("out_date_",outcomes_short[i]),
                       "cov_num_age;cov_cat_sex;cov_cat_ethnicity;cov_cat_deprivation;cov_cat_region;cov_cat_smoking_status;cov_bin_carehome_status;cov_num_consulation_rate;cov_bin_healthcare_worker;cov_bin_dementia;cov_bin_liver_disease;cov_bin_chronic_kidney_disease;cov_bin_cancer;cov_bin_hypertension;cov_bin_diabetes;cov_bin_obesity;cov_bin_chronic_obstructive_pulmonary_disease;cov_bin_ami;cov_bin_stroke_isch;cov_bin_recent_depression;cov_bin_history_depression;cov_bin_recent_anxiety;cov_bin_history_anxiety;cov_bin_recent_eating_disorders;cov_bin_history_eating_disorders;cov_bin_recent_serious_mental_illness;cov_bin_history_serious_mental_illness;cov_bin_recent_self_harm;cov_bin_history_self_harm",
                       rep("all",1),
                       rep(TRUE,1),
                       rep(FALSE,16),
                       "",
                       out_venn[i])
}
# df[6,1] <- TRUE
# Save active analyses list ----------------------------------------------------
saveRDS(df, file = "lib/active_analyses.rds") | /analysis/active_analyses.R | permissive | opensafely/post-covid-unvaccinated | R | false | false | 10,737 | r | library(tidyverse)
# Create output directory ------------------------------------------------------
fs::dir_create(here::here("lib"))
# Create empty data frame ------------------------------------------------------
df <- data.frame(active = logical(),
outcome = character(),
outcome_group = character(),
outcome_variable = character(),
covariates = character(),
model = character(),
main = character(),
covid_pheno_hospitalised = character(),
covid_pheno_non_hospitalised = character(),
agegp_18_39 = character(),
agegp_40_59 = character(),
agegp_60_79 = character(),
agegp_80_110 = character(),
sex_Male = character(),
sex_Female = character(),
ethnicity_White = character(),
ethnicity_Mixed = character(),
ethnicity_South_Asian = character(),
ethnicity_Black = character(),
ethnicity_Other = character(),
ethnicity_Missing = character(),
prior_history_TRUE = character(),
prior_history_FALSE = character(),
prior_history_var = character(),
venn = character(),
stringsAsFactors = FALSE)
# ------------------------------------------------------------------------------
# Add diabetes outcomes --------------------------------------------------------
# ------------------------------------------------------------------------------
# One row per diabetes outcome; outcome_venn flags which outcomes feed the
# Venn-diagram output.
outcomes <- c("type 1 diabetes",
              "type 2 diabetes",
              "type 2 diabetes - pre diabetes",
              "type 2 diabetes - no pre diabetes",
              "type 2 diabetes - obesity",
              "type 2 diabetes - no obesity",
              "other or non-specific diabetes",
              "gestational diabetes")
outcome_group <- "diabetes"
outcomes_short <- c("t1dm","t2dm", "t2dm_pd","t2dm_pd_no", "t2dm_obes","t2dm_obes_no", "otherdm","gestationaldm")
outcome_venn <- c(TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE)
# Append one row per outcome. The positional values must line up with the 25
# columns of df: active, outcome, outcome_group, outcome_variable, covariates,
# model ("all"), then main + 2 covid phenotypes (TRUE x3), then the 12
# subgroup flags + 2 prior-history flags (FALSE x14), prior_history_var (""),
# and venn.
for (i in 1:length(outcomes)) {
  df[nrow(df)+1,] <- c(FALSE,
                       outcomes[i],
                       outcome_group,
                       paste0("out_date_",outcomes_short[i]),
                       "cov_cat_sex;cov_num_age;cov_cat_ethnicity;cov_cat_deprivation;cov_cat_region;cov_num_consulation_rate;cov_cat_smoking_status;cov_bin_ami;cov_bin_all_stroke;cov_bin_other_arterial_embolism;cov_bin_vte;cov_bin_hf;cov_bin_angina;cov_bin_dementia;cov_bin_liver_disease;cov_bin_chronic_kidney_disease;cov_bin_cancer;cov_bin_hypertension;cov_bin_depression;cov_bin_chronic_obstructive_pulmonary_disease;cov_bin_healthcare_worker;cov_bin_carehome_status;cov_num_tc_hdl_ratio;cov_cat_bmi_groups;cov_bin_prediabetes;cov_bin_diabetes_gestational",
                       rep("all",1),
                       rep(TRUE,3),
                       rep(FALSE,14),
                       "",
                       outcome_venn[i])
}
# change outcome group so that gestational diabetes has its own group
df <- df %>% mutate(outcome_group = case_when(outcome_variable == "out_date_gestationaldm" ~ "diabetes_gestational",
                                              TRUE ~ as.character(outcome_group)))
# turn off t2dm main analysis to save time
# (row 2 = the "type 2 diabetes" row added above; column 7 = "main")
df[2,7] <- FALSE
# change outcome group for pre diabetes and obesity analysis
df <- df %>% mutate(outcome_group = case_when(outcome == "type 2 diabetes - pre diabetes" ~ "diabetes_prediabetes",
                                              TRUE ~ as.character(outcome_group)),
                    outcome_group = case_when(outcome == "type 2 diabetes - no pre diabetes" ~ "diabetes_no_prediabetes",
                                              TRUE ~ as.character(outcome_group)),
                    outcome_group = case_when(outcome == "type 2 diabetes - obesity" ~ "diabetes_obesity",
                                              TRUE ~ as.character(outcome_group)),
                    outcome_group = case_when(outcome == "type 2 diabetes - no obesity" ~ "diabetes_no_obesity",
                                              TRUE ~ as.character(outcome_group)))
# turn on subgroups for main t2dm analyses
# df[2,c(10:21)] <- TRUE
# turn on t2dm
# (row 2, column 1 = "active")
df[2,1] <- TRUE
# Remove sex as a covariate for gestational diabetes analysis
df <- df %>% mutate(covariates = case_when(outcome_variable == "out_date_gestationaldm" ~ "cov_num_age;cov_cat_ethnicity;cov_cat_deprivation;cov_cat_region;cov_num_consulation_rate;cov_cat_smoking_status;cov_bin_ami;cov_bin_all_stroke;cov_bin_other_arterial_embolism;cov_bin_vte;cov_bin_hf;cov_bin_angina;cov_bin_dementia;cov_bin_liver_disease;cov_bin_chronic_kidney_disease;cov_bin_cancer;cov_bin_hypertension;cov_bin_depression;cov_bin_chronic_obstructive_pulmonary_disease;cov_bin_healthcare_worker;cov_bin_carehome_status;cov_num_tc_hdl_ratio;cov_cat_bmi_groups;cov_bin_prediabetes;cov_bin_diabetes_gestational",
                                           TRUE ~ as.character(covariates)))
# remove BMI for obesity subgroup analysis
df <- df %>% mutate(covariates = case_when(outcome_variable == "out_date_t2dm_obes" ~ "cov_cat_sex;cov_num_age;cov_cat_ethnicity;cov_cat_deprivation;cov_cat_region;cov_num_consulation_rate;cov_cat_smoking_status;cov_bin_ami;cov_bin_all_stroke;cov_bin_other_arterial_embolism;cov_bin_vte;cov_bin_hf;cov_bin_angina;cov_bin_dementia;cov_bin_liver_disease;cov_bin_chronic_kidney_disease;cov_bin_cancer;cov_bin_hypertension;cov_bin_depression;cov_bin_chronic_obstructive_pulmonary_disease;cov_bin_healthcare_worker;cov_bin_carehome_status;cov_num_tc_hdl_ratio;cov_bin_prediabetes;cov_bin_diabetes_gestational",
                                           TRUE ~ as.character(covariates)))
df <- df %>% mutate(covariates = case_when(outcome_variable == "out_date_t2dm_obes_no" ~ "cov_cat_sex;cov_num_age;cov_cat_ethnicity;cov_cat_deprivation;cov_cat_region;cov_num_consulation_rate;cov_cat_smoking_status;cov_bin_ami;cov_bin_all_stroke;cov_bin_other_arterial_embolism;cov_bin_vte;cov_bin_hf;cov_bin_angina;cov_bin_dementia;cov_bin_liver_disease;cov_bin_chronic_kidney_disease;cov_bin_cancer;cov_bin_hypertension;cov_bin_depression;cov_bin_chronic_obstructive_pulmonary_disease;cov_bin_healthcare_worker;cov_bin_carehome_status;cov_num_tc_hdl_ratio;cov_bin_prediabetes;cov_bin_diabetes_gestational",
                                           TRUE ~ as.character(covariates)))
# remove pre-diabetes for pre-diabetes subgroup analysis
df <- df %>% mutate(covariates = case_when(outcome_variable == "out_date_t2dm_pd" ~ "cov_cat_sex;cov_num_age;cov_cat_ethnicity;cov_cat_deprivation;cov_cat_region;cov_num_consulation_rate;cov_cat_smoking_status;cov_bin_ami;cov_bin_all_stroke;cov_bin_other_arterial_embolism;cov_bin_vte;cov_bin_hf;cov_bin_angina;cov_bin_dementia;cov_bin_liver_disease;cov_bin_chronic_kidney_disease;cov_bin_cancer;cov_bin_hypertension;cov_bin_depression;cov_bin_chronic_obstructive_pulmonary_disease;cov_bin_healthcare_worker;cov_bin_carehome_status;cov_num_tc_hdl_ratio;cov_cat_bmi_groups;cov_bin_diabetes_gestational",
                                           TRUE ~ as.character(covariates)))
df <- df %>% mutate(covariates = case_when(outcome_variable == "out_date_t2dm_pd_no" ~ "cov_cat_sex;cov_num_age;cov_cat_ethnicity;cov_cat_deprivation;cov_cat_region;cov_num_consulation_rate;cov_cat_smoking_status;cov_bin_ami;cov_bin_all_stroke;cov_bin_other_arterial_embolism;cov_bin_vte;cov_bin_hf;cov_bin_angina;cov_bin_dementia;cov_bin_liver_disease;cov_bin_chronic_kidney_disease;cov_bin_cancer;cov_bin_hypertension;cov_bin_depression;cov_bin_chronic_obstructive_pulmonary_disease;cov_bin_healthcare_worker;cov_bin_carehome_status;cov_num_tc_hdl_ratio;cov_cat_bmi_groups;cov_bin_diabetes_gestational",
                                           TRUE ~ as.character(covariates)))
# add pre diabetes subgroup analysis
# df$prior_history_var <- ifelse(df$outcome=="type 2 diabetes" ,"cov_bin_prediabetes",df$prior_history_var)
# df$prior_history_TRUE <- ifelse(df$outcome=="type 2 diabetes" ,TRUE,df$prior_history_TRUE)
# df$prior_history_FALSE <- ifelse(df$outcome=="type 2 diabetes" ,TRUE,df$prior_history_FALSE)
# ------------------------------------------------------------------------------
# Add mental health outcomes --------------------------------------------------------
# ------------------------------------------------------------------------------
outcomes <- c("Depression",
              "Anxiety - general",
              "Anxiety - obsessive compulsive disorder",
              "Anxiety - post traumatic stress disorder",
              "Eating disorders",
              "Serious mental illness",
              "Self harm, aged >=10",
              "Self harm, aged >=15",
              "Suicide",
              "Addiction")
outcome_group <- "mental_health"
outcomes_short <- c("depression",
                    "anxiety_general",
                    "anxiety_ocd",
                    "anxiety_ptsd",
                    "eating_disorders",
                    "serious_mental_illness",
                    "self_harm_10plus",
                    "self_harm_15plus",
                    "suicide",
                    "addiction")
out_venn <- c(TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE)
# Append one row per outcome. Positional layout differs from the diabetes
# section: here only "main" is TRUE (rep(TRUE,1)) and the 2 covid phenotype
# flags + 12 subgroup flags + 2 prior-history flags are all FALSE
# (rep(FALSE,16)).
for (i in 1:length(outcomes)) {
  df[nrow(df)+1,] <- c(FALSE,
                       outcomes[i],
                       outcome_group,
                       paste0("out_date_",outcomes_short[i]),
                       "cov_num_age;cov_cat_sex;cov_cat_ethnicity;cov_cat_deprivation;cov_cat_region;cov_cat_smoking_status;cov_bin_carehome_status;cov_num_consulation_rate;cov_bin_healthcare_worker;cov_bin_dementia;cov_bin_liver_disease;cov_bin_chronic_kidney_disease;cov_bin_cancer;cov_bin_hypertension;cov_bin_diabetes;cov_bin_obesity;cov_bin_chronic_obstructive_pulmonary_disease;cov_bin_ami;cov_bin_stroke_isch;cov_bin_recent_depression;cov_bin_history_depression;cov_bin_recent_anxiety;cov_bin_history_anxiety;cov_bin_recent_eating_disorders;cov_bin_history_eating_disorders;cov_bin_recent_serious_mental_illness;cov_bin_history_serious_mental_illness;cov_bin_recent_self_harm;cov_bin_history_self_harm",
                       rep("all",1),
                       rep(TRUE,1),
                       rep(FALSE,16),
                       "",
                       out_venn[i])
}
# df[6,1] <- TRUE
# Save active analyses list ----------------------------------------------------
saveRDS(df, file = "lib/active_analyses.rds") |
##############################################################
# R code for QTL mapping
#
# http://www.rqtl.org
#
# 2018-1-25
##############################################################
#########################################################################
wkdir <- commandArgs(TRUE)[1]
in_file <- commandArgs(TRUE)[2]
outhk <- commandArgs(TRUE)[3]
outem <- commandArgs(TRUE)[4]
outimp <- commandArgs(TRUE)[5]
#########################################################################
library("qtl")
setwd(wkdir)
#read data
hd <- read.cross("csvr", genotypes=c("AA","AB","BB"), alleles=c("A", "B"), dir = wkdir, in_file)
###################### summary of raw data ######################
# stats
sink("samples.summary_raw_data.txt")
print("#summary of raw data")
summary(hd)
sink()
hd <- calc.genoprob(hd, step=1)
##count CO number
# nxo <- countXO(hd)
# pdf(file = "crossover.count.pdf")
# plot(nxo, ylab="No. crossovers")
# dev.off()
######################## single-QTL ########################
# Genome scans with three methods: EM interval mapping, Haley-Knott
# regression, and multiple imputation.
out.em <- scanone(hd, method = "em")
out.hk <- scanone(hd, method = "hk")
out.imp <- scanone(hd, method = "imp")
# BUG FIX: the original wrote out.hk to all three output files, so the EM
# and imputation scan results were never saved. Each result now goes to
# its own output path.
write.table(out.hk[], file = outhk, sep = "\t", quote = FALSE)
write.table(out.em[], file = outem, sep = "\t", quote = FALSE)
write.table(out.imp[], file = outimp, sep = "\t", quote = FALSE)
# stats: text summaries of all three scans.
sink("samples.single.qtl.txt")
print("#summary of out.em")
summary(out.em)
print("#summary of out.hk")
summary(out.hk)
print("#summary of out.imp")
summary(out.imp)
sink()
# plot: one LOD-curve PDF per method.
pdf(file = "out.em.pdf")
plot(out.em)
dev.off()
pdf(file = "out.hk.pdf")
plot(out.hk)
dev.off()
pdf(file = "out.imp.pdf")
plot(out.imp)
dev.off()
#pdf(file = "out.hk.chr01.pdf")
#plot(out.hk, chr="01")
#dev.off()
###################### Permutation tests ######################
# Genome-wide significance thresholds from 1000 permutations of the
# Haley-Knott scan.
operm <- scanone(hd, method="hk", n.perm=1000)
#stats
sink("Permutation.tests.txt")
print("#summary of operm")
summary(operm)
print("#summary of operm, 0.01 and 0.05")
summary(operm, alpha=c(0.01, 0.05))
# LOD peaks of the real scan exceeding the permutation thresholds,
# with permutation p-values.
print("#summary of operm, 1% significance level")
summary(out.hk, perms=operm, alpha=0.01, pvalues=TRUE)
print("#summary of operm, 5% significance level")
summary(out.hk, perms=operm, alpha=0.05, pvalues=TRUE)
sink()
#plot
pdf(file = "operm.pdf")
plot(operm)
dev.off()
###################### Interval estimates of QTL location ######################
# Chromosomes are named "01".."12"; loop over them instead of repeating
# each call 12 times by hand.
chromosomes <- sprintf("%02d", 1:12)
# LOD support intervals (1.8-LOD drop), endpoints expanded to flanking markers.
sink("Interval.estimates.hk.1.8_LOD.ex2marker.txt")
print("#LOD support intervals, 1.8-LOD, expand to marker")
for (chr in chromosomes) {
  # Results were auto-printed at top level in the original; inside a loop we
  # must print() explicitly so they still reach the sink.
  print(lodint(out.hk, chr = chr, drop = 1.8, expandtomarkers = TRUE))
}
sink()
# Bayes 95% credible intervals, endpoints expanded to flanking markers.
sink("Interval.estimates.hk.Bayes_0.95.ex2marker.txt")
print("#Bayes credible intervals, 95%, expand to marker")
for (chr in chromosomes) {
  print(bayesint(out.hk, chr = chr, prob = 0.95, expandtomarkers = TRUE))
}
sink()
| /QTL_mapping/qtl_analysis.R | no_license | jiaxianqing/Pipelines | R | false | false | 4,094 | r | ##############################################################
# R code for QTL mapping
#
# http://www.rqtl.org
#
# 2018-1-25
##############################################################
#########################################################################
# Command-line arguments:
#   1: working directory  2: input cross file (csvr)
#   3-5: output paths for the hk / em / imp scan tables
wkdir <- commandArgs(TRUE)[1]
in_file <- commandArgs(TRUE)[2]
outhk <- commandArgs(TRUE)[3]
outem <- commandArgs(TRUE)[4]
outimp <- commandArgs(TRUE)[5]
#########################################################################
library("qtl")
setwd(wkdir)
#read data
# Rotated CSV format; genotype codes AA/AB/BB for alleles A and B.
hd <- read.cross("csvr", genotypes=c("AA","AB","BB"), alleles=c("A", "B"), dir = wkdir, in_file)
###################### summary of raw data ######################
# stats
sink("samples.summary_raw_data.txt")
print("#summary of raw data")
summary(hd)
sink()
# Conditional genotype probabilities at 1 cM steps, needed by scanone().
hd <- calc.genoprob(hd, step=1)
##count CO number
# nxo <- countXO(hd)
# pdf(file = "crossover.count.pdf")
# plot(nxo, ylab="No. crossovers")
# dev.off()
######################## single-QTL ########################
# Genome scans with three methods: EM interval mapping, Haley-Knott
# regression, and multiple imputation.
out.em <- scanone(hd, method = "em")
out.hk <- scanone(hd, method = "hk")
out.imp <- scanone(hd, method = "imp")
# BUG FIX: the original wrote out.hk to all three output files, so the EM
# and imputation scan results were never saved. Each result now goes to
# its own output path.
write.table(out.hk[], file = outhk, sep = "\t", quote = FALSE)
write.table(out.em[], file = outem, sep = "\t", quote = FALSE)
write.table(out.imp[], file = outimp, sep = "\t", quote = FALSE)
# stats: text summaries of all three scans.
sink("samples.single.qtl.txt")
print("#summary of out.em")
summary(out.em)
print("#summary of out.hk")
summary(out.hk)
print("#summary of out.imp")
summary(out.imp)
sink()
# plot: one LOD-curve PDF per method.
pdf(file = "out.em.pdf")
plot(out.em)
dev.off()
pdf(file = "out.hk.pdf")
plot(out.hk)
dev.off()
pdf(file = "out.imp.pdf")
plot(out.imp)
dev.off()
#pdf(file = "out.hk.chr01.pdf")
#plot(out.hk, chr="01")
#dev.off()
###################### Permutation tests ######################
# Genome-wide significance thresholds from 1000 permutations of the
# Haley-Knott scan.
operm <- scanone(hd, method="hk", n.perm=1000)
#stats
sink("Permutation.tests.txt")
print("#summary of operm")
summary(operm)
print("#summary of operm, 0.01 and 0.05")
summary(operm, alpha=c(0.01, 0.05))
# LOD peaks of the real scan exceeding the permutation thresholds,
# with permutation p-values.
print("#summary of operm, 1% significance level")
summary(out.hk, perms=operm, alpha=0.01, pvalues=TRUE)
print("#summary of operm, 5% significance level")
summary(out.hk, perms=operm, alpha=0.05, pvalues=TRUE)
sink()
#plot
pdf(file = "operm.pdf")
plot(operm)
dev.off()
###################### Interval estimates of QTL location ######################
sink("Interval.estimates.hk.1.8_LOD.ex2marker.txt")
print("#LOD support intervals, 1.8-LOD, expand to marker")
lodint(out.hk, chr="01", drop=1.8, expandtomarkers=TRUE)
lodint(out.hk, chr="02", drop=1.8, expandtomarkers=TRUE)
lodint(out.hk, chr="03", drop=1.8, expandtomarkers=TRUE)
lodint(out.hk, chr="04", drop=1.8, expandtomarkers=TRUE)
lodint(out.hk, chr="05", drop=1.8, expandtomarkers=TRUE)
lodint(out.hk, chr="06", drop=1.8, expandtomarkers=TRUE)
lodint(out.hk, chr="07", drop=1.8, expandtomarkers=TRUE)
lodint(out.hk, chr="08", drop=1.8, expandtomarkers=TRUE)
lodint(out.hk, chr="09", drop=1.8, expandtomarkers=TRUE)
lodint(out.hk, chr="10", drop=1.8, expandtomarkers=TRUE)
lodint(out.hk, chr="11", drop=1.8, expandtomarkers=TRUE)
lodint(out.hk, chr="12", drop=1.8, expandtomarkers=TRUE)
sink()
sink("Interval.estimates.hk.Bayes_0.95.ex2marker.txt")
print("#Bayes credible intervals, 95%, expand to marker")
bayesint(out.hk, chr="01", prob=0.95, expandtomarkers=TRUE)
bayesint(out.hk, chr="02", prob=0.95, expandtomarkers=TRUE)
bayesint(out.hk, chr="03", prob=0.95, expandtomarkers=TRUE)
bayesint(out.hk, chr="04", prob=0.95, expandtomarkers=TRUE)
bayesint(out.hk, chr="05", prob=0.95, expandtomarkers=TRUE)
bayesint(out.hk, chr="06", prob=0.95, expandtomarkers=TRUE)
bayesint(out.hk, chr="07", prob=0.95, expandtomarkers=TRUE)
bayesint(out.hk, chr="08", prob=0.95, expandtomarkers=TRUE)
bayesint(out.hk, chr="09", prob=0.95, expandtomarkers=TRUE)
bayesint(out.hk, chr="10", prob=0.95, expandtomarkers=TRUE)
bayesint(out.hk, chr="11", prob=0.95, expandtomarkers=TRUE)
bayesint(out.hk, chr="12", prob=0.95, expandtomarkers=TRUE)
sink()
|
\name{annotateTrans}
\alias{annotateTrans}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
A function to annotate a transcript grl with variant information.
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Given a txbd, coverage RLE and set of functional variants, this function
will return a GRangesList of transcripts annotated with additional
metadata which includes the fraction of the transcript that is callable
defined by the function isCallable and the number of functional variants
that fall in the transcript.
}
\usage{
annotateTrans(txdb, cov, anno_gr)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{txdb}{
%% ~~Describe \code{txdb} here~~
a txdb object
}
\item{cov}{
%% ~~Describe \code{cov} here~~
A coverage RLE as generated by the getCov function
}
\item{anno_gr}{
%% ~~Describe \code{anno_gr} here~~
a GRanges object with variants annotated for transcript occurrence and
consequence. The transcript IDs are assumed to be ref_seq IDs.
}
\item{cores}{
%% ~~Describe \code{cores} here~~
Number of cores to be used in the parallel aspects of the
code. Setting cores to 1 will run on a single core.
}
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
Returns a GRangesList of the transcripts with metadata columns
added for the fraction of the cds or exon region that is considered
callable and the number of protein altering mutations found in the total
cds regions.
}
\author{
%% ~~who you are~~
Jeremiah Degenhardt
}
\keyword{internal}
| /oldman/annotateTrans.Rd | no_license | lawremi/VariantTools | R | false | false | 1,731 | rd | \name{annotateTrans}
\alias{annotateTrans}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
A function to annotate a transcript grl with variant information.
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Given a txbd, coverage RLE and set of functional variants, this function
will return a GRangesList of transcripts annotated with additional
metadata which includes the fraction of the transcript that is callable
defined by the function isCallable and the number of functional variants
that fall in the transcript.
}
\usage{
annotateTrans(txdb, cov, anno_gr)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{txdb}{
%% ~~Describe \code{txdb} here~~
a txdb object
}
\item{cov}{
%% ~~Describe \code{cov} here~~
A coverage RLE as generated by the getCov function
}
\item{anno_gr}{
%% ~~Describe \code{anno_gr} here~~
a GRanges object with variants annotated for transcript occurrence and
consequence. The transcript IDs are assumed to be ref_seq IDs.
}
\item{cores}{
%% ~~Describe \code{cores} here~~
Number of cores to be used in the parallel aspects of the
code. Setting cores to 1 will run on a single core.
}
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
Returns a GRangesList of the transcripts with metadata columns
added for the fraction of the cds or exon region that is considered
callable and the number of protein altering mutations found in the total
cds regions.
}
\author{
%% ~~who you are~~
Jeremiah Degenhardt
}
\keyword{internal}
|
##########################################################
## Demo: Instructive destructive example
##########################################################
require(distr)
options("newDevice"=TRUE)
## package "distr" encourages
## consistency but does not
## enforce it---so in general
## d o n o t m o d i f y
## slots d,p,q,r!
N <- Norm()   # S4 object for a standard normal distribution
B <- Binom()  # S4 object for a binomial distribution
## Deliberately overwrite the density slot of the (continuous) normal
## with the (discrete) binomial's density function.
N@d <- B@d
plot(N)
### consequence: the slots of N
## are no longer consistent!!
| /demo/destructive.R | no_license | cran/distr | R | false | false | 466 | r | ##########################################################
## Demo: Instructive destructive example
##########################################################
require(distr)
options("newDevice"=TRUE)
## package "distr" encourages
## consistency but does not
## enforce it---so in general
## d o n o t m o d i f y
## slots d,p,q,r!
N <- Norm()
B <- Binom()
N@d <- B@d
plot(N)
### consequence: the slots of N
## are no longer consistent!!
|
# Simulate points on a hysteresis loop.
#
# cx, cy      centre of the loop
# retention   vertical "thickness" of the loop (split-point distance)
# b.x, b.y    saturation amplitudes of the input and output
# n, m        exponents controlling the loop shape (should both be odd)
# sd.x, sd.y  standard deviations of Gaussian noise added to x and y
# phase.angle starting phase in degrees
# n.points    number of points generated; period = points per full cycle
# extended.classical  use the sign-preserving |cos|^n form for non-integer n
# seed        optional RNG seed for reproducible noise
#
# Returns an object of class "hysteresisloop": a list with a named vector of
# loop parameters/derived quantities ($values) plus the simulated $x and $y.
mloop <- function(cx = 0, cy = 0, retention = 0.2, b.x = 0.6, b.y = 0.8,
                  n = 1, m = 1, sd.x = 0, sd.y = 0, phase.angle = 0,
                  n.points = 24, period = 24, extended.classical = FALSE,
                  seed = NULL) {
  if (!is.null(seed)) set.seed(seed)
  # Angular position of each sampled point around the loop.
  theta <- (1:n.points) / period * 2 * pi + phase.angle / 180 * pi
  if (extended.classical == FALSE) {
    x <- cx + b.x * cos(theta) + rnorm(n.points, 0, sd.x)
    y <- cy + retention * sin(theta)^m + b.y * cos(theta)^n +
      rnorm(n.points, 0, sd.y)
  } else {
    # Sign-preserving form: direc restores the sign lost by abs(cos)^n.
    direc <- sign(cos(theta))
    x <- cx + b.x * cos(theta) + rnorm(n.points, 0, sd.x)
    y <- cy + retention * sin(theta)^m + direc * (b.y * abs(cos(theta))^n) +
      rnorm(n.points, 0, sd.y)
  }
  # Angle of the loop's major axis at the split point.
  if (n == 1) {
    beta.split.angle <- atan2(b.y, b.x)
  } else if (n >= 2) {
    beta.split.angle <- 0
  } else {
    beta.split.angle <- NA
  }
  hysteresis.x <- 1 / sqrt(1 + (b.y / retention)^(2 / m))
  coercion <- hysteresis.x * b.x
  hysteresis.y <- retention / b.y
  # Closed-form loop area in terms of beta functions.
  area <- (0.5 / (beta((m + 3) / 2, (m + 3) / 2) * (m + 2)) +
             1 / beta((m + 1) / 2, (m + 1) / 2) -
             1 / beta((m + 3) / 2, (m - 1) / 2)) / (2^m) * pi *
    abs(retention * b.x)
  # BUG FIX: scalar condition now uses `||` (not vectorized `|`), and the
  # message matches the condition, which checks both n and m.
  if ((n %% 2) != 1 || (m %% 2) != 1) {
    warning("Will not be an actual hysteresis loop if n or m is not odd, check plot.")
  }
  ans <- list("values" = c("m" = m, "n" = n, "b.x" = b.x, "b.y" = b.y,
                           "phase.angle" = phase.angle, "cx" = cx, "cy" = cy,
                           "retention" = retention, "coercion" = coercion,
                           "area" = area,
                           "beta.split.angle" = beta.split.angle,
                           "hysteresis.x" = hysteresis.x,
                           "hysteresis.y" = hysteresis.y),
              "x" = x, "y" = y)
  class(ans) <- "hysteresisloop"
  ans
}
| /R/mloop.r | no_license | aparkhurst/hysteresis-2.5 | R | false | false | 1,560 | r | mloop <- function(cx=0,cy=0,retention=0.2,b.x=0.6,b.y=0.8,n=1,m=1,sd.x=0,sd.y=0,phase.angle=0,n.points=24,period=24,extended.classical=FALSE,seed=NULL) {
if (!is.null(seed)) set.seed(seed)
if (extended.classical==FALSE) {
x<-cx+b.x*cos((1:n.points)/period*2*pi+phase.angle/180*pi)+rnorm(n.points,0,sd.x)
y<-cy+retention*sin((1:n.points)/period*2*pi+phase.angle/180*pi)^m+b.y*cos((1:n.points)/period*2*pi+phase.angle/180*pi)^n+rnorm(n.points,0,sd.y)
}
else {
direc<-sign(cos((1:n.points)/period*2*pi+phase.angle/180*pi))
x<-cx+b.x*cos((1:n.points)/period*2*pi+phase.angle/180*pi)+rnorm(n.points,0,sd.x)
y<-cy+retention*sin((1:n.points)/period*2*pi+phase.angle/180*pi)^m+direc*(b.y*abs(cos((1:n.points)/period*2*pi+phase.angle/180*pi))^n)+rnorm(n.points,0,sd.y)
}
if (n==1) beta.split.angle<-atan2(b.y,b.x)
else if (n >= 2) beta.split.angle <- 0
else beta.split.angle<-NA
hysteresis.x <- 1/sqrt(1+(b.y/retention)^(2/m))
coercion <- hysteresis.x*b.x
hysteresis.y <- retention/b.y
area <- (0.5/(beta((m+3)/2,(m+3)/2)*(m+2))+1/beta((m+1)/2,(m+1)/2)-1/beta((m+3)/2,(m-1)/2))/(2^m)*pi*abs(retention*b.x)
if ((n%%2)!=1 | (m%%2)!=1) warning("Will not be an actual hysteresis loop if m is not odd, check plot.")
ans <- list("values"=c("m"=m,"n"=n, "b.x"=b.x,"b.y"=b.y,"phase.angle"=phase.angle,"cx"=cx,"cy"=cy,"retention"=retention,
"coercion"=coercion,"area"=area, "beta.split.angle"=beta.split.angle,"hysteresis.x"=hysteresis.x, "hysteresis.y"=hysteresis.y),"x"=x,"y"=y)
class(ans) <- "hysteresisloop"
ans
}
|
#' Partial Martingale Difference Divergence
#'
#' \code{pmdd} measures conditional mean dependence of \code{Y} given \code{X} adjusting for the
#' dependence on \code{Z}, where each contains one variable (univariate) or more variables (multivariate).
#' Only the U-centering approach is applied.
#'
#' @param X A vector, matrix or data frame, where rows represent samples, and columns represent variables.
#' @param Y A vector, matrix or data frame, where rows represent samples, and columns represent variables.
#' @param Z A vector, matrix or data frame, where rows represent samples, and columns represent variables.
#'
#' @return \code{pmdd} returns the squared partial martingale difference divergence
#' of \code{Y} given \code{X} adjusting for the dependence on \code{Z}.
#'
#' @references Park, T., Shao, X., and Yao, S. (2015).
#' Partial martingale difference correlation.
#' Electronic Journal of Statistics, 9(1), 1492-1517.
#' \url{http://dx.doi.org/10.1214/15-EJS1047}.
#'
#' @importFrom stats dist
#'
#' @include cmdm_functions.R
#'
#' @export
#'
#' @examples
#' # X, Y, Z are vectors with 10 samples and 1 variable
#' X <- rnorm(10)
#' Y <- rnorm(10)
#' Z <- rnorm(10)
#'
#' pmdd(X, Y, Z)
#'
#' # X, Y, Z are 10 x 2 matrices with 10 samples and 2 variables
#' X <- matrix(rnorm(10 * 2), 10, 2)
#' Y <- matrix(rnorm(10 * 2), 10, 2)
#' Z <- matrix(rnorm(10 * 2), 10, 2)
#'
#' pmdd(X, Y, Z)
pmdd <- function(X, Y, Z) {
  # Coerce inputs to matrices so vectors and data frames are handled uniformly.
  X <- as.matrix(X)
  Y <- as.matrix(Y)
  Z <- as.matrix(Z)
  n <- nrow(X)
  if (n != nrow(Y) || n != nrow(Z)) {
    stop("The dimensions of X, Y, Z do not agree.")
  }
  # (Removed unused locals p/q/r, which were computed but never read.)
  # U-centered matrices: D for the combined (X, Z) sample, B for the
  # half-squared Euclidean distances of Y, C for Z.
  W <- cbind(X, Z)
  D <- u.center(W)
  B <- u.center(0.5 * as.matrix(dist(Y))^2)
  C <- u.center(Z)
  # Project B onto the orthogonal complement of C (w.r.t. the U-centered
  # inner product), then take the inner product with D.
  beta <- u.inner(B, C) / u.inner(C, C)
  proj <- B - beta * C
  pmdd <- u.inner(proj, D)
  return(pmdd)
}
| /R/pmdd.R | no_license | cran/EDMeasure | R | false | false | 1,961 | r | #' Partial Martingale Difference Divergence
#'
#' \code{pmdd} measures conditional mean dependence of \code{Y} given \code{X} adjusting for the
#' dependence on \code{Z}, where each contains one variable (univariate) or more variables (multivariate).
#' Only the U-centering approach is applied.
#'
#' @param X A vector, matrix or data frame, where rows represent samples, and columns represent variables.
#' @param Y A vector, matrix or data frame, where rows represent samples, and columns represent variables.
#' @param Z A vector, matrix or data frame, where rows represent samples, and columns represent variables.
#'
#' @return \code{pmdd} returns the squared partial martingale difference divergence
#' of \code{Y} given \code{X} adjusting for the dependence on \code{Z}.
#'
#' @references Park, T., Shao, X., and Yao, S. (2015).
#' Partial martingale difference correlation.
#' Electronic Journal of Statistics, 9(1), 1492-1517.
#' \url{http://dx.doi.org/10.1214/15-EJS1047}.
#'
#' @importFrom stats dist
#'
#' @include cmdm_functions.R
#'
#' @export
#'
#' @examples
#' # X, Y, Z are vectors with 10 samples and 1 variable
#' X <- rnorm(10)
#' Y <- rnorm(10)
#' Z <- rnorm(10)
#'
#' pmdd(X, Y, Z)
#'
#' # X, Y, Z are 10 x 2 matrices with 10 samples and 2 variables
#' X <- matrix(rnorm(10 * 2), 10, 2)
#' Y <- matrix(rnorm(10 * 2), 10, 2)
#' Z <- matrix(rnorm(10 * 2), 10, 2)
#'
#' pmdd(X, Y, Z)
pmdd <- function(X, Y, Z) {
X <- as.matrix(X)
Y <- as.matrix(Y)
Z <- as.matrix(Z)
n <- nrow(X)
if (n != nrow(Y) || n != nrow(Z)) {
stop("The dimensions of X, Y, Z do not agree.")
}
p <- ncol(X)
q <- ncol(Y)
r <- ncol(Z)
W <- cbind(X, Z)
D <- u.center(W)
# A <- u.center(X)
B <- u.center(0.5 * as.matrix(dist(Y))^2)
C <- u.center(Z)
beta <- u.inner(B, C) / u.inner(C, C)
proj <- B - beta * C
pmdd <- u.inner(proj, D)
return(pmdd)
}
|
#' Add edges and attributes to graph from a table
#' @description Add edges and their attributes to an
#' existing graph object from data in a CSV file or a
#' data frame.
#' @param graph a graph object of class
#' \code{dgr_graph}.
#' @param table either a path to a CSV file, or, a data
#' frame object.
#' @param from_col the name of the table column from
#' which edges originate.
#' @param to_col the name of the table column to
#' which edges terminate.
#' @param ndf_mapping a single character value for
#' the mapping of the \code{from} and \code{to} columns
#' in the external table (supplied as \code{from_col}
#' and \code{to_col}, respectively) to a column in the
#' graph's internal node data frame (ndf).
#' @param rel_col an option to apply a column of data
#' in the table as \code{rel} attribute values.
#' @param set_rel an optional string to apply a
#' \code{rel} attribute to all edges created from the
#' table records.
#' @param drop_cols an optional character vector for
#' dropping columns from the incoming data.
#' @return a graph object of class \code{dgr_graph}.
#' @examples
#' \dontrun{
#' # Create an empty graph and then add
#' # nodes to it from a CSV file; in this case
#' # we are using the `currencies` CSV file
#' # that's available in the package
#' graph <-
#' create_graph() %>%
#' add_nodes_from_table(
#' system.file("extdata", "currencies.csv",
#' package = "DiagrammeR"))
#'
#' # Now we want to add edges to the graph
#' # using a similar CSV file that contains
#' # exchange rates between several currencies;
#' # the common attribute is the ISO-4217
#' # currency code
#' graph_1 <-
#' graph %>%
#' add_edges_from_table(
#' system.file("extdata", "usd_exchange_rates.csv",
#' package = "DiagrammeR"),
#' from_col = "from_currency",
#' to_col = "to_currency",
#' ndf_mapping = "iso_4217_code")
#'
#' # View part of the graph's internal edge data
#' # frame (edf) using `get_edge_df()`
#' graph_1 %>% get_edge_df() %>% head()
#' #> id from to rel cost_unit
#' #> 1 1 148 1 <NA> 0.272300
#' #> 2 2 148 2 <NA> 0.015210
#' #> 3 3 148 3 <NA> 0.008055
#' #> 4 4 148 4 <NA> 0.002107
#' #> 5 5 148 5 <NA> 0.565000
#' #> 6 6 148 6 <NA> 0.006058
#'
#' # If you would like to assign any of the table's
#' # columns as `rel` attribute, this can done with
#' # the `rel_col` argument; to set a static `rel`
#' # attribute for all edges, use `set_rel`
#' graph_2 <-
#' graph %>%
#' add_edges_from_table(
#' system.file("extdata", "usd_exchange_rates.csv",
#' package = "DiagrammeR"),
#' from_col = "from_currency",
#' to_col = "to_currency",
#' ndf_mapping = "iso_4217_code",
#' set_rel = "from_usd")
#'
#' # View part of the graph's internal edge data
#' # frame (edf) using `get_edge_df()`
#' graph_2 %>%
#' get_edge_df() %>%
#' head()
#' #> id from to rel cost_unit
#' #> 1 1 148 1 from_usd 0.272300
#' #> 2 2 148 2 from_usd 0.015210
#' #> 3 3 148 3 from_usd 0.008055
#' #> 4 4 148 4 from_usd 0.002107
#' #> 5 5 148 5 from_usd 0.565000
#' #> 6 6 148 6 from_usd 0.006058
#' }
#' @importFrom utils read.csv
#' @importFrom stats setNames
#' @importFrom tibble as_tibble
#' @importFrom dplyr left_join select select_ rename mutate mutate_ bind_cols everything distinct
#' @importFrom tidyr unnest_ drop_na_
#' @export add_edges_from_table
add_edges_from_table <- function(graph,
                                 table,
                                 from_col,
                                 to_col,
                                 ndf_mapping,
                                 rel_col = NULL,
                                 set_rel = NULL,
                                 drop_cols = NULL) {
  # Get the time of function start (recorded in the graph action log below)
  time_function_start <- Sys.time()
  # Validation: Graph object is valid
  if (graph_object_valid(graph) == FALSE) {
    stop("The graph object is not valid.")
  }
  # Validation: Graph contains nodes
  if (graph_contains_nodes(graph) == FALSE) {
    stop("The graph contains no nodes, so, edges cannot be added.")
  }
  # Create bindings for specific variables
  # (quiets R CMD check NOTEs about the NSE column references used below)
  rel <- id <- from <- to <- NULL
  # Determine whether the table is a file connection
  # to a CSV file or a data frame
  if (inherits(table, "character")) {
    # Load in CSV file
    csv <- utils::read.csv(table, stringsAsFactors = FALSE)
  } else if (inherits(table, "data.frame")) {
    # Rename `table` object as `csv`
    csv <- table
  }
  # Verify that value for `from_col` is in the table
  if (!(from_col %in% colnames(csv))) {
    stop("The value specified in `from_col` is not in the table.")
  }
  # Verify that value for `to_col` is in the table
  if (!(to_col %in% colnames(csv))) {
    stop("The value specified in `to_col` is not in the table.")
  }
  # Verify that value for `ndf_mapping` is in the
  # graph's ndf
  if (!(ndf_mapping %in% colnames(get_node_df(graph)))) {
    stop("The value specified in `ndf_mapping` is not in the graph.")
  }
  # If values for `drop_cols` provided, filter the CSV
  # columns by those named columns
  if (!is.null(drop_cols)) {
    columns_retained <-
      which(!(colnames(csv) %in% drop_cols))
    csv <- csv[, columns_retained]
  }
  # Optionally set the `rel` attribute from a
  # specified column in the CSV
  if (!is.null(rel_col)) {
    if (any(colnames(csv) == rel_col)) {
      colnames(csv)[which(colnames(csv) == rel_col)] <- "rel"
      csv <- mutate(csv, rel = as.character(rel))
    }
  }
  # Extract the ndf from the graph
  ndf <- graph$nodes_df
  # Get the column names from `csv` into a list,
  # and, add `id` to the list; this list is used
  # for the standard evaluation version of dplyr's
  # `select()` (`select_()`)
  # NOTE(review): select_()/mutate_()/unnest_()/drop_na_() used below are
  # deprecated standard-evaluation verbs in current dplyr/tidyr; kept as-is.
  csv_colnames <- list()
  if (length(setdiff(colnames(csv), c(from_col, to_col))) > 0) {
    for (i in 1:length(setdiff(colnames(csv), c(from_col, to_col)))) {
      csv_colnames[i] <- setdiff(colnames(csv), c(from_col, to_col))[i]
    }
    csv_colnames[(length(setdiff(colnames(csv), c(from_col, to_col))) + 1)] <- "id"
  } else {
    csv_colnames[1] <- "id"
  }
  # Expand the df to capture several space-delimited
  # values in the `to` column; drop NA values in the
  # `to_col` and the `from_col` columns
  csv <-
    csv %>%
    dplyr::mutate_(.dots = setNames(paste0("strsplit(", to_col, ", \" \")"), to_col)) %>%
    tidyr::unnest_(to_col) %>%
    tidyr::drop_na_(to_col) %>%
    tidyr::drop_na_(from_col)
  # Get the `from` col: map `from_col` values to node ids by joining the
  # table against the graph's ndf on the `ndf_mapping` column
  col_from <-
    tibble::as_tibble(csv) %>%
    dplyr::left_join(ndf,
                     by = stats::setNames(ndf_mapping, from_col)) %>%
    dplyr::select_(.dots = csv_colnames) %>%
    dplyr::rename(from = id) %>%
    dplyr::mutate(from = as.integer(from))
  # Get the `to` col
  # NOTE(review): distinct() is applied here but not when building `col_from`;
  # if the left join ever fans out, the two tibbles could end up with
  # different row counts before bind_cols() below — confirm that the
  # `ndf_mapping` values in the ndf are unique.
  col_to <-
    tibble::as_tibble(csv) %>%
    dplyr::left_join(ndf,
                     by = stats::setNames(ndf_mapping, to_col)) %>%
    dplyr::distinct() %>%
    dplyr::select_(.dots = csv_colnames) %>%
    dplyr::rename(to = id) %>%
    dplyr::mutate(to = as.integer(to)) %>%
    dplyr::select(to)
  # Combine the `from` and `to` columns together along
  # with a new `rel` column (filled with NAs) and additional
  # columns from the CSV
  edf <-
    col_from %>%
    dplyr::bind_cols(col_to)
  # Add in a `rel` column (filled with NAs) if it's not
  # already in the table
  if (!("rel" %in% colnames(edf))) {
    edf <-
      edf %>%
      dplyr::mutate(rel = as.character(NA))
  }
  # Use the `select()` function to arrange the
  # column rows and then convert to a data frame
  edf <-
    edf %>%
    dplyr::select(from, to, rel, dplyr::everything()) %>%
    as.data.frame(stringsAsFactors = FALSE)
  # Remove any rows where there is an NA in either
  # `from` or `to` (i.e., values that matched no node in the graph)
  edf <- edf[which(!is.na(edf$from) & !is.na(edf$to)), ]
  rownames(edf) <- NULL
  # Add in an `id` column
  edf <-
    dplyr::bind_cols(
      data.frame(id = as.integer(1:nrow(edf))),
      edf)
  # Optionally set the `rel` attribute with a single
  # value repeated down
  if (is.null(rel_col) & !is.null(set_rel)) {
    edf <-
      edf %>%
      dplyr::mutate(rel = as.character(set_rel))
  }
  # Add the edf to the graph object
  if (is.null(graph$edges_df)) {
    graph$edges_df <- edf
  } else {
    graph$edges_df <- dplyr::bind_rows(graph$edges_df, edf)
  }
  # Update the `last_edge` value in the graph
  graph$last_edge <- nrow(graph$edges_df)
  # Record this modification in the graph's action log
  graph$graph_log <-
    add_action_to_log(
      graph_log = graph$graph_log,
      version_id = nrow(graph$graph_log) + 1,
      function_used = "add_edges_from_table",
      time_modified = time_function_start,
      duration = graph_function_duration(time_function_start),
      nodes = nrow(graph$nodes_df),
      edges = nrow(graph$edges_df))
  # Perform graph actions, if any are available
  if (nrow(graph$graph_actions) > 0) {
    graph <-
      graph %>%
      trigger_graph_actions()
  }
  # Write graph backup if the option is set
  if (graph$graph_info$write_backups) {
    save_graph_as_rds(graph = graph)
  }
  return(graph)
}
| /R/add_edges_from_table.R | no_license | ekstroem/DiagrammeR | R | false | false | 9,117 | r | #' Add edges and attributes to graph from a table
#' @description Add edges and their attributes to an
#' existing graph object from data in a CSV file or a
#' data frame.
#' @param graph a graph object of class
#' \code{dgr_graph}.
#' @param table either a path to a CSV file, or, a data
#' frame object.
#' @param from_col the name of the table column from
#' which edges originate.
#' @param to_col the name of the table column to
#' which edges terminate.
#' @param ndf_mapping a single character value for
#' the mapping of the \code{from} and \code{to} columns
#' in the external table (supplied as \code{from_col}
#' and \code{to_col}, respectively) to a column in the
#' graph's internal node data frame (ndf).
#' @param rel_col an option to apply a column of data
#' in the table as \code{rel} attribute values.
#' @param set_rel an optional string to apply a
#' \code{rel} attribute to all edges created from the
#' table records.
#' @param drop_cols an optional character vector for
#' dropping columns from the incoming data.
#' @return a graph object of class \code{dgr_graph}.
#' @examples
#' \dontrun{
#' # Create an empty graph and then add
#' # nodes to it from a CSV file; in this case
#' # we are using the `currencies` CSV file
#' # that's available in the package
#' graph <-
#' create_graph() %>%
#' add_nodes_from_table(
#' system.file("extdata", "currencies.csv",
#' package = "DiagrammeR"))
#'
#' # Now we want to add edges to the graph
#' # using a similar CSV file that contains
#' # exchange rates between several currencies;
#' # the common attribute is the ISO-4217
#' # currency code
#' graph_1 <-
#' graph %>%
#' add_edges_from_table(
#' system.file("extdata", "usd_exchange_rates.csv",
#' package = "DiagrammeR"),
#' from_col = "from_currency",
#' to_col = "to_currency",
#' ndf_mapping = "iso_4217_code")
#'
#' # View part of the graph's internal edge data
#' # frame (edf) using `get_edge_df()`
#' graph_1 %>% get_edge_df() %>% head()
#' #> id from to rel cost_unit
#' #> 1 1 148 1 <NA> 0.272300
#' #> 2 2 148 2 <NA> 0.015210
#' #> 3 3 148 3 <NA> 0.008055
#' #> 4 4 148 4 <NA> 0.002107
#' #> 5 5 148 5 <NA> 0.565000
#' #> 6 6 148 6 <NA> 0.006058
#'
#' # If you would like to assign any of the table's
#' # columns as `rel` attribute, this can done with
#' # the `rel_col` argument; to set a static `rel`
#' # attribute for all edges, use `set_rel`
#' graph_2 <-
#' graph %>%
#' add_edges_from_table(
#' system.file("extdata", "usd_exchange_rates.csv",
#' package = "DiagrammeR"),
#' from_col = "from_currency",
#' to_col = "to_currency",
#' ndf_mapping = "iso_4217_code",
#' set_rel = "from_usd")
#'
#' # View part of the graph's internal edge data
#' # frame (edf) using `get_edge_df()`
#' graph_2 %>%
#' get_edge_df() %>%
#' head()
#' #> id from to rel cost_unit
#' #> 1 1 148 1 from_usd 0.272300
#' #> 2 2 148 2 from_usd 0.015210
#' #> 3 3 148 3 from_usd 0.008055
#' #> 4 4 148 4 from_usd 0.002107
#' #> 5 5 148 5 from_usd 0.565000
#' #> 6 6 148 6 from_usd 0.006058
#' }
#' @importFrom utils read.csv
#' @importFrom stats setNames
#' @importFrom tibble as_tibble
#' @importFrom dplyr left_join select select_ rename mutate mutate_ bind_cols everything distinct
#' @importFrom tidyr unnest_ drop_na_
#' @export add_edges_from_table
add_edges_from_table <- function(graph,
table,
from_col,
to_col,
ndf_mapping,
rel_col = NULL,
set_rel = NULL,
drop_cols = NULL) {
# Get the time of function start
time_function_start <- Sys.time()
# Validation: Graph object is valid
if (graph_object_valid(graph) == FALSE) {
stop("The graph object is not valid.")
}
# Validation: Graph contains nodes
if (graph_contains_nodes(graph) == FALSE) {
stop("The graph contains no nodes, so, edges cannot be added.")
}
# Create bindings for specific variables
rel <- id <- from <- to <- NULL
# Determine whether the table is a file connection
# to a CSV file or a data frame
if (inherits(table, "character")) {
# Load in CSV file
csv <- utils::read.csv(table, stringsAsFactors = FALSE)
} else if (inherits(table, "data.frame")) {
# Rename `table` object as `csv`
csv <- table
}
# Verify that value for `from_col` is in the table
if (!(from_col %in% colnames(csv))) {
stop("The value specified in `from_col` is not in the table.")
}
# Verify that value for `to_col` is in the table
if (!(to_col %in% colnames(csv))) {
stop("The value specified in `to_col` is not in the table.")
}
# Verify that value for `ndf_mapping` is in the
# graph's ndf
if (!(ndf_mapping %in% colnames(get_node_df(graph)))) {
stop("The value specified in `ndf_mapping` is not in the graph.")
}
# If values for `drop_cols` provided, filter the CSV
# columns by those named columns
if (!is.null(drop_cols)) {
columns_retained <-
which(!(colnames(csv) %in% drop_cols))
csv <- csv[, columns_retained]
}
# Optionally set the `rel` attribute from a
# specified column in the CSV
if (!is.null(rel_col)) {
if (any(colnames(csv) == rel_col)) {
colnames(csv)[which(colnames(csv) == rel_col)] <- "rel"
csv <- mutate(csv, rel = as.character(rel))
}
}
# Extract the ndf from the graph
ndf <- graph$nodes_df
# Get the column names from `csv` into a list,
# and, add `id` to the list; this list is used
# for the standard evaluation version of dplyr's
# `select()` (`select_()`)
csv_colnames <- list()
if (length(setdiff(colnames(csv), c(from_col, to_col))) > 0) {
for (i in 1:length(setdiff(colnames(csv), c(from_col, to_col)))) {
csv_colnames[i] <- setdiff(colnames(csv), c(from_col, to_col))[i]
}
csv_colnames[(length(setdiff(colnames(csv), c(from_col, to_col))) + 1)] <- "id"
} else {
csv_colnames[1] <- "id"
}
# Expand the df to capture several space-delimited
# values in the `to` column; drop NA values in the
# `to_col` and the `from_col` columns
csv <-
csv %>%
dplyr::mutate_(.dots = setNames(paste0("strsplit(", to_col, ", \" \")"), to_col)) %>%
tidyr::unnest_(to_col) %>%
tidyr::drop_na_(to_col) %>%
tidyr::drop_na_(from_col)
# Get the `from` col
col_from <-
tibble::as_tibble(csv) %>%
dplyr::left_join(ndf,
by = stats::setNames(ndf_mapping, from_col)) %>%
dplyr::select_(.dots = csv_colnames) %>%
dplyr::rename(from = id) %>%
dplyr::mutate(from = as.integer(from))
# Get the `to` col
col_to <-
tibble::as_tibble(csv) %>%
dplyr::left_join(ndf,
by = stats::setNames(ndf_mapping, to_col)) %>%
dplyr::distinct() %>%
dplyr::select_(.dots = csv_colnames) %>%
dplyr::rename(to = id) %>%
dplyr::mutate(to = as.integer(to)) %>%
dplyr::select(to)
# Combine the `from` and `to` columns together along
# with a new `rel` column (filled with NAs) and additional
# columns from the CSV
edf <-
col_from %>%
dplyr::bind_cols(col_to)
# Add in a `rel` column (filled with NAs) if it's not
# already in the table
if (!("rel" %in% colnames(edf))) {
edf <-
edf %>%
dplyr::mutate(rel = as.character(NA))
}
# Use the `select()` function to arrange the
# column rows and then convert to a data frame
edf <-
edf %>%
dplyr::select(from, to, rel, dplyr::everything()) %>%
as.data.frame(stringsAsFactors = FALSE)
# Remove any rows where there is an NA in either
# `from` or `to`
edf <- edf[which(!is.na(edf$from) & !is.na(edf$to)), ]
rownames(edf) <- NULL
# Add in an `id` column
edf <-
dplyr::bind_cols(
data.frame(id = as.integer(1:nrow(edf))),
edf)
# Optionally set the `rel` attribute with a single
# value repeated down
if (is.null(rel_col) & !is.null(set_rel)) {
edf <-
edf %>%
dplyr::mutate(rel = as.character(set_rel))
}
# Add the edf to the graph object
if (is.null(graph$edges_df)) {
graph$edges_df <- edf
} else {
graph$edges_df <- dplyr::bind_rows(graph$edges_df, edf)
}
# Update the `last_edge` value in the graph
graph$last_edge <- nrow(graph$edges_df)
graph$graph_log <-
add_action_to_log(
graph_log = graph$graph_log,
version_id = nrow(graph$graph_log) + 1,
function_used = "add_edges_from_table",
time_modified = time_function_start,
duration = graph_function_duration(time_function_start),
nodes = nrow(graph$nodes_df),
edges = nrow(graph$edges_df))
# Perform graph actions, if any are available
if (nrow(graph$graph_actions) > 0) {
graph <-
graph %>%
trigger_graph_actions()
}
# Write graph backup if the option is set
if (graph$graph_info$write_backups) {
save_graph_as_rds(graph = graph)
}
return(graph)
}
|
# tests for listing gists
context("gists")
test_that("listing gists works", {
  # Requires network access to the GitHub API, so skip on CRAN.
  skip_on_cran()
  expect_is(gists()[[1]], "gist")
  expect_equal(length(gists(per_page=2)), 2)
})
test_that("config options work", {
  skip_on_cran()
  library('httr')
  # An extremely short timeout should make the underlying request error.
  expect_error(gists(config=timeout(0.001)))
})
| /tests/testthat/test-gists.R | permissive | silvrwolfboy/gistr | R | false | false | 303 | r | # tests for listing gists
context("gists")
test_that("listing gists works", {
skip_on_cran()
expect_is(gists()[[1]], "gist")
expect_equal(length(gists(per_page=2)), 2)
})
test_that("config options work", {
skip_on_cran()
library('httr')
expect_error(gists(config=timeout(0.001)))
})
|
#global assignment of project dir -> change to whatever in order to find plots/data/etc.. files
# Compute the area under the ROC curve with ROCR.
#
# prob   numeric vector of predicted probabilities
# label  vector of true class labels (two levels expected by ROCR)
#
# Returns the AUC, or NA if ROCR fails (e.g. `label` has a single level).
calcAUC <- function(prob, label){
  # silent = TRUE keeps the swallowed error off stderr; failure is reported
  # via the NA return value instead.
  AUC <- try({
    (performance(prediction(predictions = prob, labels = label), "auc"))@y.values[[1]]
  }, silent = TRUE)
  # inherits() is the idiomatic class test (vs. '%in% class(x)').
  if (inherits(AUC, "try-error")) {
    NA
  } else {
    AUC
  }
}
# Summarise binary classification performance as a one-row data frame with
# confusion-matrix counts (TP/TN/FP/FN), precision, sensitivity, error rate
# and AUC.  With knn = TRUE, `probs` is treated as a factor of predicted
# classes ("0"/"1") and AUC is reported as 0; otherwise `probs` are
# probabilities thresholded at 0.5 and AUC comes from calcAUC().
getStatsFromGlmModel <- function(probs, y, knn = FALSE){
  if (TRUE == knn){
    # Factor levels "0"/"1" map to numeric 1/2; subtract 1 to recover 0/1.
    hard_pred <- as.numeric(probs) - 1
  } else {
    hard_pred <- ifelse(probs > 0.5, 1, 0)
  }
  hit <- hard_pred == y
  stats_row <- data.frame(trial = -1)
  stats_row$TP <- sum(hit & y == 1)
  stats_row$TN <- sum(hit & y == 0)
  stats_row$FP <- sum(!hit & y == 0)
  stats_row$FN <- sum(!hit & y == 1)
  stats_row$prec <- with(stats_row, TP / (TP + FP))
  stats_row$sens <- with(stats_row, TP / (TP + FN))
  stats_row$errorRate <- 1 - mean(hit)
  if (TRUE == knn){
    stats_row$AUC <- 0
  } else {
    stats_row$AUC <- calcAUC(prob = probs, label = y)
  }
  stats_row
}
# Ensure that `dir` exists (creating it with mode 0755, recursively by
# default, when nothing is at that path) and return the path.
makeDir <- function(dir, recursiveCreate = TRUE){
  path_taken <- file.exists(dir)
  if (!path_taken){
    dir.create(path = dir, showWarnings = TRUE,
               recursive = recursiveCreate, mode = "0755")
  }
  dir
}
# Report the total size (in Mb) of all objects in the global environment
# as a formatted string, e.g. "1.23 Mb stored in memory".
getMemory <- function(){
  obj_names <- ls(envir = .GlobalEnv)
  # BUG FIX: the previous sapply() form returned an empty *list* when the
  # global environment was empty, making sum() error with "invalid 'type'".
  # vapply() guarantees a numeric vector (numeric(0) when empty -> sum 0).
  sizes <- vapply(obj_names,
                  function(x) as.numeric(object.size(get(x, envir = .GlobalEnv))),
                  numeric(1))
  gettextf("%.2f Mb stored in memory", sum(sizes) / (1000000))
}
# Persist the arguments of a captured (unevaluated) call to `file` as
# dput()-able R code so the call can be replayed later, and also bind each
# named argument as a variable in `env`.
#
# fnCall   an unevaluated call, e.g. built with quote()
# verbose  print the call and a name/value/type summary to the console
# env      environment where argument expressions are evaluated and assigned
# file     destination file; append=TRUE appends instead of overwriting
#
# NOTE(review): the connection `append.file` is opened but never closed;
# consider on.exit(close(append.file), add = TRUE).
saveFunArgs <- function(fnCall,verbose=TRUE,env=parent.frame(),
                        file="~/sandbox/objects.R",append=FALSE){
  # Normalise so every supplied argument is matched to its formal name.
  fnCall <- standardise_call(fnCall)
  stopifnot(is.call(fnCall))
  if(identical(append,TRUE)){
    append.file <- file(file, open="a")
  } else {
    append.file <- file(file, open="w")
  }
  # [-1] drops the function symbol, leaving only the arguments.
  values <- as.character(fnCall[-1])
  variables <- names(fnCall)[-1]
  call.list <- as.list(fnCall)[-1]
  if(verbose){
    print(fnCall)
    print(paste0(variables, " = ", values, " #", sapply(fnCall[-1], typeof)))
  }
  # Timestamp, then the call itself, then one "name = value" pair per line.
  dput(date(), file = append.file)
  dput(fnCall, file = append.file)
  for(i in which(variables != "")){
    # val.local <- ifelse(is.language(call.list[i][[1]]),eval(parse(text=values[i]), env), call.list[i][[1]])
    # (the ifelse() form above was a bug: ifelse truncates to one element)
    if(is.language(call.list[i][[1]])){val.local <- eval(parse(text=values[i]), env)}else{val.local <- call.list[i][[1]]}
    assign(variables[i], val.local, env)
    var.char <- variables[i]
    cat(paste(var.char, " = "),file=append.file)
    dput(eval(as.name(var.char),env), file=append.file)
  }
  # Finally write a line that re-invokes the function with the saved names.
  cat(paste(fnCall[[1]],"(",paste0(variables, collapse=","), ")",sep=""),file=append.file)
  cat("\n\n\n", file=append.file)
}
# Smoke test for saveFunArgs(): captures a call whose arguments mix a local
# variable (`y`) and data-dependent expressions (dim(iris)).
# NOTE(review): writes to a hard-coded path under ~/sandbox — run manually.
testSaveFunArgs <- function(){
  y<- 3
  callExpr <- quote(runif(n=1 + y, min=dim(iris)[1], max=dim(iris)[1] + 1))
  saveFunArgs(fnCall=callExpr,verbose=FALSE,
              file = "~/sandbox/objects2.R")
}
############################################
# Evaluate the arguments of a captured call and bind each named argument as
# a variable in `env` (by default, the caller's frame).
#
# fnCall   an unevaluated call, e.g. quote(read.csv("f", header = TRUE))
# verbose  print the call and a name/value/type summary
# env      environment where argument expressions are evaluated and assigned
evalFunArgs <- function(fnCall,verbose=TRUE,env=parent.frame()){
  fnCall <- standardise_call(fnCall)
  stopifnot(is.call(fnCall))
  values <- as.character(fnCall[-1])
  variables <- names(fnCall)[-1]
  call.list <- as.list(fnCall)[-1]
  if(verbose){
    print(fnCall)
    print(paste0(variables, " = ", values, " #", sapply(fnCall[-1], typeof)))
  }
  for(i in which(variables != "")){
    # BUG FIX: the previous ifelse() form returned only the first element of
    # the selected branch, silently truncating language objects and vector
    # values.  Use scalar if/else instead — the same fix already applied in
    # saveFunArgs() (see its commented-out old line).
    if (is.language(call.list[i][[1]])) {
      val.local <- eval(parse(text = values[i]), env)
    } else {
      val.local <- call.list[i][[1]]
    }
    assign(variables[i], val.local, env)
  }
}
#fnCall <- quote(read.csv("imp", header=one() * 4, sep=as.character(header)))
#evalFunArgs(fnCall)
#library(pryr)
standardise_call <- function(call, env = parent.frame()){
stopifnot(is.call(call))
fn <- eval(call[[1]], env)
if(is.primitive(fn)) return(fn)
match.call(fn, call)
}
modify_call <- function(call, new_args) {
call <- standardise_call(call)
nms <- names(new_args) %||% rep("", length(new_args))
if (any(nms == "")) {
stop("All new arguments must be named", call. = FALSE)
}
for(nm in nms) {
call[[nm]] <- new_args[[nm]]
}
call
}
removeMaxFiles <- function(checkFile){
# TODO figure out what is going on here...
#mb.size <- (file.info(checkFile)$size)/(1000 * 1000)
#if (mb.size > 450){
# file.remove(checkFile)
#}
}
applyGsubVec <- function(x,pattern,replacement){
sapply(x,function(y)gsub(x=y, pattern=pattern, replacement=replacement))
}
| /mlAlgoAW/analysis/predLib.R | no_license | stjordanis/enhancer_pred | R | false | false | 4,427 | r |
#global assignment of project dir -> change to whatever in order to find plots/data/etc.. files
calcAUC <- function(prob, label){
AUC <- NA
# if(!identical(getLevels(label),2)){
# return(NA)
# }
AUC <- try({
(performance(prediction(predictions=prob, labels=label), "auc"))@y.values[[1]]
})
if ('try-error' %in% class(AUC)){
NA
} else {
AUC
}
}
getStatsFromGlmModel <- function(probs, y, knn=FALSE){
if (TRUE == knn){
pred <- as.numeric(probs) - 1
} else {
pred <- rep(0,length(probs))
pred[which(probs > 0.5)] <- 1
}
correct <- (pred == y)
poly2 <- data.frame(trial=-1)
poly2$TP <- length(which(correct & y ==1))
poly2$TN <- length(which(correct & y ==0))
poly2$FP <- length(which(!correct & y ==0))
poly2$FN <- length(which(!correct & y ==1))
poly2$prec <- with(poly2, TP / (TP + FP))
poly2$sens <- with(poly2, TP / (TP + FN))
poly2$errorRate <- 1 - sum(correct)/length(correct)
if (TRUE == knn){
poly2$AUC <- 0
} else {
poly2$AUC <- calcAUC(prob=probs, label=y)
}
poly2
}
makeDir <- function(dir,recursiveCreate=TRUE){
if (!file.exists(dir)){
dir.create(path=dir,showWarnings=TRUE,recursive=recursiveCreate,mode="0755")
}
dir
}
getMemory <- function(){
gettextf("%.2f Mb stored in memory",
sum(sapply(unlist(ls(envir=.GlobalEnv)),
function(x)object.size(get(x,envir=.GlobalEnv))))
/ (1000000))
}
saveFunArgs <- function(fnCall,verbose=TRUE,env=parent.frame(),
file="~/sandbox/objects.R",append=FALSE){
fnCall <- standardise_call(fnCall)
stopifnot(is.call(fnCall))
if(identical(append,TRUE)){
append.file <- file(file, open="a")
} else {
append.file <- file(file, open="w")
}
values <- as.character(fnCall[-1])
variables <- names(fnCall)[-1]
call.list <- as.list(fnCall)[-1]
if(verbose){
print(fnCall)
print(paste0(variables, " = ", values, " #", sapply(fnCall[-1], typeof)))
}
dput(date(), file = append.file)
dput(fnCall, file = append.file)
for(i in which(variables != "")){
# val.local <- ifelse(is.language(call.list[i][[1]]),eval(parse(text=values[i]), env), call.list[i][[1]])
if(is.language(call.list[i][[1]])){val.local <- eval(parse(text=values[i]), env)}else{val.local <- call.list[i][[1]]}
assign(variables[i], val.local, env)
var.char <- variables[i]
cat(paste(var.char, " = "),file=append.file)
dput(eval(as.name(var.char),env), file=append.file)
}
cat(paste(fnCall[[1]],"(",paste0(variables, collapse=","), ")",sep=""),file=append.file)
cat("\n\n\n", file=append.file)
}
testSaveFunArgs <- function(){
y<- 3
callExpr <- quote(runif(n=1 + y, min=dim(iris)[1], max=dim(iris)[1] + 1))
saveFunArgs(fnCall=callExpr,verbose=FALSE,
file = "~/sandbox/objects2.R")
}
############################################
evalFunArgs <- function(fnCall,verbose=TRUE,env=parent.frame()){
fnCall <- standardise_call(fnCall)
stopifnot(is.call(fnCall))
values <- as.character(fnCall[-1])
variables <- names(fnCall)[-1]
call.list <- as.list(fnCall)[-1]
if(verbose){
print(fnCall)
print(paste0(variables, " = ", values, " #", sapply(fnCall[-1], typeof)))
}
for(i in which(variables != "")){
val.local <- ifelse(is.language(call.list[i][[1]]),
eval(parse(text=values[i]), env),
call.list[i][[1]])
assign(variables[i], val.local, env)
}
}
#fnCall <- quote(read.csv("imp", header=one() * 4, sep=as.character(header)))
#evalFunArgs(fnCall)
#library(pryr)
standardise_call <- function(call, env = parent.frame()){
stopifnot(is.call(call))
fn <- eval(call[[1]], env)
if(is.primitive(fn)) return(fn)
match.call(fn, call)
}
modify_call <- function(call, new_args) {
call <- standardise_call(call)
nms <- names(new_args) %||% rep("", length(new_args))
if (any(nms == "")) {
stop("All new arguments must be named", call. = FALSE)
}
for(nm in nms) {
call[[nm]] <- new_args[[nm]]
}
call
}
removeMaxFiles <- function(checkFile){
# TODO figure out what is going on here...
#mb.size <- (file.info(checkFile)$size)/(1000 * 1000)
#if (mb.size > 450){
# file.remove(checkFile)
#}
}
applyGsubVec <- function(x,pattern,replacement){
sapply(x,function(y)gsub(x=y, pattern=pattern, replacement=replacement))
}
|
library(shiny)
ui <- fluidPage(
"Olá, mundo"
)
server <- function(input, output, session) {
}
shinyApp(ui, server)
# library(shiny)
#
# ui <- fluidPage("Olá, mundo!")
#
# server <- function(input, output, session) {
# # O nosso código em R será colocado aqui.
# }
#
# shinyApp(ui, server)
| /scripts/01-ola-mundo.R | no_license | curso-r/latinr-shiny | R | false | false | 303 | r | library(shiny)
ui <- fluidPage(
"Olá, mundo"
)
server <- function(input, output, session) {
}
shinyApp(ui, server)
# library(shiny)
#
# ui <- fluidPage("Olá, mundo!")
#
# server <- function(input, output, session) {
# # O nosso código em R será colocado aqui.
# }
#
# shinyApp(ui, server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/catboost.R
\name{catboost.train}
\alias{catboost.train}
\title{Train the model}
\usage{
catboost.train(learn_pool, test_pool = NULL, params = list())
}
\arguments{
\item{learn_pool}{The dataset used for training the model.
Default value: Required argument}
\item{test_pool}{The dataset used for testing the quality of the model.
Default value: NULL (not used)}
\item{params}{The list of parameters to start training with.
If omitted, default values are used (see The list of parameters).
If set, the passed list of parameters overrides the default values.
Default value: Required argument}
}
\description{
Train the model using a CatBoost dataset.
}
\details{
The list of parameters
\itemize{
\item Common parameters
\itemize{
\item fold_permutation_block_size
Objects in the dataset are grouped in blocks before the random permutations.
This parameter defines the size of the blocks.
The smaller is the value, the slower is the training.
Large values may result in quality degradation.
Default value:
Default value differs depending on the dataset size and ranges from 1 to 256 inclusively
\item ignored_features
Identifiers of features to exclude from training.
The non-negative indices that do not match any features are successfully ignored.
For example, if five features are defined for the objects in the dataset and this parameter
is set to “42”, the corresponding non-existing feature is successfully ignored.
The identifier corresponds to the feature's index.
Feature indices used in train and feature importance are numbered from 0 to featureCount – 1.
If a file is used as input data then any non-feature column types are ignored when calculating these
indices. For example, each row in the input file contains data in the following order:
categorical feature<\code{\t}>target value<\code{\t}> numerical feature. So for the row rock<\code{\t}>0 <\code{\t}>42,
the identifier for the “rock” feature is 0, and for the “42” feature it's 1.
   The identifiers of features to exclude should be enumerated in a vector.
   For example, if training should exclude features with the identifiers
   1, 2, 7, 42, 43, 44, 45, the value of this parameter should be set to c(1,2,7,42,43,44,45).
Default value:
None (use all features)
\item use_best_model
If this parameter is set, the number of trees that are saved in the resulting model is defined as follows:
Build the number of trees defined by the training parameters.
\itemize{
\item Identify the iteration with the optimal loss function value.
\item No trees are saved after this iteration.
}
This option requires a test dataset to be provided.
Default value:
FALSE (not used)
\item loss_function
The loss function (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/loss-functions-docpage/#loss-functions})
to use in training. The specified value also determines the machine learning problem to solve.
Format:
<Loss function 1>[:<parameter 1>=<value>:..<parameter N>=<value>:]
Supported loss functions:
\itemize{
\item 'Logloss'
\item 'CrossEntropy'
\item 'RMSE'
\item 'MAE'
\item 'Quantile'
\item 'LogLinQuantile'
\item 'MAPE'
\item 'Poisson'
\item 'QueryRMSE'
\item 'MultiClass'
\item 'MultiClassOneVsAll'
\item 'PairLogit'
}
Supported parameters:
\itemize{
\item alpha - The coefficient used in quantile-based losses ('Quantile' and 'LogLinQuantile'). The default value is 0.5.
For example, if you need to calculate the value of Quantile with the coefficient \eqn{\alpha = 0.1}, use the following construction:
'Quantile:alpha=0.1'
}
Default value:
'RMSE'
\item custom_loss
Loss function (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/loss-functions-docpage/#loss-functions})
values to output during training.
These functions are not optimized and are displayed for informational purposes only.
Format:
c(<Loss function 1>[:<parameter>=<value>],<Loss function 2>[:<parameter>=<value>],...,<Loss function N>[:<parameter>=<value>])
Supported loss functions:
\itemize{
\item 'Logloss'
\item 'CrossEntropy'
\item 'RMSE'
\item 'MAE'
\item 'Quantile'
\item 'LogLinQuantile'
\item 'MAPE'
\item 'Poisson'
\item 'QueryRMSE'
\item 'MultiClass'
\item 'MultiClassOneVsAll'
\item 'PairLogit'
\item 'R2'
\item 'AUC'
\item 'Accuracy'
\item 'Precision'
\item 'Recall'
\item 'F1'
\item 'TotalF1'
\item 'MCC'
\item 'PairAccuracy'
}
Supported parameters:
\itemize{
\item alpha - The coefficient used in quantile-based losses ('Quantile' and 'LogLinQuantile'). The default value is 0.5.
}
For example, if you need to calculate the value of CrossEntropy and Quantile with the coefficient \eqn{\alpha = 0.1}, use the following construction:
c('CrossEntropy') or simply 'CrossEntropy'.
Values of all custom loss functions for learning and test datasets are saved to the Loss function
(see \url{https://tech.yandex.com/catboost/doc/dg/concepts/output-data_error-functions-docpage/#output-data_error-functions})
output files (learn_error.tsv and test_error.tsv respectively). The catalog for these files is specified in the train-dir (train_dir) parameter.
Default value:
None (use one of the loss functions supported by the library)
\item eval_metric
The loss function used for overfitting detection (if enabled) and best model selection (if enabled).
Supported loss functions:
\itemize{
\item 'Logloss'
\item 'CrossEntropy'
\item 'RMSE'
\item 'MAE'
\item 'Quantile'
\item 'LogLinQuantile'
\item 'MAPE'
\item 'Poisson'
\item 'QueryRMSE'
\item 'MultiClass'
\item 'MultiClassOneVsAll'
\item 'PairLogit'
\item 'R2'
\item 'AUC'
\item 'Accuracy'
\item 'Precision'
\item 'Recall'
\item 'F1'
\item 'TotalF1'
\item 'MCC'
\item 'PairAccuracy'
}
Format:
metric_name:param=Value
Examples:
\code{'R2'}
\code{'Quantile:alpha=0.3'}
Default value:
Optimized objective is used
\item iterations
The maximum number of trees that can be built when solving machine learning problems.
When using other parameters that limit the number of iterations, the final number of trees may be less
than the number specified in this parameter.
Default value:
500
\item border
The target border. If the value is strictly greater than this threshold,
it is considered a positive class. Otherwise it is considered a negative class.
The parameter is obligatory if the Logloss function is used, since it uses borders to transform
any given target to a binary target.
Used in binary classification.
Default value:
0.5
\item leaf_estimation_iterations
The number of gradient steps when calculating the values in leaves.
Default value:
1
\item depth
Depth of the tree.
The value can be any integer up to 32. It is recommended to use values in the range [1; 10].
Default value:
6
\item learning_rate
The learning rate.
Used for reducing the gradient step.
Default value:
0.03
\item rsm
Random subspace method. The percentage of features to use at each iteration of building trees. At each iteration, features are selected over again at random.
The value must be in the range [0;1].
Default value:
1
\item random_seed
The random seed used for training.
Default value:
A new random value is selected on each run
\item nan_mode
Way to process nan-values.
Possible values:
\itemize{
\item \code{'Min'}
\item \code{'Max'}
\item \code{'Forbidden'}
}
Default value:
\code{'Min'}
\item od_pval
Use the Overfitting detector (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/overfitting-detector-docpage/#overfitting-detector})
to stop training when the threshold is reached.
Requires that a test dataset was input.
For best results, it is recommended to set a value in the range [10^-10; 10^-2].
The larger the value, the earlier overfitting is detected.
Default value:
The overfitting detection is turned off
\item od_type
      The type of the overfitting detector to use.
Possible values:
\itemize{
\item IncToDec
\item Iter
}
Restriction.
Do not specify the overfitting detector threshold when using the Iter type.
Default value:
'IncToDec'
\item od_wait
The number of iterations to continue the training after the iteration with the optimal loss function value.
The purpose of this parameter differs depending on the selected overfitting detector type:
\itemize{
\item IncToDec — Ignore the overfitting detector when the threshold is reached and continue learning for the specified number of iterations after the iteration with the optimal loss function value.
\item Iter — Consider the model overfitted and stop training after the specified number of iterations since the iteration with the optimal loss function value.
}
Default value:
20
\item leaf_estimation_method
The method used to calculate the values in leaves.
Possible values:
\itemize{
\item Newton
\item Gradient
}
Default value:
Default value depends on the selected loss function
\item l2_leaf_reg
L2 regularization coefficient. Used for leaf value calculation.
Any positive values are allowed.
Default value:
3
\item model_size_reg
Model size regularization coefficient. The influence coefficient of the model size for choosing tree structure.
To get a smaller model size - increase this coefficient.
Any positive values are allowed.
Default value:
0.5
\item has_time
Use the order of objects in the input data (do not perform random permutations during the
Transforming categorical features to numerical features (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/#algorithm-main-stages_cat-to-numberic})
and Choosing the tree structure (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_choose-tree-structure-docpage/#algorithm-main-stages_choose-tree-structure}) stages).
Default value:
FALSE (not used; generate random permutations)
\item name
The experiment name to display in visualization tools (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/visualization-docpage/#visualization}).
Default value:
experiment
\item prediction_type
The format for displaying approximated values in output data.
Possible values:
\itemize{
\item 'Probability'
\item 'Class'
\item 'RawFormulaVal'
}
Default value:
\code{'RawFormulaVal'}
\item fold_len_multiplier
Coefficient for changing the length of folds.
The value must be greater than 1. The best validation result is achieved with minimum values.
With values close to 1 (for example, \eqn{1 + \epsilon}), each iteration takes a quadratic amount of memory and time
for the number of objects in the iteration. Thus, low values are possible only when there is a small number of objects.
Default value:
2
\item class_weights
Classes weights. The values are used as multipliers for the object weights.
Classes are indexed from 0 to classes count – 1. For example, in case of binary classification the classes are indexed 0 and 1.
For examples:
\code{c(0.85, 1.2, 1)}
Default value:
None (the weight for all classes is set to 1)
\item classes_count
The upper limit for the numeric class label. Defines the number of classes for multiclassification.
Only non-negative integers can be specified. The given integer should be greater than any of the target
values.
If this parameter is specified the labels for all classes in the input dataset should be smaller
than the given value.
Default value:
maximum class label + 1
\item one_hot_max_size
Convert the feature to float if the number of different values that it takes exceeds the specified value. Ctrs are not calculated for such features.
The one-vs.-all delimiter is used for the resulting float features.
Default value:
FALSE
Do not convert features to float based on the number of different values
\item random_strength
Score standard deviation multiplier.
Default value:
1
\item bagging_temperature
Controls intensity of Bayesian bagging. The higher the temperature the more aggressive bagging is.
Typical values are in the range \eqn{[0, 1]} (0 is for no bagging).
Possible values are in the range \eqn{[0, +\infty)}.
Default value:
1
}
\item CTR settings
\itemize{
\item ctr_description
Binarization settings for categorical features (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/#algorithm-main-stages_cat-to-numberic}).
Format:
\code{c(<CTR type 1>:[<number of borders 1>:<Binarization type 1>],...,<CTR type N>:[<number of borders N>:<Binarization type N>])}
Components:
\itemize{
\item CTR types:
\itemize{
\item \code{'Borders'}
\item \code{'Buckets'}
\item \code{'BinarizedTargetMeanValue'}
\item \code{'Counter'}
}
\item The number of borders for target binarization. (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/binarization-docpage/#binarization})
Only used for regression problems. Allowed values are integers from 1 to 255 inclusively. The default value is 1.
\item The binarization (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/binarization-docpage/#binarization})
type for the target. Only used for regression problems.
Possible values:
\itemize{
\item \code{'Median'}
\item \code{'Uniform'}
\item \code{'UniformAndQuantiles'}
\item \code{'MaxLogSum'}
\item \code{'MinEntropy'}
\item \code{'GreedyLogSum'}
}
By default, \code{'MinEntropy'}
}
Default value:
\item counter_calc_method
The method for calculating the Counter CTR type for the test dataset.
Possible values:
\itemize{
\item \code{'Full'}
\item \code{'FullTest'}
\item \code{'PrefixTest'}
\item \code{'SkipTest'}
}
Default value: \code{'PrefixTest'}
\item ctr_border_count
The number of splits for categorical features.
Allowed values are integers from 1 to 255 inclusively.
Default value:
50
\item max_ctr_complexity
The maximum number of categorical features that can be combined.
Default value:
4
\item ctr_leaf_count_limit
The maximum number of leafs with categorical features.
If the quantity exceeds the specified value a part of leafs is discarded.
The leafs to be discarded are selected as follows:
\enumerate{
\item The leafs are sorted by the frequency of the values.
\item The top N leafs are selected, where N is the value specified in the parameter.
\item All leafs starting from N+1 are discarded.
}
This option reduces the resulting model size and the amount of memory required for training.
Note that the resulting quality of the model can be affected.
Default value:
None (The number of leafs with categorical features is not limited)
\item store_all_simple_ctr
Ignore categorical features, which are not used in feature combinations,
when choosing candidates for exclusion.
Use this parameter with ctr-leaf-count-limit only.
Default value:
FALSE (Both simple features and feature combinations are taken in account when limiting the number of leafs with categorical features)
}
\item Binarization settings
\itemize{
\item border_count
The number of splits for numerical features. Allowed values are integers from 1 to 255 inclusively.
Default value:
32
\item feature_border_type
The binarization mode (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/binarization-docpage/#binarization})
for numerical features.
Possible values:
\itemize{
\item \code{'Median'}
\item \code{'Uniform'}
\item \code{'UniformAndQuantiles'}
\item \code{'MaxLogSum'}
\item \code{'MinEntropy'}
\item \code{'GreedyLogSum'}
}
Default value:
\code{'MinEntropy'}
}
\item Performance settings
\itemize{
\item thread_count
The number of threads to use when applying the model.
Allows you to optimize the speed of execution. This parameter doesn't affect results.
Default value:
Min(number of processor cores, 8)
}
\item Output settings
\itemize{
\item logging_level
Possible values:
\itemize{
\item \code{'Silent'}
\item \code{'Verbose'}
\item \code{'Info'}
\item \code{'Debug'}
}
Default value:
'Silent'
\item metric_period
The frequency of iterations to print the information to stdout. The value should be a positive integer.
Default value:
1
\item train_dir
The directory for storing the files generated during training.
Default value:
None (current catalog)
\item save_snapshot
Enable snapshotting for restoring the training progress after an interruption.
Default value:
None
\item snapshot_file
Settings for recovering training after an interruption (see
\url{https://tech.yandex.com/catboost/doc/dg/concepts/snapshots-docpage/#snapshots}).
Depending on whether the file specified exists in the file system:
\itemize{
\item Missing – write information about training progress to the specified file.
\item Exists – load data from the specified file and continue training from where it left off.
}
Default value:
File can't be generated or read. If the value is omitted, the file name is experiment.cbsnapshot.
\item allow_writing_files
If this flag is set to FALSE, no files with different diagnostic info will be created during training.
      With this flag set to FALSE no snapshotting can be done. Plus visualization will not
      work, because visualization uses files that are created and updated during training.
Default value:
TRUE
\item approx_on_full_history
      If this flag is set to TRUE, each approximated value is calculated using all the preceding rows in the fold (slower, more accurate).
If this flag is set to FALSE, each approximated value is calculated using only the beginning 1/fold_len_multiplier fraction of the fold (faster, slightly less accurate).
Default value:
FALSE
\item boosting_type
Boosting scheme.
Possible values:
- 'Dynamic' - Gives better quality, but may slow down the training.
- 'Plain' - The classic gradient boosting scheme. May result in quality degradation, but does not slow down the training.
Default value:
'Dynamic'
}
}
}
\examples{
fit_params <- list(iterations = 100,
thread_count = 10,
loss_function = 'Logloss',
ignored_features = c(4,9),
border_count = 32,
depth = 5,
learning_rate = 0.03,
l2_leaf_reg = 3.5,
border = 0.5,
train_dir = 'train_dir')
model <- catboost.train(pool, test_pool, fit_params)
}
\seealso{
\url{https://tech.yandex.com/catboost/doc/dg/concepts/r-reference_catboost-train-docpage/}
}
| /catboost/R-package/man/catboost.train.Rd | permissive | VLVLKY/catboost | R | false | true | 19,588 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/catboost.R
\name{catboost.train}
\alias{catboost.train}
\title{Train the model}
\usage{
catboost.train(learn_pool, test_pool = NULL, params = list())
}
\arguments{
\item{learn_pool}{The dataset used for training the model.
Default value: Required argument}
\item{test_pool}{The dataset used for testing the quality of the model.
Default value: NULL (not used)}
\item{params}{The list of parameters to start training with.
If omitted, default values are used (see The list of parameters).
If set, the passed list of parameters overrides the default values.
Default value: Required argument}
}
\description{
Train the model using a CatBoost dataset.
}
\details{
The list of parameters
\itemize{
\item Common parameters
\itemize{
\item fold_permutation_block_size
Objects in the dataset are grouped in blocks before the random permutations.
This parameter defines the size of the blocks.
The smaller is the value, the slower is the training.
Large values may result in quality degradation.
Default value:
Default value differs depending on the dataset size and ranges from 1 to 256 inclusively
\item ignored_features
Identifiers of features to exclude from training.
The non-negative indices that do not match any features are successfully ignored.
For example, if five features are defined for the objects in the dataset and this parameter
is set to “42”, the corresponding non-existing feature is successfully ignored.
The identifier corresponds to the feature's index.
Feature indices used in train and feature importance are numbered from 0 to featureCount – 1.
If a file is used as input data then any non-feature column types are ignored when calculating these
indices. For example, each row in the input file contains data in the following order:
categorical feature<\code{\t}>target value<\code{\t}> numerical feature. So for the row rock<\code{\t}>0 <\code{\t}>42,
the identifier for the “rock” feature is 0, and for the “42” feature it's 1.
   The identifiers of features to exclude should be enumerated in a vector.
   For example, if training should exclude features with the identifiers
   1, 2, 7, 42, 43, 44, 45, the value of this parameter should be set to c(1,2,7,42,43,44,45).
Default value:
None (use all features)
\item use_best_model
If this parameter is set, the number of trees that are saved in the resulting model is defined as follows:
Build the number of trees defined by the training parameters.
\itemize{
\item Identify the iteration with the optimal loss function value.
\item No trees are saved after this iteration.
}
This option requires a test dataset to be provided.
Default value:
FALSE (not used)
\item loss_function
The loss function (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/loss-functions-docpage/#loss-functions})
to use in training. The specified value also determines the machine learning problem to solve.
Format:
<Loss function 1>[:<parameter 1>=<value>:..<parameter N>=<value>:]
Supported loss functions:
\itemize{
\item 'Logloss'
\item 'CrossEntropy'
\item 'RMSE'
\item 'MAE'
\item 'Quantile'
\item 'LogLinQuantile'
\item 'MAPE'
\item 'Poisson'
\item 'QueryRMSE'
\item 'MultiClass'
\item 'MultiClassOneVsAll'
\item 'PairLogit'
}
Supported parameters:
\itemize{
\item alpha - The coefficient used in quantile-based losses ('Quantile' and 'LogLinQuantile'). The default value is 0.5.
For example, if you need to calculate the value of Quantile with the coefficient \eqn{\alpha = 0.1}, use the following construction:
'Quantile:alpha=0.1'
}
Default value:
'RMSE'
\item custom_loss
Loss function (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/loss-functions-docpage/#loss-functions})
values to output during training.
These functions are not optimized and are displayed for informational purposes only.
Format:
c(<Loss function 1>[:<parameter>=<value>],<Loss function 2>[:<parameter>=<value>],...,<Loss function N>[:<parameter>=<value>])
Supported loss functions:
\itemize{
\item 'Logloss'
\item 'CrossEntropy'
\item 'RMSE'
\item 'MAE'
\item 'Quantile'
\item 'LogLinQuantile'
\item 'MAPE'
\item 'Poisson'
\item 'QueryRMSE'
\item 'MultiClass'
\item 'MultiClassOneVsAll'
\item 'PairLogit'
\item 'R2'
\item 'AUC'
\item 'Accuracy'
\item 'Precision'
\item 'Recall'
\item 'F1'
\item 'TotalF1'
\item 'MCC'
\item 'PairAccuracy'
}
Supported parameters:
\itemize{
\item alpha - The coefficient used in quantile-based losses ('Quantile' and 'LogLinQuantile'). The default value is 0.5.
}
For example, if you need to calculate the value of CrossEntropy and Quantile with the coefficient \eqn{\alpha = 0.1}, use the following construction:
c('CrossEntropy') or simply 'CrossEntropy'.
Values of all custom loss functions for learning and test datasets are saved to the Loss function
(see \url{https://tech.yandex.com/catboost/doc/dg/concepts/output-data_error-functions-docpage/#output-data_error-functions})
output files (learn_error.tsv and test_error.tsv respectively). The catalog for these files is specified in the train-dir (train_dir) parameter.
Default value:
None (use one of the loss functions supported by the library)
\item eval_metric
The loss function used for overfitting detection (if enabled) and best model selection (if enabled).
Supported loss functions:
\itemize{
\item 'Logloss'
\item 'CrossEntropy'
\item 'RMSE'
\item 'MAE'
\item 'Quantile'
\item 'LogLinQuantile'
\item 'MAPE'
\item 'Poisson'
\item 'QueryRMSE'
\item 'MultiClass'
\item 'MultiClassOneVsAll'
\item 'PairLogit'
\item 'R2'
\item 'AUC'
\item 'Accuracy'
\item 'Precision'
\item 'Recall'
\item 'F1'
\item 'TotalF1'
\item 'MCC'
\item 'PairAccuracy'
}
Format:
metric_name:param=Value
Examples:
\code{'R2'}
\code{'Quantile:alpha=0.3'}
Default value:
Optimized objective is used
\item iterations
The maximum number of trees that can be built when solving machine learning problems.
When using other parameters that limit the number of iterations, the final number of trees may be less
than the number specified in this parameter.
Default value:
500
\item border
The target border. If the value is strictly greater than this threshold,
it is considered a positive class. Otherwise it is considered a negative class.
The parameter is obligatory if the Logloss function is used, since it uses borders to transform
any given target to a binary target.
Used in binary classification.
Default value:
0.5
\item leaf_estimation_iterations
The number of gradient steps when calculating the values in leaves.
Default value:
1
\item depth
Depth of the tree.
The value can be any integer up to 32. It is recommended to use values in the range [1; 10].
Default value:
6
\item learning_rate
The learning rate.
Used for reducing the gradient step.
Default value:
0.03
\item rsm
Random subspace method. The percentage of features to use at each iteration of building trees. At each iteration, features are selected over again at random.
The value must be in the range [0;1].
Default value:
1
\item random_seed
The random seed used for training.
Default value:
A new random value is selected on each run
\item nan_mode
Way to process nan-values.
Possible values:
\itemize{
\item \code{'Min'}
\item \code{'Max'}
\item \code{'Forbidden'}
}
Default value:
\code{'Min'}
\item od_pval
Use the Overfitting detector (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/overfitting-detector-docpage/#overfitting-detector})
to stop training when the threshold is reached.
Requires that a test dataset was input.
For best results, it is recommended to set a value in the range [10^-10; 10^-2].
The larger the value, the earlier overfitting is detected.
Default value:
The overfitting detection is turned off
\item od_type
      The type of the overfitting detector to use.
Possible values:
\itemize{
\item IncToDec
\item Iter
}
Restriction.
Do not specify the overfitting detector threshold when using the Iter type.
Default value:
'IncToDec'
\item od_wait
The number of iterations to continue the training after the iteration with the optimal loss function value.
The purpose of this parameter differs depending on the selected overfitting detector type:
\itemize{
\item IncToDec — Ignore the overfitting detector when the threshold is reached and continue learning for the specified number of iterations after the iteration with the optimal loss function value.
\item Iter — Consider the model overfitted and stop training after the specified number of iterations since the iteration with the optimal loss function value.
}
Default value:
20
\item leaf_estimation_method
The method used to calculate the values in leaves.
Possible values:
\itemize{
\item Newton
\item Gradient
}
Default value:
Default value depends on the selected loss function
\item l2_leaf_reg
L2 regularization coefficient. Used for leaf value calculation.
Any positive values are allowed.
Default value:
3
\item model_size_reg
Model size regularization coefficient. The influence coefficient of the model size for choosing tree structure.
To get a smaller model size - increase this coefficient.
Any positive values are allowed.
Default value:
0.5
\item has_time
Use the order of objects in the input data (do not perform random permutations during the
Transforming categorical features to numerical features (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/#algorithm-main-stages_cat-to-numberic})
and Choosing the tree structure (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_choose-tree-structure-docpage/#algorithm-main-stages_choose-tree-structure}) stages).
Default value:
FALSE (not used; generate random permutations)
\item name
The experiment name to display in visualization tools (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/visualization-docpage/#visualization}).
Default value:
experiment
\item prediction_type
The format for displaying approximated values in output data.
Possible values:
\itemize{
\item 'Probability'
\item 'Class'
\item 'RawFormulaVal'
}
Default value:
\code{'RawFormulaVal'}
\item fold_len_multiplier
Coefficient for changing the length of folds.
The value must be greater than 1. The best validation result is achieved with minimum values.
With values close to 1 (for example, \eqn{1 + \epsilon}), each iteration takes a quadratic amount of memory and time
for the number of objects in the iteration. Thus, low values are possible only when there is a small number of objects.
Default value:
2
\item class_weights
Classes weights. The values are used as multipliers for the object weights.
Classes are indexed from 0 to classes count – 1. For example, in case of binary classification the classes are indexed 0 and 1.
For examples:
\code{c(0.85, 1.2, 1)}
Default value:
None (the weight for all classes is set to 1)
\item classes_count
The upper limit for the numeric class label. Defines the number of classes for multiclassification.
Only non-negative integers can be specified. The given integer should be greater than any of the target
values.
If this parameter is specified the labels for all classes in the input dataset should be smaller
than the given value.
Default value:
maximum class label + 1
\item one_hot_max_size
Convert the feature to float if the number of different values that it takes exceeds the specified value. Ctrs are not calculated for such features.
The one-vs.-all delimiter is used for the resulting float features.
Default value:
FALSE
Do not convert features to float based on the number of different values
\item random_strength
Score standard deviation multiplier.
Default value:
1
\item bagging_temperature
Controls intensity of Bayesian bagging. The higher the temperature the more aggressive bagging is.
Typical values are in the range \eqn{[0, 1]} (0 is for no bagging).
Possible values are in the range \eqn{[0, +\infty)}.
Default value:
1
}
\item CTR settings
\itemize{
\item ctr_description
Binarization settings for categorical features (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/#algorithm-main-stages_cat-to-numberic}).
Format:
\code{c(<CTR type 1>:[<number of borders 1>:<Binarization type 1>],...,<CTR type N>:[<number of borders N>:<Binarization type N>])}
Components:
\itemize{
\item CTR types:
\itemize{
\item \code{'Borders'}
\item \code{'Buckets'}
\item \code{'BinarizedTargetMeanValue'}
\item \code{'Counter'}
}
\item The number of borders for target binarization. (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/binarization-docpage/#binarization})
Only used for regression problems. Allowed values are integers from 1 to 255 inclusively. The default value is 1.
\item The binarization (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/binarization-docpage/#binarization})
type for the target. Only used for regression problems.
Possible values:
\itemize{
\item \code{'Median'}
\item \code{'Uniform'}
\item \code{'UniformAndQuantiles'}
\item \code{'MaxLogSum'}
\item \code{'MinEntropy'}
\item \code{'GreedyLogSum'}
}
By default, \code{'MinEntropy'}
}
Default value:
\item counter_calc_method
The method for calculating the Counter CTR type for the test dataset.
Possible values:
\itemize{
\item \code{'Full'}
\item \code{'FullTest'}
\item \code{'PrefixTest'}
\item \code{'SkipTest'}
}
Default value: \code{'PrefixTest'}
\item ctr_border_count
The number of splits for categorical features.
Allowed values are integers from 1 to 255 inclusively.
Default value:
50
\item max_ctr_complexity
The maximum number of categorical features that can be combined.
Default value:
4
\item ctr_leaf_count_limit
The maximum number of leafs with categorical features.
If the quantity exceeds the specified value a part of leafs is discarded.
The leafs to be discarded are selected as follows:
\enumerate{
\item The leafs are sorted by the frequency of the values.
\item The top N leafs are selected, where N is the value specified in the parameter.
\item All leafs starting from N+1 are discarded.
}
This option reduces the resulting model size and the amount of memory required for training.
Note that the resulting quality of the model can be affected.
Default value:
None (The number of leafs with categorical features is not limited)
\item store_all_simple_ctr
Ignore categorical features, which are not used in feature combinations,
when choosing candidates for exclusion.
Use this parameter with ctr-leaf-count-limit only.
Default value:
FALSE (Both simple features and feature combinations are taken in account when limiting the number of leafs with categorical features)
}
\item Binarization settings
\itemize{
\item border_count
The number of splits for numerical features. Allowed values are integers from 1 to 255 inclusively.
Default value:
32
\item feature_border_type
The binarization mode (see \url{https://tech.yandex.com/catboost/doc/dg/concepts/binarization-docpage/#binarization})
for numerical features.
Possible values:
\itemize{
\item \code{'Median'}
\item \code{'Uniform'}
\item \code{'UniformAndQuantiles'}
\item \code{'MaxLogSum'}
\item \code{'MinEntropy'}
\item \code{'GreedyLogSum'}
}
Default value:
\code{'MinEntropy'}
}
\item Performance settings
\itemize{
\item thread_count
The number of threads to use when applying the model.
Allows you to optimize the speed of execution. This parameter doesn't affect results.
Default value:
Min(number of processor cores, 8)
}
\item Output settings
\itemize{
\item logging_level
Possible values:
\itemize{
\item \code{'Silent'}
\item \code{'Verbose'}
\item \code{'Info'}
\item \code{'Debug'}
}
Default value:
'Silent'
\item metric_period
The frequency of iterations to print the information to stdout. The value should be a positive integer.
Default value:
1
\item train_dir
The directory for storing the files generated during training.
Default value:
None (current catalog)
\item save_snapshot
Enable snapshotting for restoring the training progress after an interruption.
Default value:
None
\item snapshot_file
Settings for recovering training after an interruption (see
\url{https://tech.yandex.com/catboost/doc/dg/concepts/snapshots-docpage/#snapshots}).
Depending on whether the file specified exists in the file system:
\itemize{
\item Missing – write information about training progress to the specified file.
\item Exists – load data from the specified file and continue training from where it left off.
}
Default value:
File can't be generated or read. If the value is omitted, the file name is experiment.cbsnapshot.
\item allow_writing_files
If this flag is set to FALSE, no files with different diagnostic info will be created during training.
With this flag set to FALSE no snapshotting can be done. Plus visualisation will not
work, because visualisation uses files that are created and updated during training.
Default value:
TRUE
\item approx_on_full_history
If this flag is set to TRUE, each approximated value is calculated using all the preceding rows in the fold (slower, more accurate).
If this flag is set to FALSE, each approximated value is calculated using only the beginning 1/fold_len_multiplier fraction of the fold (faster, slightly less accurate).
Default value:
FALSE
\item boosting_type
Boosting scheme.
Possible values:
- 'Dynamic' - Gives better quality, but may slow down the training.
- 'Plain' - The classic gradient boosting scheme. May result in quality degradation, but does not slow down the training.
Default value:
'Dynamic'
}
}
}
\examples{
fit_params <- list(iterations = 100,
thread_count = 10,
loss_function = 'Logloss',
ignored_features = c(4,9),
border_count = 32,
depth = 5,
learning_rate = 0.03,
l2_leaf_reg = 3.5,
border = 0.5,
train_dir = 'train_dir')
model <- catboost.train(pool, test_pool, fit_params)
}
\seealso{
\url{https://tech.yandex.com/catboost/doc/dg/concepts/r-reference_catboost-train-docpage/}
}
|
# Build the south-polar stereographic PROJ.4 string shared by every mosaic
# region, centred on the given longitude `clon` (decimal degrees).
.baseprj <- function(clon) {
  paste0(
    "+proj=stere +lon_0=", sprintf("%f", clon),
    " +lat_0=-90 +lat_ts=-70 +k=1 +x_0=0 +y_0=0",
    " +a=6378273 +b=6356889.449 +units=m +no_defs"
  )
}
# Bundle one region's control points into a named list: the pixel-space box
# (xmin..ymax), the matching lon/lat box, and the region's projection string.
.mkregion <-
  function(xmin, xmax, ymin, ymax, lonmin, lonmax, latmin, latmax, proj) {
    fields <- list(xmin, xmax, ymin, ymax,
                   lonmin, lonmax, latmin, latmax,
                   proj)
    names(fields) <- c("xmin", "xmax", "ymin", "ymax",
                       "lonmin", "lonmax", "latmin", "latmax",
                       "proj")
    fields
  }
#.regionnames <- c("casey", "davis", "durville", "mawson", "shackleton", "terranova",
# "westice", "ragnhild", "enderby", "capeadare", "sabrina")
# Map a region name to its two-digit BOM product index (as a string);
# unknown names yield NA, matching named-vector subsetting semantics.
.regionindex <- function(name) {
  lookup <- c(
    casey = "21", davis = "22", durville = "23", mawson = "24",
    shackleton = "25", terranova = "26", westice = "27", ragnhild = "28",
    enderby = "41", capeadare = "42", sabrina = "46"
  )
  lookup[name]
}
# Compose the BOM product token for a region: "IDTE9" followed by the
# two-digit index from .regionindex() (defined above in this file).
.token <- function(idx) {
  paste0("IDTE9", .regionindex(idx))
}
## Per-region georeferencing metadata for the ACE CRC AVHRR mosaics.
## For each named region this returns a list with the pixel-space control box
## (xmin/xmax/ymin/ymax), the corresponding lon/lat control box, and the
## polar stereographic projection centred on the region; the BOM product
## token is attached before return.  The numeric constants are calibration
## values read off the mosaic images -- do not reformat or "simplify" them.
.regions <- function(name) {
  # MOSAIC at http://avhrr.acecrc.org.au/mosaics/
  ## projection "+proj=stere +lon_0=105 +lat_0=-90 +lat_ts=-70 +k=1 +x_0=0 +y_0=0 +a=6378273 +b=6356889.449 +units=m +no_defs"
  ## projected c(xmin = -2502020, xmax = 2492591, ymin = 318842, ymax = 4067990)
  ## pixel c(365, 1060, 34, 578)
  ## lonlat c(40, 140, -80, -60)
  x <- switch(
    name,
    casey = .mkregion(178, 401, 181, 408,
                      105, 110, -66, -64,
                      proj = .baseprj(110)),
    davis = .mkregion(
      # xmin = 135, xmax = 562, ymin = 187, ymax = 394,
      xmin = 160, xmax = 575, ymin = 179, ymax = 402,
      lonmin = 70, lonmax = 80, latmin = -68, latmax = -66,
      proj = .baseprj(76)
    ),
    durville = .mkregion(279, 704, 101, 775,
                         140, 150, -68, -62,
                         proj = .baseprj(148)),
    mawson = .mkregion(238, 447, 77, 517,
                       60, 65, -68, -64,
                       proj = .baseprj(64)),
    shackleton = .mkregion(118, 783, 65, 491,
                           90, 105, -68, -64,
                           proj = .baseprj(97)),
    terranova = .mkregion(181, 546, 27, 462,
                          160, 175, -78, -74,
                          proj = .baseprj(170)),
    westice = .mkregion(77, 496, 114, 571,
                        80, 90, -68, -64,
                        proj = .baseprj(88)),
    ragnhild = .mkregion(175, 908, 81, 755,
                         10, 30, -72, -66,
                         proj = .baseprj(23)),
    enderby = .mkregion(270, 887, 101, 763,
                        40, 55, -70, -64,
                        proj = .baseprj(49)),
    capeadare = .mkregion(167, 473, 70, 513,
                          160, 170, -74, -70,
                          proj = .baseprj(168)),
    sabrina = .mkregion(118, 782, 66, 490,
                        115, 130, -68, -64,
                        proj = .baseprj(122))
  )
  # Attach the BOM product token (e.g. "IDTE921" for casey).
  x$token <- .token(name)
  x
}
#' Download and georeference a BOM AVHRR sea-ice mosaic image
#'
#' Fetches the most recent available satellite pass of the Bureau of
#' Meteorology AVHRR mosaic GIF for an East Antarctic region, reads it as a
#' raster, and assigns a projected extent and CRS derived from the region's
#' known pixel / lon-lat control points (see \code{.regions}).
#'
#' @param date date of the image to fetch; defaults to yesterday
#' @param region region name; one of the mosaic regions in the default vector
#' @param band band name ("infrared" or "visible"), or a numeric index
#'   (1 = infrared, 2 = visible)
#'
#' @return a RasterLayer with extent and projection set
#' @export
#'
#' @examples
#' \dontrun{
#' dates <- Sys.Date() - c(1, 2, 3, 4, 5)
#' for (i in seq_along(dates)) {
#'   r <- asosi(dates[i])
#'   writeRaster(r, sprintf("infrared%s.tif", format(dates[i])))
#'   r2 <- asosi(dates[i], band = "visible")
#'   writeRaster(r2, sprintf("visible%s.tif", format(dates[i])))
#' }
#' ## prepare an object to build graticule lines
#' temp <- as(extent(r), "SpatialPolygons")
#' projection(temp) <- projection(r)
#'
#' plot(r); llgridlines(temp)
#' }
asosi <-
  function(date, region = c(
    "casey", "davis", "durville", "mawson", "shackleton", "terranova",
    "westice", "ragnhild", "enderby", "capeadare", "sabrina"
  ),
  band = c("infrared", "visible")) {
    ## URL shape, e.g.:
    ## http://www.bom.gov.au/fwo/IDTE9221/IDTE9221.0223.4D.gif
    ## accept 1 (IR) or 2 (VIS)
    if (missing(date)) date <- Sys.Date() - 1
    band <- band[1L]
    if (is.numeric(band))
      band <- c("infrared", "visible")[band]
    ## candidate pass numbers, most recent first (odd for IR, even for VIS)
    pp <- seq(9, 1, by = -2) - c(infrared = 0, visible = 1)[band]
    app <- c(infrared = 2, visible = 1)[band]
    region <- match.arg(region)
    regionObj <- .regions(region)
    token <- sprintf("%s%s", regionObj$token, as.character(app))
    ## Try each pass until one downloads and reads as a raster.
    r <- NULL
    fname <- NULL
    for (ipop in seq_along(pp)) {
      fname <-
        sprintf(
          "http://www.bom.gov.au/fwo/%s/%s.%s.%sD.gif",
          token, token, format(date, "%m%d"), as.character(pp[ipop])
        )
      tfile <- file.path(tempdir(), basename(fname))
      if (!file.exists(tfile)) {
        d <- try(download.file(fname, tfile, mode = "wb"))
      }
      r <- try(raster(tfile))
      if (!inherits(r, "try-error")) {
        break
      }
    }
    ## FIX: the original placed a stop() *after* return(pd), where it could
    ## never run, and then used the failed try-error object anyway.  Fail
    ## loudly here if no pass could be downloaded and read.
    if (is.null(r) || inherits(r, "try-error")) {
      stop("cannot find file for region '", region, "' on ", format(date),
           "; last URL tried: ", fname)
    }
    prj <- regionObj$proj
    rawxy <-
      matrix(unlist(regionObj[c("xmin", "xmax", "ymin", "ymax")]), ncol = 2)
    llpts <-
      matrix(unlist(regionObj[c("lonmin", "lonmax", "latmin", "latmax")]), ncol = 2)
    pts <- project(llpts, prj)
    ## do the math
    ## scale = size of pixels in X/Y
    ## offset = bottom left corner of bottom left pixel
    scalex <- diff(pts[, 1]) / diff(rawxy[, 1])
    scaley <- diff(pts[, 2]) / diff(rawxy[, 2])
    offsetx <- pts[1, 1] - rawxy[1, 1] * scalex
    offsety <- pts[1, 2] - rawxy[1, 2] * scaley
    ## x0, (x0 + ncol * pixelX), y0, (y0 + nrow * pixelY)
    pex <-
      extent(offsetx, offsetx + scalex * (ncol(r) + 1),
             offsety, offsety + scaley * (nrow(r) + 1))
    ## override raw index-transform applied to input image
    pd <- setExtent(r, pex)
    projection(pd) <- prj
    pd
  }
| /R/asosi.R | no_license | mdsumner/asosi | R | false | false | 6,010 | r | .baseprj <- function(clon) {
sprintf(
"+proj=stere +lon_0=%f +lat_0=-90 +lat_ts=-70 +k=1 +x_0=0 +y_0=0 +a=6378273 +b=6356889.449 +units=m +no_defs",
clon
)
}
.mkregion <-
function(xmin, xmax, ymin, ymax, lonmin, lonmax, latmin, latmax, proj) {
list(
xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax,
lonmin = lonmin, lonmax = lonmax, latmin = latmin, latmax = latmax,
proj = proj
)
}
#.regionnames <- c("casey", "davis", "durville", "mawson", "shackleton", "terranova",
# "westice", "ragnhild", "enderby", "capeadare", "sabrina")
.regionindex <- function(name) {
c(
"casey" = "21", "davis" = "22", "durville" = "23", "mawson" = "24", "shackleton" = "25", "terranova" = "26",
"westice" = "27", "ragnhild" = "28", "enderby" = "41", "capeadare" = "42", "sabrina" = "46"
)[name]
}
.token <- function(idx) {
sprintf("IDTE9%s", .regionindex(idx))
}
.regions <- function(name) {
# MOSAIC at http://avhrr.acecrc.org.au/mosaics/
## projection "+proj=stere +lon_0=105 +lat_0=-90 +lat_ts=-70 +k=1 +x_0=0 +y_0=0 +a=6378273 +b=6356889.449 +units=m +no_defs"
## projected c(xmin = -2502020, xmax = 2492591, ymin = 318842, ymax = 4067990)
## pixel c(365, 1060, 34, 578)
## lonlat c(40, 140, -80, -60)
x <- switch(
name,
casey = .mkregion(178, 401, 181, 408,
105, 110,-66,-64,
proj = .baseprj(110)),
davis = .mkregion(
# xmin = 135, xmax = 562, ymin = 187, ymax = 394,
xmin = 160, xmax = 575, ymin = 179, ymax = 402,
lonmin = 70, lonmax = 80, latmin = -68, latmax = -66,
proj = .baseprj(76)
),
durville = .mkregion(279, 704, 101, 775,
140, 150,-68,-62,
proj = .baseprj(148)),
mawson = .mkregion(238, 447, 77, 517,
60, 65, -68,-64,
proj = .baseprj(64)),
shackleton = .mkregion(118, 783, 65, 491,
90, 105, -68,-64,
proj = .baseprj(97)) ,
terranova = .mkregion(181, 546, 27, 462,
160, 175, -78,-74,
proj = .baseprj(170)),
westice = .mkregion(77, 496, 114, 571,
80, 90, -68,-64,
proj = .baseprj(88)),
ragnhild = .mkregion(175, 908, 81, 755,
10, 30, -72,-66,
proj = .baseprj(23)),
enderby = .mkregion(270, 887, 101, 763,
40, 55, -70,-64,
proj = .baseprj(49)),
capeadare = .mkregion(167, 473, 70, 513,
160, 170, -74,-70,
proj = .baseprj(168))
,
sabrina = .mkregion(118, 782, 66, 490,
115, 130, -68,-64,
proj = .baseprj(122))
)
x$token <- .token(name)
x
}
#' Title
#'
#' @param date
#' @param region
#' @param band
#'
#' @export
#'
#' @examples
#' \dontrun{
#' dates <- Sys.Date() - c(1, 2, 3, 4, 5)
#' for (i in seq_along(dates)) {
#' r <- asosi(dates[i])
#' writeRaster(r, sprintf("infrared%s.tif", format(dates[i])))
#' r2 <- asosi(dates[i], band = "visible")
#' writeRaster(r2, sprintf("visible%s.tif", format(dates[i])))
#' }
#' ## prepare an object to build graticule lines
#' temp <- as(extent(r), "SpatialPolygons")
#' #' projection(temp) <- projection(r)
#'
#' plot(r);llgridlines(temp)
#' }
asosi <-
function(date, region = c(
"casey", "davis", "durville", "mawson", "shackleton", "terranova",
"westice", "ragnhild", "enderby", "capeadare", "sabrina"
),
band = c("infrared", "visible")) {
##http://www.bom.gov.au/fwo/IDTE9221/IDTE9221.0223.4D.gif
##http://www.bom.gov.au/fwo/IDTE9222/IDTE9222.0224.1D.gif
##http://www.bom.gov.au/fwo/IDTE9222/IDTE9222.0223.3D.gif
##http://www.bom.gov.au/fwo/IDTE9221/IDTE9221.0223.4D.gif
## accept 1 (IR) or 2 (VIS)
if (missing(date)) date <- Sys.Date() - 1
band <- band[1L]
if (is.numeric(band))
band <- c("infrared", "visible")[band]
pp <- seq(9, 1, by = -2) - c(infrared = 0, visible = 1)[band]
app <- c(infrared = 2, visible = 1)[band]
region <- match.arg(region)
regionObj <- .regions(region)
## Durville
##llpts <- cbind(c(140, 150), c(-68, -62))
##centre <- "148"
token <- sprintf("%s%s", regionObj$token, as.character(app))
for (ipop in seq_along(pp)) {
fname <-
sprintf(
"http://www.bom.gov.au/fwo/%s/%s.%s.%sD.gif", token, token, format(date, "%m%d"), as.character(pp[ipop])
)
tfile <- file.path(tempdir(), basename(fname))
if (!file.exists(tfile)) {
d <- try(download.file(fname, tfile, mode = "wb"))
}
r <- try(raster(tfile))
if (!inherits(r, "try-error")) {
break;
}
}
prj <- regionObj$proj
rawxy <-
matrix(unlist(regionObj[c("xmin", "xmax", "ymin", "ymax")]), ncol = 2)
llpts <-
matrix(unlist(regionObj[c("lonmin", "lonmax", "latmin", "latmax")]), ncol = 2)
pts <- project(llpts, prj)
## do the math
## scale = size of pixels in X/Y
## offset = bottom left corner of bottom left pixel)
scalex <- diff(pts[, 1]) / diff(rawxy[, 1])
scaley <- diff(pts[, 2]) / diff(rawxy[, 2])
offsetx <- pts[1,1] - rawxy[1,1] * scalex
offsety <- pts[1,2] - rawxy[1,2] * scaley
## x0, (x0 + ncol * pixelX), y0, (y0 + nrow * pixelY)
pex <-
extent(offsetx, offsetx + scalex * (ncol(r) + 1), offsety, offsety + scaley * (nrow(r) + 1))
## override raw index-transform applied to input image
pd <- setExtent(r, pex)
projection(pd) <- prj
## prepare an object to build graticule lines
temp <- as(extent(pd), "SpatialPolygons")
projection(temp) <- prj
return(pd)
stop("cannot find file at", fname, "or", gsub("3D", pp, fname))
}
|
\docType{class}
\name{IncrRBFN_C}
\alias{IncrRBFN_C}
\alias{R6_IncrRBFN_C}
\title{IncrRBFN_C KEEL Classification Algorithm}
\description{
IncrRBFN_C Classification Algorithm from KEEL.
}
\usage{
IncrRBFN_C(train, test, epsilon, alfa, delta, seed)
}
\arguments{
\item{train}{Train dataset as a data.frame object}
\item{test}{Test dataset as a data.frame object}
\item{epsilon}{epsilon. Default value = 0.1}
\item{alfa}{alfa. Default value = 0.3}
\item{delta}{delta. Default value = 0.5}
\item{seed}{Seed for random numbers. If it is not assigned a value, the seed will be a random number}
}
\value{
A data.frame with the actual and predicted classes for both \code{train} and \code{test} datasets.
}
\examples{
data_train <- RKEEL::loadKeelDataset("iris_train")
data_test <- RKEEL::loadKeelDataset("iris_test")
#Create algorithm
algorithm <- RKEEL::IncrRBFN_C(data_train, data_test)
#Run algorithm
algorithm$run()
#See results
algorithm$testPredictions
}
\keyword{classification}
| /man/Incr-RBFN-C.Rd | no_license | terry07/RKEEL | R | false | false | 984 | rd | \docType{class}
\name{IncrRBFN_C}
\alias{IncrRBFN_C}
\alias{R6_IncrRBFN_C}
\title{IncrRBFN_C KEEL Classification Algorithm}
\description{
IncrRBFN_C Classification Algorithm from KEEL.
}
\usage{
IncrRBFN_C(train, test, epsilon, alfa, delta, seed)
}
\arguments{
\item{train}{Train dataset as a data.frame object}
\item{test}{Test dataset as a data.frame object}
\item{epsilon}{epsilon. Default value = 0.1}
\item{alfa}{alfa. Default value = 0.3}
\item{delta}{delta. Default value = 0.5}
\item{seed}{Seed for random numbers. If it is not assigned a value, the seed will be a random number}
}
\value{
A data.frame with the actual and predicted classes for both \code{train} and \code{test} datasets.
}
\examples{
data_train <- RKEEL::loadKeelDataset("iris_train")
data_test <- RKEEL::loadKeelDataset("iris_test")
#Create algorithm
algorithm <- RKEEL::IncrRBFN_C(data_train, data_test)
#Run algorithm
algorithm$run()
#See results
algorithm$testPredictions
}
\keyword{classification}
|
# Hierarchical clustering of an expression matrix with dynamic tree cutting.
#
# Usage: Rscript <script> <exprs_file> <working_dir> <output_file>
#   exprs_file  tab-delimited matrix with a header row and row names in col 1
#   working_dir directory to setwd() into before reading/writing
#   output_file path for the tab-delimited cluster-assignment table
args <- commandArgs(TRUE)
exprs <- args[1]
#distance = args[2]
#n = args[3]
#linkage = args[4]
path <- args[2]
path_output <- args[3]
library(amap)
library(dynamicTreeCut)
setwd(path)
# FIX: the original coerced the file path with as.matrix() before passing it
# to read.table(); that coercion was a no-op on a length-1 path and is removed.
exprs <- read.table(exprs, header = TRUE, row.names = 1, sep = "\t")
# Euclidean distance on the raw matrix; hclust() uses its default
# (complete) linkage, as in the original.
d <- Dist(as.matrix(exprs), method = "euclidean")
tree.euclidian <- hclust(d)
# Dynamic tree cut chooses cluster boundaries adaptively; deepSplit = 2 is
# moderate split sensitivity.
cut2 <- cutreeDynamic(tree.euclidian, distM = as.matrix(d), deepSplit = 2)
#clusters.euclidian50 <- cutree(tree.euclidian,k=n)
list1 <- as.list(cut2)
write.table(as.matrix(list1), path_output, sep = "\t")
| /Clustering/hclust_dynamic_031414.R | no_license | anandksrao/Gene_coexpression_scripts | R | false | false | 568 | r | args = commandArgs(TRUE)
exprs = args[1]
#distance = args[2]
#n = args[3]
#linkage = args[4]
path = args[2]
path_output = args[3]
library('amap')
library('dynamicTreeCut')
setwd(path)
exprs = as.matrix(exprs)
exprs = read.table(exprs, header=T,row.names=1,sep="\t")
d <- Dist(as.matrix(exprs), method="euclidean")
tree.euclidian <- hclust(d)
cut2 <-cutreeDynamic(tree.euclidian, distM = as.matrix(d), deepSplit=2)
#clusters.euclidian50 <- cutree(tree.euclidian,k=n)
list1 <- as.list(cut2)
write.table(as.matrix(list1), path_output, sep="\t")
|
#####################################################################################
### This script age-split aggregated age data by using DisMod output
####################################################################################
# Environment setup: packages, run constants, shared functions, input data.
pacman::p_load(data.table, openxlsx, ggplot2, magrittr)
# NOTE(review): this formatted date is immediately overwritten on the next
# line; the underscore formatting is re-applied further down, so this first
# gsub() call is dead code.
date <- gsub("-", "_", Sys.Date())
date <- Sys.Date()
# GET OBJECTS -------------------------------------------------------------
b_id <- "BUNDLE_ID" #this is bundle ID
a_cause <- "digest_ibd"
name <- "UC"
# Date stamp used in note_modeler strings (e.g. "2020_01_31").
date <- gsub("-", "_", date)
# Column names of the 1000 DisMod draws pulled by get_draws().
draws <- paste0("draw_", 0:999)
# SET FUNCTIONS ------------------------------------------------------------
# NOTE(review): "FILEPATH" strings are scrubbed placeholders from the public
# code release; real paths must be restored before this script will run.
library(mortdb, lib = "FILEPATH")
repo_dir <- "FILEPATH"
functions_dir <- "FILEPATH"
functs <- c("get_crosswalk_version.R", "save_crosswalk_version.R","get_draws", "get_population", "get_location_metadata", "get_age_metadata", "get_ids")
invisible(lapply(functs, function(x) source(paste0(functions_dir, x, ".R"))))
# INPUT DATA -------------------------------------------------------------
# NOTE(review): copy("FILEPATH") copies the literal string, not a dataset --
# the real read step (e.g. fread/read.xlsx of the crosswalked file) was
# scrubbed and must be restored.
dt <- copy("FILEPATH") #CROSSWALKED DATASET
dt$crosswalk_parent_seq <- dt$seq
dt$group_review[is.na(dt$group_review)] <-1
dt$group_review[dt$group_review==0 & dt$is_outlier==0] <-1 #correct wrongly tagged
# Age-split incidence and prevalence separately.
dt_inc <- subset(dt, measure=="incidence")
dt_prev <- subset(dt, measure=="prevalence")
# CREATE FUNCTIONS -----------------------------------------------------------
## FILL OUT MEAN/CASES/SAMPLE SIZE
## FILL OUT MEAN/CASES/SAMPLE SIZE
# Fill whichever of mean / cases / sample_size is missing from the other two.
# Statement order matters: mean is filled first so cases can be derived
# from it on the following line.
get_cases_sample_size <- function(raw_dt){
  out <- copy(raw_dt)
  out[is.na(mean), mean := cases / sample_size]
  out[is.na(cases) & !is.na(sample_size), cases := mean * sample_size]
  out[is.na(sample_size) & !is.na(cases), sample_size := cases / mean]
  return(out)
}
## CALCULATE STD ERROR BASED ON UPLOADER FORMULAS
## CALCULATE STD ERROR BASED ON UPLOADER FORMULAS
# Fill missing standard_error using the epi-uploader conventions: from the
# 95% interval when bounds exist, otherwise from measure-specific formulas.
get_se <- function(raw_dt){
  out <- copy(raw_dt)
  # 95% UI width divided by 2 * 1.96.
  out[is.na(standard_error) & !is.na(lower) & !is.na(upper),
      standard_error := (upper - lower) / 3.92]
  z <- qnorm(0.975)
  # Proportion-like measures share the same binomial-with-correction formula.
  out[is.na(standard_error) & measure %in% c("proportion", "prevalence"),
      standard_error := sqrt(mean * (1 - mean) / sample_size + z^2 / (4 * sample_size^2))]
  # Incidence: small-count correction below 5 cases, Poisson SE otherwise.
  out[is.na(standard_error) & measure == "incidence" & cases < 5,
      standard_error := ((5 - mean * sample_size) / sample_size + mean * sample_size * sqrt(5 / sample_size^2)) / 5]
  out[is.na(standard_error) & measure == "incidence" & cases >= 5,
      standard_error := sqrt(mean / sample_size)]
  return(out)
}
## GET CASES IF THEY ARE MISSING
## GET CASES IF THEY ARE MISSING
# Back-calculate sample_size (and then cases) from mean and standard_error
# when both cases and sample_size are absent.
calculate_cases_fromse <- function(raw_dt){
  out <- copy(raw_dt)
  # Proportion-like measures invert the binomial SE formula.
  out[is.na(cases) & is.na(sample_size) & measure %in% c("proportion", "prevalence"),
      sample_size := (mean * (1 - mean) / standard_error^2)]
  # Incidence inverts the Poisson SE formula.
  out[is.na(cases) & is.na(sample_size) & measure == "incidence",
      sample_size := mean / standard_error^2]
  out[is.na(cases), cases := mean * sample_size]
  return(out)
}
## MAKE SURE DATA IS FORMATTED CORRECTLY
## MAKE SURE DATA IS FORMATTED CORRECTLY
# Subset the raw bundle down to rows eligible for age-splitting and attach
# sex_id / measure_id / year_id columns needed downstream.
format_data <- function(unformatted_dt, sex_dt){
  dt <- copy(unformatted_dt)
  dt[, `:=` (mean = as.numeric(mean), sample_size = as.numeric(sample_size), cases = as.numeric(cases),
             age_start = as.numeric(age_start), age_end = as.numeric(age_end), year_start = as.numeric(year_start))]
  dt <- dt[measure %in% c("proportion", "prevalence", "incidence"),]
  dt <- dt[!group_review==0 | is.na(group_review),] ##don't use group_review 0
  dt <- dt[is_outlier==0,] ##don't age split outliered data
  # Only literature rows with an age span wider than 25 years are split;
  # tighter age groups are kept as reported.
  dt <- dt[(age_end-age_start)>25 & cv_literature==1 ,]
  # Drop zero-mean rows: a zero all-age value cannot be distributed across
  # ages by the ratio method used in split_data().
  dt <- dt[(!mean == 0 & !cases == 0) |(!mean == 0 & is.na(cases)) , ]
  # Attach sex_id via the sex name lookup (from get_ids(table = "sex")).
  dt <- merge(dt, sex_dt, by = "sex")
  dt[measure == "proportion", measure_id := 18]
  dt[measure == "prevalence", measure_id := 5]
  dt[measure == "incidence", measure_id := 6]
  # Rounded midpoint year, later used to pull matching populations.
  dt[, year_id := round((year_start + year_end)/2, 0)]
  return(dt)
}
## CREATE NEW AGE ROWS
## CREATE NEW AGE ROWS
# Expand each wide-age row into one row per 5-year GBD age group it spans.
# NOTE(review): filters on the global vector `age` (modelled age_group_ids)
# -- confirm it is set before calling.
expand_age <- function(small_dt, age_dt = ages){
  dt <- copy(small_dt)
  ## ROUND AGE GROUPS
  # Snap age_start down to a multiple of 5 and age_end up to an x4 endpoint,
  # capping at 99 so rows align with 5-year GBD groups.
  dt[, age_start := age_start - age_start %%5]
  dt[, age_end := age_end - age_end %%5 + 4]
  dt <- dt[age_end > 99, age_end := 99]
  ## EXPAND FOR AGE
  # n.age = number of 5-year groups spanned by the row.
  dt[, n.age:=(age_end+1 - age_start)/5]
  dt[, age_start_floor:=age_start]
  dt[, drop := cases/n.age] ##drop the data points if cases/n.age is less than 1
  # Replicate each row id n.age times, then recompute per-replicate bounds.
  expanded <- rep(dt$id, dt$n.age) %>% data.table("id" = .)
  split <- merge(expanded, dt, by="id", all=T)
  split[, age.rep := 1:.N - 1, by =.(id)]
  split[, age_start:= age_start+age.rep*5]
  split[, age_end := age_start + 4]
  # Attach age_group_id from the GBD age metadata; 0-4 is collapsed to id 1.
  split <- merge(split, age_dt, by = c("age_start", "age_end"), all.x = T)
  split[age_start == 0 & age_end == 4, age_group_id := 1]
  split <- split[age_group_id %in% age | age_group_id == 1] ##don't keep where age group id isn't estimated for cause
  return(split)
}
## GET DISMOD AGE PATTERN
## GET DISMOD AGE PATTERN
# Pull 2010 DisMod draws for the given locations/ages, summarize them into a
# mean rate (rate_dis) and SE (se_dismod) per age/sex/location, collapse
# ages 0-4 into age_group_id 1 by population weighting, and add a both-sex
# (sex_id 3) pattern.
# NOTE(review): reads the globals `measure_id`, `version_id`, and `draws`
# from the script environment -- confirm they are set before calling.
get_age_pattern <- function(locs, id, age_groups){
  age_pattern <- get_draws(gbd_id_type = "modelable_entity_id", gbd_id = id, ## USING 2010 AGE PATTERN BECAUSE LIKELY HAVE MORE DATA FOR 2010
                           measure_id = measure_id, location_id = locs, source = "epi",
                           version_id = version_id, sex_id = c(1,2), gbd_round_id = 6, decomp_step = "step2", #can replace version_id with status = "best" or "latest"
                           age_group_id = age_groups, year_id = 2010) ##imposing age pattern
  us_population <- get_population(location_id = locs, year_id = 2010, sex_id = c(1, 2),
                                  age_group_id = age_groups, decomp_step = "step2")
  us_population <- us_population[, .(age_group_id, sex_id, population, location_id)]
  # Collapse the draw columns into a point estimate (row mean) and SD.
  age_pattern[, se_dismod := apply(.SD, 1, sd), .SDcols = draws]
  age_pattern[, rate_dis := rowMeans(.SD), .SDcols = draws]
  age_pattern[, (draws) := NULL]
  age_pattern <- age_pattern[ ,.(sex_id, measure_id, age_group_id, location_id, se_dismod, rate_dis)]
  ## AGE GROUP 1 (SUM POPULATION WEIGHTED RATES)
  age_1 <- copy(age_pattern)
  age_1 <- age_1[age_group_id %in% c(2, 3, 4, 5), ]
  # The aggregate group's SE is borrowed from age_group_id 5 (1-4 years).
  se <- copy(age_1)
  se <- se[age_group_id==5, .(measure_id, sex_id, se_dismod, location_id)]
  age_1 <- merge(age_1, us_population, by = c("age_group_id", "sex_id", "location_id"))
  age_1[, total_pop := sum(population), by = c("sex_id", "measure_id", "location_id")]
  age_1[, frac_pop := population / total_pop]
  age_1[, weight_rate := rate_dis * frac_pop]
  age_1[, rate_dis := sum(weight_rate), by = c("sex_id", "measure_id", "location_id")]
  age_1 <- unique(age_1, by = c("sex_id", "measure_id", "location_id"))
  age_1 <- age_1[, .(age_group_id, sex_id, measure_id, location_id, rate_dis)]
  age_1 <- merge(age_1, se, by = c("sex_id", "measure_id", "location_id"))
  age_1[, age_group_id := 1]
  age_pattern <- age_pattern[!age_group_id %in% c(2,3,4,5)]
  age_pattern <- rbind(age_pattern, age_1)
  ## CASES AND SAMPLE SIZE
  # Back out effective cases/sample size from rate and SE (binomial inverse
  # is only applied for measure_id 18 here).
  age_pattern[measure_id == 18, sample_size_us := rate_dis * (1-rate_dis)/se_dismod^2]
  age_pattern[, cases_us := sample_size_us * rate_dis]
  age_pattern[is.nan(sample_size_us), sample_size_us := 0] ##if all draws are 0 can't calculate cases and sample size b/c se = 0, but should both be 0
  age_pattern[is.nan(cases_us), cases_us := 0]
  ## GET SEX ID 3
  # Both-sex pattern: sum cases and sample size across sexes, recompute rate.
  sex_3 <- copy(age_pattern)
  sex_3[, cases_us := sum(cases_us), by = c("age_group_id", "measure_id", "location_id")]
  sex_3[, sample_size_us := sum(sample_size_us), by = c("age_group_id", "measure_id", "location_id")]
  sex_3[, rate_dis := cases_us/sample_size_us]
  sex_3[measure_id == 18, se_dismod := sqrt(rate_dis*(1-rate_dis)/sample_size_us)] ##back calculate cases and sample size
  sex_3[is.nan(rate_dis), rate_dis := 0] ##if sample_size is 0 can't calculate rate and standard error, but should both be 0
  sex_3[is.nan(se_dismod), se_dismod := 0]
  sex_3 <- unique(sex_3, by = c("age_group_id", "measure_id", "location_id"))
  sex_3[, sex_id := 3]
  age_pattern <- rbind(age_pattern, sex_3)
  age_pattern[, super_region_id := location_id]
  age_pattern <- age_pattern[ ,.(age_group_id, sex_id, measure_id, cases_us, sample_size_us, rate_dis, se_dismod, super_region_id)]
  return(age_pattern)
}
## GET POPULATION STRUCTURE
## GET POPULATION STRUCTURE
# Pull populations for the requested locations/years/ages and build an
# aggregate age_group_id 1 (under-5) by summing age_group_ids 2-5, mirroring
# the aggregation done in get_age_pattern().
get_pop_structure <- function(locs, years, age_groups){
  populations <- get_population(location_id = locs, year_id = years,decomp_step = "step2",
                                sex_id = c(1, 2, 3), age_group_id = age_groups)
  age_1 <- copy(populations) ##create age group id 1 by collapsing lower age groups
  age_1 <- age_1[age_group_id %in% c(2, 3, 4, 5)]
  # Sum within location/year/sex, then keep one row per group.
  age_1[, population := sum(population), by = c("location_id", "year_id", "sex_id")]
  age_1 <- unique(age_1, by = c("location_id", "year_id", "sex_id"))
  age_1[, age_group_id := 1]
  populations <- populations[!age_group_id %in% c(2, 3, 4, 5)]
  populations <- rbind(populations, age_1) ##add age group id 1 back on
  return(populations)
}
## ACTUALLY SPLIT THE DATA
## ACTUALLY SPLIT THE DATA
# Distribute each aggregate data point across its expanded age rows:
# sample size is apportioned by population share, and each age row's mean is
# the reported all-age mean scaled by the DisMod age pattern (ratio method).
# Statement order is load-bearing: ratios must be computed before `mean` is
# overwritten.
split_data <- function(raw_dt){
  dt <- copy(raw_dt)
  dt[, total_pop := sum(population), by = "id"]
  dt[, sample_size := (population / total_pop) * sample_size]
  dt[, cases_dis := sample_size * rate_dis]
  dt[, total_cases_dis := sum(cases_dis), by = "id"]
  dt[, total_sample_size := sum(sample_size), by = "id"]
  # Implied all-age rate under the DisMod pattern.
  dt[, all_age_rate := total_cases_dis/total_sample_size]
  # Scale factor mapping the pattern onto the reported all-age mean.
  dt[, ratio := mean / all_age_rate]
  dt[, mean := ratio * rate_dis ]
  # Drop implausible splits where the rescaled mean reaches 1 or more.
  dt <- dt[mean < 1, ]
  dt[, cases := mean * sample_size]
  return(dt)
}
## FORMAT DATA TO FINISH
## FORMAT DATA TO FINISH
# Tag the split rows, blank their uncertainty fields, recompute SE, note the
# split method, and splice them back into the original dataset in place of
# the aggregate rows they came from.
# NOTE(review): this function reads the globals `df` (for the output column
# set) and `date` (for the note text) rather than taking them as arguments
# -- confirm both are defined in the calling environment.
format_data_forfinal <- function(unformatted_dt, location_split_id, region, original_dt){
  dt <- copy(unformatted_dt)
  dt[, group := 1]
  dt[, specificity := "age,sex"]
  dt[, group_review := 1]
  dt[is.na(crosswalk_parent_seq), crosswalk_parent_seq := seq]
  # Uncertainty is blanked so the uploader recalculates it from the new SE.
  blank_vars <- c("lower", "upper", "effective_sample_size", "standard_error", "uncertainty_type", "uncertainty_type_value", "seq")
  dt[, (blank_vars) := NA]
  dt <- get_se(dt)
  if (region == T) {
    dt[, note_modeler := paste0(note_modeler, "| age split using the super region age pattern", date)]
  } else {
    dt[, note_modeler := paste0(note_modeler, "| age split using the age pattern from location id ", location_split_id, " ", date)]
  }
  # Replace split source rows with their age-specific versions.
  split_ids <- dt[, unique(id)]
  dt <- rbind(original_dt[!id %in% split_ids], dt, fill = T)
  dt <- dt[, c(names(df)), with = F]
  return(dt)
}
###########################################################################################
## FIRST WE AGE-SPLIT INCIDENCE DATA
## Pipeline: pull metadata -> format/filter rows that need splitting ->
## expand each aggregate-age row into 5-year GBD age groups -> attach the
## DisMod age pattern and population structure -> rescale means -> recombine
## the split rows with the unsplit original data.
id <- "DISMOD_ID" ## this is the meid for iterative or wherever age split Dismod was run
version_id <- "DISMOD_VERSION_ID"
measure_id <- 6 ##Measure ID 5= prev, 6=incidence, 18=proportion
region_pattern <- FALSE
# RUN THESE CALLS ---------------------------------------------------------------------------
ages <- get_age_metadata(12)
setnames(ages, c("age_group_years_start", "age_group_years_end"), c("age_start", "age_end"))
age_groups <- ages[age_start >= 5, age_group_id]
df <- copy(dt_inc)
age <- age_groups
gbd_id <- id
location_pattern_id <- 1
# AGE SPLIT FUNCTION -----------------------------------------------------------------------
## GET TABLES
sex_names <- get_ids(table = "sex")
ages <- get_age_metadata(12)
setnames(ages, c("age_group_years_start", "age_group_years_end"), c("age_start", "age_end"))
ages[, age_group_weight_value := NULL]
ages[age_start >= 1, age_end := age_end - 1]
ages[age_end == 124, age_end := 99]
super_region_dt <- get_location_metadata(location_set_id = 22)
super_region_dt <- super_region_dt[, .(location_id, super_region_id)]
## SAVE ORIGINAL DATA
original <- copy(df)
original[, id := 1:.N]
## FORMAT DATA
dt <- format_data(original, sex_dt = sex_names)
dt <- get_cases_sample_size(dt)
dt <- get_se(dt)
dt <- calculate_cases_fromse(dt)
## EXPAND AGE
split_dt <- expand_age(dt, age_dt = ages)
## GET PULL LOCATIONS
if (region_pattern == T){
split_dt <- merge(split_dt, super_region_dt, by = "location_id")
super_regions <- unique(split_dt$super_region_id) ##get super regions for dismod results
locations <- super_regions
} else {
locations <- location_pattern_id
}
##GET LOCS AND POPS
pop_locs <- unique(split_dt$location_id)
pop_years <- unique(split_dt$year_id)
## GET AGE PATTERN
print("getting age pattern")
age_pattern <- get_age_pattern(locs = locations, id = gbd_id, age_groups = age)
if (region_pattern == T) {
age_pattern1 <- copy(age_pattern)
split_dt <- merge(split_dt, age_pattern1, by = c("sex_id", "age_group_id", "measure_id", "super_region_id"))
} else {
age_pattern1 <- copy(age_pattern)
split_dt <- merge(split_dt, age_pattern1, by = c("sex_id", "age_group_id", "measure_id"))
}
## GET POPULATION INFO
print("getting pop structure")
## Use the full argument name (was `age_group`, which only worked via
## R's partial argument matching and breaks if the signature gains
## another argument starting with "age_group")
pop_structure <- get_pop_structure(locs = pop_locs, years = pop_years, age_groups = age)
split_dt <- merge(split_dt, pop_structure, by = c("location_id", "sex_id", "year_id", "age_group_id"))
## CREATE NEW POINTS
print("splitting data")
split_dt <- split_data(split_dt)
final_dt_inc <- format_data_forfinal(split_dt, location_split_id = location_pattern_id, region = region_pattern,
original_dt = original)
###########################################################################################
## NEXT, WE AGE-SPLIT PREVALENCE DATA
## Same pipeline as the incidence split above, run on the prevalence subset
## (measure_id 5) of the crosswalked data.
id <- "DISMOD_ID" ## this is the meid for iterative or wherever age split Dismod was run
version_id <- "DISMOD_VERSION_ID"
measure_id <- 5
region_pattern <- FALSE
# RUN THESE CALLS ---------------------------------------------------------------------------
ages <- get_age_metadata(12)
setnames(ages, c("age_group_years_start", "age_group_years_end"), c("age_start", "age_end"))
age_groups <- ages[age_start >= 5, age_group_id]
df <- copy(dt_prev)
age <- age_groups
gbd_id <- id
location_pattern_id <- 1
# AGE SPLIT FUNCTION -----------------------------------------------------------------------
## GET TABLES
sex_names <- get_ids(table = "sex")
ages <- get_age_metadata(12)
setnames(ages, c("age_group_years_start", "age_group_years_end"), c("age_start", "age_end"))
ages[, age_group_weight_value := NULL]
ages[age_start >= 1, age_end := age_end - 1]
ages[age_end == 124, age_end := 99]
super_region_dt <- get_location_metadata(location_set_id = 22)
super_region_dt <- super_region_dt[, .(location_id, super_region_id)]
## SAVE ORIGINAL DATA
original <- copy(df)
original[, id := 1:.N]
## FORMAT DATA
dt <- format_data(original, sex_dt = sex_names)
dt <- get_cases_sample_size(dt)
dt <- get_se(dt)
dt <- calculate_cases_fromse(dt)
## EXPAND AGE
split_dt <- expand_age(dt, age_dt = ages)
## GET PULL LOCATIONS
if (region_pattern == T){
split_dt <- merge(split_dt, super_region_dt, by = "location_id")
super_regions <- unique(split_dt$super_region_id) ##get super regions for dismod results
locations <- super_regions
} else {
locations <- location_pattern_id
}
##GET LOCS AND POPS
pop_locs <- unique(split_dt$location_id)
pop_years <- unique(split_dt$year_id)
## GET AGE PATTERN
print("getting age pattern")
age_pattern <- get_age_pattern(locs = locations, id = gbd_id, age_groups = age)
if (region_pattern == T) {
age_pattern1 <- copy(age_pattern)
split_dt <- merge(split_dt, age_pattern1, by = c("sex_id", "age_group_id", "measure_id", "super_region_id"))
} else {
age_pattern1 <- copy(age_pattern)
split_dt <- merge(split_dt, age_pattern1, by = c("sex_id", "age_group_id", "measure_id"))
}
## GET POPULATION INFO
print("getting pop structure")
## Use the full argument name (was `age_group`, which only worked via
## R's partial argument matching)
pop_structure <- get_pop_structure(locs = pop_locs, years = pop_years, age_groups = age)
split_dt <- merge(split_dt, pop_structure, by = c("location_id", "sex_id", "year_id", "age_group_id"))
## CREATE NEW POINTS
print("splitting data")
split_dt <- split_data(split_dt)
final_dt_prev <- format_data_forfinal(split_dt, location_split_id = location_pattern_id, region = region_pattern,
original_dt = original)
###########################################################################################
## LASTLY, APPEND PREVALENCE AND INCIDENCE DATA AND SAVE
## Bug fixes: the incidence results live in `final_dt_inc` (there is no
## `final_dt`), and the combined table is what must be written out
## (`final_split` was never defined). data.table's rbind(fill = TRUE)
## replaces plyr::rbind.fill, since plyr is never loaded by this script.
append <- rbind(final_dt_inc, final_dt_prev, fill = TRUE)
write.csv(append, "FILEPATH", row.names = FALSE)
| /gbd_2019/nonfatal_code/digest_ibd/Ulcerative_colitis/GBD2019_UC_post-DisMod_age-split.R | no_license | Nermin-Ghith/ihme-modeling | R | false | false | 16,425 | r | #####################################################################################
### This script age-splits aggregated age data by using DisMod output
####################################################################################
pacman::p_load(data.table, openxlsx, ggplot2, magrittr)
## Run date, underscored below, used to stamp note_modeler entries.
## (A redundant first assignment that was immediately overwritten has been removed.)
date <- Sys.Date()
# GET OBJECTS -------------------------------------------------------------
b_id <- "BUNDLE_ID" #this is bundle ID
a_cause <- "digest_ibd"
name <- "UC"
date <- gsub("-", "_", date)
draws <- paste0("draw_", 0:999)
# SET FUNCTIONS ------------------------------------------------------------
library(mortdb, lib = "FILEPATH")
repo_dir <- "FILEPATH"
functions_dir <- "FILEPATH"
## ".R" is appended when the path is built below, so the names must NOT carry
## the extension themselves: the original "get_crosswalk_version.R" /
## "save_crosswalk_version.R" entries produced broken "....R.R" paths.
functs <- c("get_crosswalk_version", "save_crosswalk_version", "get_draws", "get_population", "get_location_metadata", "get_age_metadata", "get_ids")
invisible(lapply(functs, function(x) source(paste0(functions_dir, x, ".R"))))
# INPUT DATA -------------------------------------------------------------
dt <- copy("FILEPATH") #CROSSWALKED DATASET -- NOTE(review): sanitized placeholder; copy() of a string is not a data set, this should read the crosswalked file (e.g. fread/read.xlsx) -- confirm intended loader
dt$crosswalk_parent_seq <- dt$seq
dt$group_review[is.na(dt$group_review)] <-1
dt$group_review[dt$group_review==0 & dt$is_outlier==0] <-1 #correct wrongly tagged
dt_inc <- subset(dt, measure=="incidence")
dt_prev <- subset(dt, measure=="prevalence")
# CREATE FUNCTIONS -----------------------------------------------------------
## FILL OUT MEAN/CASES/SAMPLE SIZE
## Derive whichever of mean, cases, or sample_size is missing from the other
## two. Order matters: mean is filled first because the later fills read it.
## Returns a filled copy; the input data.table is not modified.
get_cases_sample_size <- function(raw_dt){
out <- copy(raw_dt)
out[is.na(mean), `:=`(mean = cases / sample_size)]
out[is.na(cases) & !is.na(sample_size), `:=`(cases = mean * sample_size)]
out[is.na(sample_size) & !is.na(cases), `:=`(sample_size = cases / mean)]
out
}
## CALCULATE STD ERROR BASED ON UPLOADER FORMULAS
## Fill missing standard_error using the GBD epi-uploader conventions:
## from the 95% UI width when bounds exist, otherwise from mean/sample size
## with a measure-specific formula. Returns a filled copy.
get_se <- function(raw_dt){
dt <- copy(raw_dt)
## 95% UI width / (2 * 1.96) = 3.92
dt[is.na(standard_error) & !is.na(lower) & !is.na(upper), standard_error := (upper-lower)/3.92]
z <- qnorm(0.975)
## Wilson-style SE for proportion-like measures
dt[is.na(standard_error) & measure == "proportion", standard_error := sqrt(mean*(1-mean)/sample_size + z^2/(4*sample_size^2))]
dt[is.na(standard_error) & measure == "prevalence", standard_error := sqrt(mean*(1-mean)/sample_size + z^2/(4*sample_size^2))]
## Incidence: small-count correction below 5 cases, Poisson SE otherwise
dt[is.na(standard_error) & measure == "incidence" & cases < 5, standard_error := ((5-mean*sample_size)/sample_size+mean*sample_size*sqrt(5/sample_size^2))/5]
dt[is.na(standard_error) & measure == "incidence" & cases >= 5, standard_error := sqrt(mean/sample_size)]
return(dt)
}
## GET CASES IF THEY ARE MISSING
## Back-calculate sample_size (and then cases) from mean and standard_error,
## inverting the uploader SE formulas above, for rows missing both counts.
calculate_cases_fromse <- function(raw_dt){
dt <- copy(raw_dt)
dt[is.na(cases) & is.na(sample_size) & measure == "proportion", sample_size := (mean*(1-mean)/standard_error^2)]
dt[is.na(cases) & is.na(sample_size) & measure == "prevalence", sample_size := (mean*(1-mean)/standard_error^2)]
dt[is.na(cases) & is.na(sample_size) & measure == "incidence", sample_size := mean/standard_error^2]
dt[is.na(cases), cases := mean * sample_size]
return(dt)
}
## MAKE SURE DATA IS FORMATTED CORRECTLY
## Coerce key columns to numeric, keep only the rows that should be
## age-split (wide literature age ranges, usable review status, nonzero
## mean), attach sex_id, and map measure names to GBD measure_ids.
format_data <- function(unformatted_dt, sex_dt){
dt <- copy(unformatted_dt)
dt[, `:=` (mean = as.numeric(mean), sample_size = as.numeric(sample_size), cases = as.numeric(cases),
age_start = as.numeric(age_start), age_end = as.numeric(age_end), year_start = as.numeric(year_start))]
dt <- dt[measure %in% c("proportion", "prevalence", "incidence"),]
dt <- dt[!group_review==0 | is.na(group_review),] ##don't use group_review 0
dt <- dt[is_outlier==0,] ##don't age split outliered data
## Only split literature points whose age span exceeds 25 years
dt <- dt[(age_end-age_start)>25 & cv_literature==1 ,] #for prevalence, incidence, proportion
dt <- dt[(!mean == 0 & !cases == 0) |(!mean == 0 & is.na(cases)) , ] ## drop zero-mean points
dt <- merge(dt, sex_dt, by = "sex")
dt[measure == "proportion", measure_id := 18]
dt[measure == "prevalence", measure_id := 5]
dt[measure == "incidence", measure_id := 6]
dt[, year_id := round((year_start + year_end)/2, 0)] ## midpoint year
return(dt)
}
## CREATE NEW AGE ROWS
## Round each point's age range to 5-year boundaries, then replicate the row
## once per 5-year group it spans and attach the matching GBD age_group_id.
## Rows whose age group is not estimated for this cause are dropped.
expand_age <- function(small_dt, age_dt = ages){
dt <- copy(small_dt)
## ROUND AGE GROUPS
dt[, age_start := age_start - age_start %%5] ## round start down to multiple of 5
dt[, age_end := age_end - age_end %%5 + 4]   ## round end up to a ...4 boundary
dt <- dt[age_end > 99, age_end := 99]
## EXPAND FOR AGE
dt[, n.age:=(age_end+1 - age_start)/5] ## number of 5-year groups spanned
dt[, age_start_floor:=age_start]
## NOTE(review): `drop` is computed but never used to filter anything,
## despite the comment below -- confirm whether points with cases/n.age < 1
## were meant to be removed.
dt[, drop := cases/n.age] ##drop the data points if cases/n.age is less than 1
expanded <- rep(dt$id, dt$n.age) %>% data.table("id" = .)
split <- merge(expanded, dt, by="id", all=T)
split[, age.rep := 1:.N - 1, by =.(id)] ## 0-based index of each replicate
split[, age_start:= age_start+age.rep*5]
split[, age_end := age_start + 4]
split <- merge(split, age_dt, by = c("age_start", "age_end"), all.x = T)
split[age_start == 0 & age_end == 4, age_group_id := 1] ## collapsed under-5 group
split <- split[age_group_id %in% age | age_group_id == 1] ##don't keep where age group id isn't estimated for cause
return(split)
}
## GET DISMOD AGE PATTERN
## Pull 2010 DisMod draws for the given locations and age groups, summarize
## them to a mean rate (rate_dis) and SE (se_dismod) per location/sex/age,
## collapse the under-5 groups into a population-weighted age_group_id 1,
## and append a both-sex (sex_id 3) pattern. Uses globals `measure_id`,
## `version_id`, and `draws`.
get_age_pattern <- function(locs, id, age_groups){
age_pattern <- get_draws(gbd_id_type = "modelable_entity_id", gbd_id = id, ## USING 2010 AGE PATTERN BECAUSE LIKELY HAVE MORE DATA FOR 2010
measure_id = measure_id, location_id = locs, source = "epi",
version_id = version_id, sex_id = c(1,2), gbd_round_id = 6, decomp_step = "step2", #can replace version_id with status = "best" or "latest"
age_group_id = age_groups, year_id = 2010) ##imposing age pattern
us_population <- get_population(location_id = locs, year_id = 2010, sex_id = c(1, 2),
age_group_id = age_groups, decomp_step = "step2")
us_population <- us_population[, .(age_group_id, sex_id, population, location_id)]
## Summarize the draw distribution to a point estimate and SE
age_pattern[, se_dismod := apply(.SD, 1, sd), .SDcols = draws]
age_pattern[, rate_dis := rowMeans(.SD), .SDcols = draws]
age_pattern[, (draws) := NULL]
age_pattern <- age_pattern[ ,.(sex_id, measure_id, age_group_id, location_id, se_dismod, rate_dis)]
## AGE GROUP 1 (SUM POPULATION WEIGHTED RATES)
age_1 <- copy(age_pattern)
age_1 <- age_1[age_group_id %in% c(2, 3, 4, 5), ]
## SE for the collapsed group is taken from age_group_id 5 (1-4 year olds)
se <- copy(age_1)
se <- se[age_group_id==5, .(measure_id, sex_id, se_dismod, location_id)]
age_1 <- merge(age_1, us_population, by = c("age_group_id", "sex_id", "location_id"))
age_1[, total_pop := sum(population), by = c("sex_id", "measure_id", "location_id")]
age_1[, frac_pop := population / total_pop]
age_1[, weight_rate := rate_dis * frac_pop]
age_1[, rate_dis := sum(weight_rate), by = c("sex_id", "measure_id", "location_id")]
age_1 <- unique(age_1, by = c("sex_id", "measure_id", "location_id"))
age_1 <- age_1[, .(age_group_id, sex_id, measure_id, location_id, rate_dis)]
age_1 <- merge(age_1, se, by = c("sex_id", "measure_id", "location_id"))
age_1[, age_group_id := 1]
age_pattern <- age_pattern[!age_group_id %in% c(2,3,4,5)]
age_pattern <- rbind(age_pattern, age_1)
## CASES AND SAMPLE SIZE
## NOTE(review): sample_size_us is only computed for measure_id == 18
## (proportion), so cases_us/sample_size_us stay NA for prevalence and
## incidence patterns, and the sex_id 3 rates built from them below will be
## NA (is.nan() does not catch NA) -- confirm this is the intended behavior
## when splitting both-sex prevalence/incidence points.
age_pattern[measure_id == 18, sample_size_us := rate_dis * (1-rate_dis)/se_dismod^2]
age_pattern[, cases_us := sample_size_us * rate_dis]
age_pattern[is.nan(sample_size_us), sample_size_us := 0] ##if all draws are 0 can't calculate cases and sample size b/c se = 0, but should both be 0
age_pattern[is.nan(cases_us), cases_us := 0]
## GET SEX ID 3
sex_3 <- copy(age_pattern)
sex_3[, cases_us := sum(cases_us), by = c("age_group_id", "measure_id", "location_id")]
sex_3[, sample_size_us := sum(sample_size_us), by = c("age_group_id", "measure_id", "location_id")]
sex_3[, rate_dis := cases_us/sample_size_us]
sex_3[measure_id == 18, se_dismod := sqrt(rate_dis*(1-rate_dis)/sample_size_us)] ##back calculate cases and sample size
sex_3[is.nan(rate_dis), rate_dis := 0] ##if sample_size is 0 can't calculate rate and standard error, but should both be 0
sex_3[is.nan(se_dismod), se_dismod := 0]
sex_3 <- unique(sex_3, by = c("age_group_id", "measure_id", "location_id"))
sex_3[, sex_id := 3]
age_pattern <- rbind(age_pattern, sex_3)
## super_region_id doubles as location_id so region-pattern merges work
age_pattern[, super_region_id := location_id]
age_pattern <- age_pattern[ ,.(age_group_id, sex_id, measure_id, cases_us, sample_size_us, rate_dis, se_dismod, super_region_id)]
return(age_pattern)
}
## GET POPULATION STRUCTURE
## Pull populations for the requested locations/years/ages and collapse the
## under-5 age groups (GBD ids 2-5) into a single age_group_id 1 row so the
## population table lines up with the collapsed DisMod age pattern.
get_pop_structure <- function(locs, years, age_groups){
pops <- get_population(location_id = locs, year_id = years, decomp_step = "step2",
sex_id = c(1, 2, 3), age_group_id = age_groups)
## Sum the under-5 populations within each location/year/sex
under_five <- copy(pops)[age_group_id %in% 2:5]
under_five[, population := sum(population), by = c("location_id", "year_id", "sex_id")]
under_five <- unique(under_five, by = c("location_id", "year_id", "sex_id"))
under_five[, age_group_id := 1]
## Swap the individual under-5 rows for the combined age group 1 rows
rbind(pops[!age_group_id %in% 2:5], under_five)
}
## ACTUALLY SPLIT THE DATA
## Split each aggregate-age point (rows sharing an "id") into its expanded
## age-specific rows: apportion sample size by population share, use the
## DisMod rate (rate_dis) as the age shape, and rescale it so the split rows
## aggregate back to the reported mean.
split_data <- function(raw_dt){
dt <- copy(raw_dt)
## Population share of each age/sex cell within the parent data point
dt[, total_pop := sum(population), by = "id"]
dt[, sample_size := (population / total_pop) * sample_size]
## Expected cases per cell under the DisMod age pattern
dt[, cases_dis := sample_size * rate_dis]
dt[, total_cases_dis := sum(cases_dis), by = "id"]
dt[, total_sample_size := sum(sample_size), by = "id"]
## All-age rate implied by the pattern vs. the reported mean
dt[, all_age_rate := total_cases_dis/total_sample_size]
dt[, ratio := mean / all_age_rate]
dt[, mean := ratio * rate_dis ]
dt <- dt[mean < 1, ] ## drop split rows whose implied rate is >= 1
dt[, cases := mean * sample_size]
return(dt)
}
## FORMAT DATA TO FINISH
## Tag the split rows, wipe stale uncertainty fields, recompute standard
## errors, stamp note_modeler, and swap the split rows in for their parent
## rows. Relies on globals `df` (column template) and `date`. Returns a
## table restricted to the original column set.
format_data_forfinal <- function(unformatted_dt, location_split_id, region, original_dt){
out <- copy(unformatted_dt)
## Mark the split rows as a reviewed age,sex-specific group
out[, `:=`(group = 1, specificity = "age,sex", group_review = 1)]
out[is.na(crosswalk_parent_seq), crosswalk_parent_seq := seq]
## Parent-row uncertainty no longer applies; clear it and rebuild the SE
cleared <- c("lower", "upper", "effective_sample_size", "standard_error", "uncertainty_type", "uncertainty_type_value", "seq")
out[, (cleared) := NA]
out <- get_se(out)
if (region == T) {
out[, note_modeler := paste0(note_modeler, "| age split using the super region age pattern", date)]
} else {
out[, note_modeler := paste0(note_modeler, "| age split using the age pattern from location id ", location_split_id, " ", date)]
}
## Replace the parent rows that were split; keep untouched rows unchanged
replaced <- out[, unique(id)]
out <- rbind(original_dt[!id %in% replaced], out, fill = T)
out[, c(names(df)), with = F]
}
###########################################################################################
## FIRST WE AGE-SPLIT INCIDENCE DATA
## Pipeline: pull metadata -> format/filter rows that need splitting ->
## expand each aggregate-age row into 5-year GBD age groups -> attach the
## DisMod age pattern and population structure -> rescale means -> recombine
## the split rows with the unsplit original data.
id <- "DISMOD_ID" ## this is the meid for iterative or wherever age split Dismod was run
version_id <- "DISMOD_VERSION_ID"
measure_id <- 6 ##Measure ID 5= prev, 6=incidence, 18=proportion
region_pattern <- FALSE
# RUN THESE CALLS ---------------------------------------------------------------------------
ages <- get_age_metadata(12)
setnames(ages, c("age_group_years_start", "age_group_years_end"), c("age_start", "age_end"))
age_groups <- ages[age_start >= 5, age_group_id]
df <- copy(dt_inc)
age <- age_groups
gbd_id <- id
location_pattern_id <- 1
# AGE SPLIT FUNCTION -----------------------------------------------------------------------
## GET TABLES
sex_names <- get_ids(table = "sex")
ages <- get_age_metadata(12)
setnames(ages, c("age_group_years_start", "age_group_years_end"), c("age_start", "age_end"))
ages[, age_group_weight_value := NULL]
ages[age_start >= 1, age_end := age_end - 1]
ages[age_end == 124, age_end := 99]
super_region_dt <- get_location_metadata(location_set_id = 22)
super_region_dt <- super_region_dt[, .(location_id, super_region_id)]
## SAVE ORIGINAL DATA
original <- copy(df)
original[, id := 1:.N]
## FORMAT DATA
dt <- format_data(original, sex_dt = sex_names)
dt <- get_cases_sample_size(dt)
dt <- get_se(dt)
dt <- calculate_cases_fromse(dt)
## EXPAND AGE
split_dt <- expand_age(dt, age_dt = ages)
## GET PULL LOCATIONS
if (region_pattern == T){
split_dt <- merge(split_dt, super_region_dt, by = "location_id")
super_regions <- unique(split_dt$super_region_id) ##get super regions for dismod results
locations <- super_regions
} else {
locations <- location_pattern_id
}
##GET LOCS AND POPS
pop_locs <- unique(split_dt$location_id)
pop_years <- unique(split_dt$year_id)
## GET AGE PATTERN
print("getting age pattern")
age_pattern <- get_age_pattern(locs = locations, id = gbd_id, age_groups = age)
if (region_pattern == T) {
age_pattern1 <- copy(age_pattern)
split_dt <- merge(split_dt, age_pattern1, by = c("sex_id", "age_group_id", "measure_id", "super_region_id"))
} else {
age_pattern1 <- copy(age_pattern)
split_dt <- merge(split_dt, age_pattern1, by = c("sex_id", "age_group_id", "measure_id"))
}
## GET POPULATION INFO
print("getting pop structure")
## Use the full argument name (was `age_group`, which only worked via
## R's partial argument matching and breaks if the signature gains
## another argument starting with "age_group")
pop_structure <- get_pop_structure(locs = pop_locs, years = pop_years, age_groups = age)
split_dt <- merge(split_dt, pop_structure, by = c("location_id", "sex_id", "year_id", "age_group_id"))
## CREATE NEW POINTS
print("splitting data")
split_dt <- split_data(split_dt)
final_dt_inc <- format_data_forfinal(split_dt, location_split_id = location_pattern_id, region = region_pattern,
original_dt = original)
###########################################################################################
## NEXT, WE AGE-SPLIT PREVALENCE DATA
## Same pipeline as the incidence split above, run on the prevalence subset
## (measure_id 5) of the crosswalked data.
id <- "DISMOD_ID" ## this is the meid for iterative or wherever age split Dismod was run
version_id <- "DISMOD_VERSION_ID"
measure_id <- 5
region_pattern <- FALSE
# RUN THESE CALLS ---------------------------------------------------------------------------
ages <- get_age_metadata(12)
setnames(ages, c("age_group_years_start", "age_group_years_end"), c("age_start", "age_end"))
age_groups <- ages[age_start >= 5, age_group_id]
df <- copy(dt_prev)
age <- age_groups
gbd_id <- id
location_pattern_id <- 1
# AGE SPLIT FUNCTION -----------------------------------------------------------------------
## GET TABLES
sex_names <- get_ids(table = "sex")
ages <- get_age_metadata(12)
setnames(ages, c("age_group_years_start", "age_group_years_end"), c("age_start", "age_end"))
ages[, age_group_weight_value := NULL]
ages[age_start >= 1, age_end := age_end - 1]
ages[age_end == 124, age_end := 99]
super_region_dt <- get_location_metadata(location_set_id = 22)
super_region_dt <- super_region_dt[, .(location_id, super_region_id)]
## SAVE ORIGINAL DATA
original <- copy(df)
original[, id := 1:.N]
## FORMAT DATA
dt <- format_data(original, sex_dt = sex_names)
dt <- get_cases_sample_size(dt)
dt <- get_se(dt)
dt <- calculate_cases_fromse(dt)
## EXPAND AGE
split_dt <- expand_age(dt, age_dt = ages)
## GET PULL LOCATIONS
if (region_pattern == T){
split_dt <- merge(split_dt, super_region_dt, by = "location_id")
super_regions <- unique(split_dt$super_region_id) ##get super regions for dismod results
locations <- super_regions
} else {
locations <- location_pattern_id
}
##GET LOCS AND POPS
pop_locs <- unique(split_dt$location_id)
pop_years <- unique(split_dt$year_id)
## GET AGE PATTERN
print("getting age pattern")
age_pattern <- get_age_pattern(locs = locations, id = gbd_id, age_groups = age)
if (region_pattern == T) {
age_pattern1 <- copy(age_pattern)
split_dt <- merge(split_dt, age_pattern1, by = c("sex_id", "age_group_id", "measure_id", "super_region_id"))
} else {
age_pattern1 <- copy(age_pattern)
split_dt <- merge(split_dt, age_pattern1, by = c("sex_id", "age_group_id", "measure_id"))
}
## GET POPULATION INFO
print("getting pop structure")
## Use the full argument name (was `age_group`, which only worked via
## R's partial argument matching)
pop_structure <- get_pop_structure(locs = pop_locs, years = pop_years, age_groups = age)
split_dt <- merge(split_dt, pop_structure, by = c("location_id", "sex_id", "year_id", "age_group_id"))
## CREATE NEW POINTS
print("splitting data")
split_dt <- split_data(split_dt)
final_dt_prev <- format_data_forfinal(split_dt, location_split_id = location_pattern_id, region = region_pattern,
original_dt = original)
###########################################################################################
## LASTLY, APPEND PREVALENCE AND INCIDENCE DATA AND SAVE
## Bug fixes: the incidence results live in `final_dt_inc` (there is no
## `final_dt`), and the combined table is what must be written out
## (`final_split` was never defined). data.table's rbind(fill = TRUE)
## replaces plyr::rbind.fill, since plyr is never loaded by this script.
append <- rbind(final_dt_inc, final_dt_prev, fill = TRUE)
write.csv(append, "FILEPATH", row.names = FALSE)
|
\name{getFamily}
\alias{getFamily}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Get the miRNA Family and add to the miRNA Enrichment Results
}
\description{
Merges miRNA family annotations onto a table of miRNA enrichment results and
collapses the matched miRNA identifiers into a single comma-separated string
per result row.
}
\usage{
getFamily(results, mir.fam)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{results}{
A data frame of miRNA enrichment results that includes a \code{miRNA} column
of miRBase identifiers.
}
\item{mir.fam}{
A miRNA family annotation table; its first and fourth columns (including the
\code{miRBaseID} column used for merging) are joined onto \code{results}.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (results, mir.fam)
{
results = merge(results, mir.fam[, c(1, 4)], by.x = "miRNA",
by.y = "miRBaseID", all.x = T)
results = aggregate(miRNA ~ ., results, toString)
results$miRNA <- vapply(results$miRNA, paste, collapse = ", ",
character(1L))
return(results)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/getFamily.Rd | no_license | komalsrathi/miRNAEnrich | R | false | false | 1,643 | rd | \name{getFamily}
\alias{getFamily}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Get the miRNA Family and add to the miRNA Enrichment Results
}
\description{
Merges miRNA family annotations onto a table of miRNA enrichment results and
collapses the matched miRNA identifiers into a single comma-separated string
per result row.
}
\usage{
getFamily(results, mir.fam)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{results}{
%% ~~Describe \code{results} here~~
}
\item{mir.fam}{
%% ~~Describe \code{mir.fam} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (results, mir.fam)
{
results = merge(results, mir.fam[, c(1, 4)], by.x = "miRNA",
by.y = "miRBaseID", all.x = T)
results = aggregate(miRNA ~ ., results, toString)
results$miRNA <- vapply(results$miRNA, paste, collapse = ", ",
character(1L))
return(results)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# Library of functions to streamline processing LGR GGA output into fluxes.
## Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
## data: a data frame.
## measurevar: the name of a column that contains the variable to be summariezed
## groupvars: a vector containing names of columns that contain grouping variables
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval (default is 95%)
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
conf.interval=.95, .drop=TRUE) {
library(plyr)
# New version of length which can handle NA's: if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# This does the summary. For each group's data frame, return a vector with
# N, mean, and sd
datac <- ddply(data, groupvars, .drop=.drop,
.fun = function(xx, col) {
c(N = length2(xx[[col]], na.rm=na.rm),
mean = mean (xx[[col]], na.rm=na.rm),
sd = sd (xx[[col]], na.rm=na.rm)
)
},
measurevar
)
# Rename the "mean" column
datac <- rename(datac, c("mean" = measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
| /R/LGR_GGA_functionlib.R | no_license | jhmatthes/LGR_GGA_soilflux | R | false | false | 1,746 | r | # Library of functions to streamline processing LGR GGA output into fluxes.
## Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
## data: a data frame.
## measurevar: the name of a column that contains the variable to be summariezed
## groupvars: a vector containing names of columns that contain grouping variables
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval (default is 95%)
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
conf.interval=.95, .drop=TRUE) {
library(plyr)
# New version of length which can handle NA's: if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# This does the summary. For each group's data frame, return a vector with
# N, mean, and sd
datac <- ddply(data, groupvars, .drop=.drop,
.fun = function(xx, col) {
c(N = length2(xx[[col]], na.rm=na.rm),
mean = mean (xx[[col]], na.rm=na.rm),
sd = sd (xx[[col]], na.rm=na.rm)
)
},
measurevar
)
# Rename the "mean" column
datac <- rename(datac, c("mean" = measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
|
get_files <- function(folder_path, extension, targets){
stopifnot(length(folder_path)==1)
if(!file.exists(folder_path) || !file.info(folder_path)$isdir){
stop('The speficied folder_path "', folder_path, '" either does not exist or is not a directory')
}
## Find files:
files <- unlist(lapply(extension, function(x) return(list.files(folder_path, pattern=paste0('\\.', x, '$')))))
if(length(files)==0){
stop('No files with extension ".', paste(extension, collapse='/'), '" found in the specified folder path')
}
## If the target filename doesn't already end with the first given extension then try adding it:
newtargets <- ifelse(grepl(paste0('\\.', extension[1], '$'), targets), targets, paste0(targets[!grepl(paste0('\\.', extension[1], '$'), targets)], '.', extension[1]))
## Only match exactly:
file_in <- na.omit(match(files, newtargets))
return(data.frame(Filename=targets[file_in], path=file.path(folder_path, newtargets[file_in]), stringsAsFactors=FALSE))
}
read_csv_file <- function(path, skip, date_col, time_col, pH_col, sep, dec, date_format, time_format, ID){
dat <- read.table(path, header=FALSE, sep=sep, dec=dec, skip=skip, stringsAsFactors=FALSE)
if(ncol(dat) < max(c(date_col, time_col, pH_col)))
stop('Unable to read CSV file ', path, ' as the number of columns (', ncol(dat), ') is less than max(c(date_col, time_col, pH_col))')
dat <- data.frame(ID=ID, Date=dat[,date_col], Time=dat[,time_col], pH=dat[,pH_col], stringsAsFactors=FALSE)
# Remove entries with missing date, time or pH:
dat <- dat %>%
filter(!is.na(.data$ID), !is.na(.data$Date), !is.na(.data$Time), !is.na(.data$pH)) %>%
filter(.data$ID!="", .data$Date!="", .data$Time!="", .data$pH!="")
if(nrow(dat)<1){
stop('No valid data in file (zero rows after removing missing or blank date, time and pH)')
}
tt <- dat$Date[1]
dat$Date <- as.Date(dat$Date, format=date_format, tz='GMT')
if(any(is.na(dat$Date))){
stop('Missing dates generated using specified format: ', date_format, ' - first observed date is: ', tt)
}
# If the time does not already contain the year then presume it is missing the date:
tt <- dat$Time[1]
orig_time_format <- time_format
if(!grepl('%Y', time_format) || !grepl('%y', time_format)){
dat$Time <- paste(strftime(dat$Date, format='%Y-%m-%d', tz='GMT'), dat$Time)
time_format <- paste('%Y-%m-%d', time_format)
}
dat$Time <- as.POSIXct(dat$Time, format=time_format, tz='GMT')
if(any(is.na(dat$Time))){
stop('Missing times generated using specified format: ', orig_time_format, ' - first observed time is: ', tt)
}
tt <- dat$pH[1]
dat$pH <- as.numeric(dat$pH)
if(any(is.na(dat$pH))){
stop('Missing pH values generated using specified dec: ', dec, ' - first observed pH is: ', tt)
}
return(dat)
}
read_excel_file <- function(path, skip, date_col, time_col, pH_col, ID){
dat <- as.data.frame(read_excel(path, sheet=1, skip=skip, col_names=FALSE))
if(ncol(dat) < max(c(date_col, time_col, pH_col)))
stop('Unable to read Excel file ', path, ' as the number of columns (', ncol(dat), ') is less than max(c(date_col, time_col, pH_col))')
dat <- data.frame(ID=ID, Date=as.Date(dat[,date_col]), Time=dat[,time_col], pH=dat[,pH_col], stringsAsFactors=FALSE)
# Remove entries with missing date, time or pH:
dat <- dat %>%
filter(!is.na(.data$ID), !is.na(.data$Date), !is.na(.data$Time), !is.na(.data$pH))
if(nrow(dat)<1){
stop('No valid data in file (zero rows after removing missing or blank date, time and pH)')
}
return(dat)
}
| /R/read_files.R | no_license | boydorr/BoluspH | R | false | false | 3,531 | r | get_files <- function(folder_path, extension, targets){
stopifnot(length(folder_path)==1)
if(!file.exists(folder_path) || !file.info(folder_path)$isdir){
stop('The speficied folder_path "', folder_path, '" either does not exist or is not a directory')
}
## Find files:
files <- unlist(lapply(extension, function(x) return(list.files(folder_path, pattern=paste0('\\.', x, '$')))))
if(length(files)==0){
stop('No files with extension ".', paste(extension, collapse='/'), '" found in the specified folder path')
}
## If the target filename doesn't already end with the first given extension then try adding it:
newtargets <- ifelse(grepl(paste0('\\.', extension[1], '$'), targets), targets, paste0(targets[!grepl(paste0('\\.', extension[1], '$'), targets)], '.', extension[1]))
## Only match exactly:
file_in <- na.omit(match(files, newtargets))
return(data.frame(Filename=targets[file_in], path=file.path(folder_path, newtargets[file_in]), stringsAsFactors=FALSE))
}
read_csv_file <- function(path, skip, date_col, time_col, pH_col, sep, dec, date_format, time_format, ID){
dat <- read.table(path, header=FALSE, sep=sep, dec=dec, skip=skip, stringsAsFactors=FALSE)
if(ncol(dat) < max(c(date_col, time_col, pH_col)))
stop('Unable to read CSV file ', path, ' as the number of columns (', ncol(dat), ') is less than max(c(date_col, time_col, pH_col))')
dat <- data.frame(ID=ID, Date=dat[,date_col], Time=dat[,time_col], pH=dat[,pH_col], stringsAsFactors=FALSE)
# Remove entries with missing date, time or pH:
dat <- dat %>%
filter(!is.na(.data$ID), !is.na(.data$Date), !is.na(.data$Time), !is.na(.data$pH)) %>%
filter(.data$ID!="", .data$Date!="", .data$Time!="", .data$pH!="")
if(nrow(dat)<1){
stop('No valid data in file (zero rows after removing missing or blank date, time and pH)')
}
tt <- dat$Date[1]
dat$Date <- as.Date(dat$Date, format=date_format, tz='GMT')
if(any(is.na(dat$Date))){
stop('Missing dates generated using specified format: ', date_format, ' - first observed date is: ', tt)
}
# If the time does not already contain the year then presume it is missing the date:
tt <- dat$Time[1]
orig_time_format <- time_format
if(!grepl('%Y', time_format) || !grepl('%y', time_format)){
dat$Time <- paste(strftime(dat$Date, format='%Y-%m-%d', tz='GMT'), dat$Time)
time_format <- paste('%Y-%m-%d', time_format)
}
dat$Time <- as.POSIXct(dat$Time, format=time_format, tz='GMT')
if(any(is.na(dat$Time))){
stop('Missing times generated using specified format: ', orig_time_format, ' - first observed time is: ', tt)
}
tt <- dat$pH[1]
dat$pH <- as.numeric(dat$pH)
if(any(is.na(dat$pH))){
stop('Missing pH values generated using specified dec: ', dec, ' - first observed pH is: ', tt)
}
return(dat)
}
read_excel_file <- function(path, skip, date_col, time_col, pH_col, ID){
dat <- as.data.frame(read_excel(path, sheet=1, skip=skip, col_names=FALSE))
if(ncol(dat) < max(c(date_col, time_col, pH_col)))
stop('Unable to read Excel file ', path, ' as the number of columns (', ncol(dat), ') is less than max(c(date_col, time_col, pH_col))')
dat <- data.frame(ID=ID, Date=as.Date(dat[,date_col]), Time=dat[,time_col], pH=dat[,pH_col], stringsAsFactors=FALSE)
# Remove entries with missing date, time or pH:
dat <- dat %>%
filter(!is.na(.data$ID), !is.na(.data$Date), !is.na(.data$Time), !is.na(.data$pH))
if(nrow(dat)<1){
stop('No valid data in file (zero rows after removing missing or blank date, time and pH)')
}
return(dat)
}
|
library(caret);library(rpart);library(randomForest);
source('src//multiClassLogisticRegression.R')
source('src//preProcess//preProcess.R')
source('src//graphics//clusterPlot.R')
source('src/preProcess/filter.R')
source('src//preProcess//topNImportantFeature.R')
data <- getCleanData()
set.seed(1234)
trainIndex <- createDataPartition(data[,ncol(data)], p=0.5, list=FALSE)
trainData <- data[trainIndex, ]
xTrain <- trainData[,-ncol(trainData)]
yTrain <- trainData[,ncol(trainData)]
testData <- data[-trainIndex,]
xTest <- testData[,-ncol(testData)]
yTest <- testData[,ncol(testData)]
modelRf <- randomForest(x = xTrain,
y = yTrain,
importance = TRUE)
preds <- predict(modelRf, xTest)
cm <- confusionMatrix(preds, yTest)
# png("cm_random_forest.png")
# p<-tableGrob(cm$table)
# grid.arrange(p)
# dev.off()
test_assignment_data <- read.csv('data/pml-testing.csv')
ids <- test_assignment_data$problem_id
tad <- test_assignment_data[,names(trainData[,-ncol(trainData)])]
results <- predict(modelRf, tad)
answers = rep("A", 20)
pml_write_files = function(x){
n = length(x)
for(i in 1:n){
filename = paste0("problem_id_",i,".txt")
write.table(x[i],file=filename,quote=FALSE,row.names=FALSE,col.names=FALSE)
}
}
pml_write_files(results)
dim(test_assignment_data)
topnNFeatures <- modelRf$importance[,"A"][order(modelRf$importance[,"A"], decreasing = TRUE)][4]
featurePlot(x=trainData[,topNFeatures],
y = trainData[,ncol(trainData)],
plot='density') | /src/models/randomForest.R | no_license | prasu05/practical_machine_learning_assignment | R | false | false | 1,547 | r | library(caret);library(rpart);library(randomForest);
source('src//multiClassLogisticRegression.R')
source('src//preProcess//preProcess.R')
source('src//graphics//clusterPlot.R')
source('src/preProcess/filter.R')
source('src//preProcess//topNImportantFeature.R')
data <- getCleanData()
set.seed(1234)
trainIndex <- createDataPartition(data[,ncol(data)], p=0.5, list=FALSE)
trainData <- data[trainIndex, ]
xTrain <- trainData[,-ncol(trainData)]
yTrain <- trainData[,ncol(trainData)]
testData <- data[-trainIndex,]
xTest <- testData[,-ncol(testData)]
yTest <- testData[,ncol(testData)]
modelRf <- randomForest(x = xTrain,
y = yTrain,
importance = TRUE)
preds <- predict(modelRf, xTest)
cm <- confusionMatrix(preds, yTest)
# png("cm_random_forest.png")
# p<-tableGrob(cm$table)
# grid.arrange(p)
# dev.off()
test_assignment_data <- read.csv('data/pml-testing.csv')
ids <- test_assignment_data$problem_id
tad <- test_assignment_data[,names(trainData[,-ncol(trainData)])]
results <- predict(modelRf, tad)
answers = rep("A", 20)
pml_write_files = function(x){
n = length(x)
for(i in 1:n){
filename = paste0("problem_id_",i,".txt")
write.table(x[i],file=filename,quote=FALSE,row.names=FALSE,col.names=FALSE)
}
}
pml_write_files(results)
dim(test_assignment_data)
topnNFeatures <- modelRf$importance[,"A"][order(modelRf$importance[,"A"], decreasing = TRUE)][4]
featurePlot(x=trainData[,topNFeatures],
y = trainData[,ncol(trainData)],
plot='density') |
source("libs/functions.R")
echo("BacGWAS command line: ", commandArgs(trailingOnly = TRUE))
options("scipen" = 100, "digits" = 4)
.htmlOptions <- c("smartypants", "base64_images", "toc")
.startWd <- getwd()
.usageString <- "
Usage:
bacgwas.R --plugin=NAME --input=DIR --output=DIR [--html]
Options:
--plugin=NAME Name of plugin to use. Corresponds to dir name within
./plugins folder.
--input=DIR Path to input directory. Input itself is plugin-specific,
please read plugin docs for details.
--output=DIR Path to output directory. Actual output files are specific to
selected plugin type.
--html Switch plugin to html mode (check if plugin supports this
mode before trying this option).
"
includer(c("knitr", "Cairo", "markdown", "docopt"))
source("conf/config.R")
.opt <- docopt(.usageString)
.plugin_home <- normalizePath(
paste("plugins", .opt$plugin, sep = "/"),
winslash = "/",
mustWork = TRUE
)
.input <- normalizePath(.opt$input, winslash = "/", mustWork = TRUE)
.output <- normalizePath(.opt$output, winslash = "/", mustWork = FALSE)
if (!file.exists(.output))
dir.create(.opt$output, recursive = TRUE)
tryCatch({
setwd(.plugin_home)
cparams <- c(config$common, config[[.opt$plugin]])
if (.opt$html) {
if (file.exists("init.rmd")) {
.markdownFile <- tempfile(
pattern = "temp",
tmpdir = tempdir(),
fileext = ".md"
)
.markdownFile <- normalizePath(
.markdownFile,
winslash = "/",
mustWork = FALSE
)
.opt$picsdir <- paste0(tempdir(), "/figure")
.picsdir <- normalizePath(.opt$picsdir, winslash = "/", mustWork = FALSE)
opts_chunk$set(
dev = "png",
self.contained = TRUE,
dpi = 96,
dev.args = list(type = "cairo"),
fig.path = sub("([^/])$", "\\1/", .picsdir)
)
.report <- normalizePath(
paste(.output, "result.html", sep = "/"),
winslash = "/",
mustWork = FALSE
)
.template <- normalizePath("init.rmd", winslash = "/", mustWork = TRUE)
tryCatch({
inject_args(cparams)
knit(.template, .markdownFile, quiet = TRUE)
markdownToHTML(
.markdownFile,
output = .report,
options = .htmlOptions,
fragment.only = FALSE
)
}, finally = {
if (file.exists(.markdownFile)) {
echo("Remove intermediate markdown file: ", .markdownFile)
file.remove(.markdownFile)
}
if (file.exists(.picsdir)) {
echo("Remove pictures dir: ", .picsdir)
unlink(.picsdir, recursive = TRUE)
}
})
} else {
echo("HTML mode is not supported for selected plugin")
}
} else {
if (file.exists("init.R")) {
source("init.R")
if (exists("plugin_do")) {
inject_args(cparams)
plugin_do(.input, .output)
} else {
echo("Incorrect plugin: function plugin_do not defined!")
}
} else {
echo("Text mode is not supported for selected plugin")
}
}
}, finally = {
setwd(.startWd)
})
| /bacgwas.R | permissive | ikavalio/MDRTB-pipe | R | false | false | 3,183 | r | source("libs/functions.R")
echo("BacGWAS command line: ", commandArgs(trailingOnly = TRUE))
options("scipen" = 100, "digits" = 4)
.htmlOptions <- c("smartypants", "base64_images", "toc")
.startWd <- getwd()
.usageString <- "
Usage:
bacgwas.R --plugin=NAME --input=DIR --output=DIR [--html]
Options:
--plugin=NAME Name of plugin to use. Corresponds to dir name within
./plugins folder.
--input=DIR Path to input directory. Input itself is plugin-specific,
please read plugin docs for details.
--output=DIR Path to output directory. Actual output files are specific to
selected plugin type.
--html Switch plugin to html mode (check if plugin supports this
mode before trying this option).
"
includer(c("knitr", "Cairo", "markdown", "docopt"))
source("conf/config.R")
.opt <- docopt(.usageString)
.plugin_home <- normalizePath(
paste("plugins", .opt$plugin, sep = "/"),
winslash = "/",
mustWork = TRUE
)
.input <- normalizePath(.opt$input, winslash = "/", mustWork = TRUE)
.output <- normalizePath(.opt$output, winslash = "/", mustWork = FALSE)
if (!file.exists(.output))
dir.create(.opt$output, recursive = TRUE)
tryCatch({
setwd(.plugin_home)
cparams <- c(config$common, config[[.opt$plugin]])
if (.opt$html) {
if (file.exists("init.rmd")) {
.markdownFile <- tempfile(
pattern = "temp",
tmpdir = tempdir(),
fileext = ".md"
)
.markdownFile <- normalizePath(
.markdownFile,
winslash = "/",
mustWork = FALSE
)
.opt$picsdir <- paste0(tempdir(), "/figure")
.picsdir <- normalizePath(.opt$picsdir, winslash = "/", mustWork = FALSE)
opts_chunk$set(
dev = "png",
self.contained = TRUE,
dpi = 96,
dev.args = list(type = "cairo"),
fig.path = sub("([^/])$", "\\1/", .picsdir)
)
.report <- normalizePath(
paste(.output, "result.html", sep = "/"),
winslash = "/",
mustWork = FALSE
)
.template <- normalizePath("init.rmd", winslash = "/", mustWork = TRUE)
tryCatch({
inject_args(cparams)
knit(.template, .markdownFile, quiet = TRUE)
markdownToHTML(
.markdownFile,
output = .report,
options = .htmlOptions,
fragment.only = FALSE
)
}, finally = {
if (file.exists(.markdownFile)) {
echo("Remove intermediate markdown file: ", .markdownFile)
file.remove(.markdownFile)
}
if (file.exists(.picsdir)) {
echo("Remove pictures dir: ", .picsdir)
unlink(.picsdir, recursive = TRUE)
}
})
} else {
echo("HTML mode is not supported for selected plugin")
}
} else {
if (file.exists("init.R")) {
source("init.R")
if (exists("plugin_do")) {
inject_args(cparams)
plugin_do(.input, .output)
} else {
echo("Incorrect plugin: function plugin_do not defined!")
}
} else {
echo("Text mode is not supported for selected plugin")
}
}
}, finally = {
setwd(.startWd)
})
|
#### 1. 데이터 전처리 ####
library(tidyverse); library(reshape2); library(tibble); library(stringr)
rdata <- list()
#### 가. 전세계 확진자와 사망자 등 ####
rdata$url <- c("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv",
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv",
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv")
# 날짜 및 나라별 확진자 수
rdata$confirmedCases <- read_csv(rdata$url[1]) %>% select(-c(Lat,Long)) %>%
melt(id=c('Country/Region','Province/State')) %>%
rename("Country"=1, "State"=2, "Variable"=3, "Confirmed"=4) %>%
group_by(Country, Variable) %>% summarise(Confirmed=sum(Confirmed)) %>%
rename("Country"=1,"Date"=2,"Confirmed"=3)
# 날짜 및 나라별 사망자 수
rdata$DeathCases <- read_csv(rdata$url[2]) %>% select(-c(Lat,Long)) %>%
melt(id=c('Country/Region','Province/State')) %>%
rename("Country"=1,State=2, "Variable"=3, "Deaths"=4) %>%
group_by(Country, Variable) %>% summarise(Confirmed=sum(Deaths)) %>%
rename("Country"=1,"Date"=2,"Deaths"=3)
# 날짜 및 나라별 완치자 수
rdata$recoveredCases <- read_csv(rdata$url[3]) %>% select(-c(Lat,Long)) %>%
melt(id=c('Country/Region','Province/State')) %>%
rename("Country"=1,State=2, "Variable"=3, "Recovered"=4) %>%
group_by(Country, Variable) %>% summarise(Confirmed=sum(Recovered)) %>%
rename("Country"=1,"Date"=2,"Recovered"=3)
# 확진자, 사망자, 완치자 합치기
rdata$World <- merge(merge(rdata$confirmedCases, rdata$DeathCases,
by.y=c("Country","Date")), rdata$recoveredCases, by.y=c("Country","Date")) %>%
mutate(Date=as.Date(.$Date, "%m/%d/%y"))
# 타이완에 *표 없애기, 우리나라 Korea로 표현하기, 국가 이름 일치시키기
rdata$World$Country <- gsub("Taiwan\\*", "Taiwan", rdata$World$Country)
rdata$World$Country <- gsub("Korea\\, South", "Korea", rdata$World$Country)
# 사망률 계산하기
head(rdata$World <- rdata$World %>%
mutate(DeathRate=ifelse(Confirmed==0, 0, 100*Deaths/Confirmed)) %>%
arrange(Country, Date))
max(rdata$World$Date)
#### 나. 나라별 인구수 ####
rdata$Population <- read_csv("data/Population.csv") %>%
filter(Year=="2019") %>% select(c(1,3)) # 인구는 2019년 기준
# 국가 이름 확인
# setdiff(rdata$World$Country, rdata$Population$Country)
# unique(rdata$World$Country)
# rdata$Population$Country
# rdata$Population %>% filter(Country=="Timor")
# 이름 일치시키기(페로, 홍콩, 팔레스타인 제외)
rdata$Population$Country <- gsub("South Korea", "Korea", rdata$Population$Country)
rdata$Population$Country <- gsub("United States", "US", rdata$Population$Country)
rdata$Population$Country <- gsub("Czech Republic", "Czechia",
rdata$Population$Country)
rdata$Population$Country <- gsub("East Timorc", "Timorc", rdata$Population$Country)
rdata$World$Country <- gsub("Bahamas, The", "Bahamas", rdata$World$Country)
rdata$World$Country <- gsub("North Macedonia", "Macedonia", rdata$World$Country)
rdata$World$Country <- gsub("Gambia, The", "Gambia", rdata$World$Country)
rdata$World$Country <- gsub("East Timor", "Timor", rdata$World$Country)
# 검사자 수, 인구, 확진자, 사망자, 완치자 데이터 모두 합치기
head(data <- merge(rdata$Population, rdata$World, by='Country') %>%
arrange(Country, Date))
# 백만명당 확진자, 사망자 구하기
data <- data %>% mutate(ConfirmedperM=Confirmed*1000000/Population) %>%
mutate(DeathsperM=Deaths*1000000/Population)
#### 2. 변화 추세 비교 ####
#### 가. 중국, 한국, 이탈리아 확진자 수 비교 ####
library(gganimate); library(scales)
data %>% filter(Date==Sys.Date()-1)
# 전날 데이터가 없으면 Sys.Date()-1로, 있으면 Sys.Date()로 해주세요.
print(China <-data %>% filter(Country=="China" &
Date>="2020-01-23" & Date<Sys.Date()) %>% arrange(Country, Date))
print(Italy <-data %>% filter(Country=="Italy" &
Date>="2020-02-22" & Date<Sys.Date()) %>% arrange(Country, Date))
print(Korea <-data %>% filter(Country=="Korea" &
Date>="2020-02-18" & Date<Sys.Date()) %>% arrange(Country, Date))
China$Date <- c(1:nrow(China))
Italy$Date <- c(1:nrow(Italy))
Korea$Date <- c(1:nrow(Korea))
# 전날 데이터가 없으면 nrow(Iraq)-1로, 있으면 nrow(Iraq)로 수정해 주세요.
Line <- rbind(China[1:nrow(Italy),], Italy[1:nrow(Italy),],
Korea[1:nrow(Italy),])
print(result <- ggplot(Line, aes(x=Date, y=Confirmed, color=Country)) +
scale_y_continuous(labels=comma) + theme_classic() +
geom_line(size=1.2) + geom_point(size=5) +
geom_segment(aes(xend=max(Date)+1, yend=Confirmed), linetype=2) +
geom_text(aes(x=max(Date)+5,
label=paste0(comma(Confirmed, accuracy=1))), size=7) +
theme(legend.position=c(0.3, 0.8), text=element_text(size=25),
plot.margin=margin(10, 30, 10, 10)) +
transition_reveal(Date) + view_follow(fixed_y=T) +
coord_cartesian(clip='off'))
animate(result, 300, fps=10, duration=30, end_pause=100, width=500, height=400,
renderer=gifski_renderer("ChinaItalyKorea.gif"))
#### 나. 백만명당 확진자 수 비교 ####
print(result <- ggplot(Line, aes(x=Date, y=ConfirmedperM, color=Country)) +
scale_y_continuous(labels=comma) + theme_classic() +
labs(y = "Confirmed Cases per million")+
geom_line(size=1.2) + geom_point(size=5) +
geom_segment(aes(xend=max(Date)+1, yend=ConfirmedperM), linetype=2) +
geom_text(aes(x=max(Date)+5,
label=paste0(comma(ConfirmedperM, accuracy=1))), size=7) +
theme(legend.position=c(0.3, 0.8), text=element_text(size=25),
plot.margin=margin(10, 30, 10, 10)) +
transition_reveal(Date) + view_follow(fixed_y=T) +
coord_cartesian(clip='off'))
animate(result, 300, fps=10, duration=30, end_pause=100, width=500, height=400,
renderer=gifski_renderer("CIKConfirmedperM.gif"))
#### 다. 사망자 수 비교 ####
print(result <- ggplot(Line, aes(x=Date, y=Deaths, color=Country)) +
scale_y_continuous(labels=comma) + theme_classic() +
labs(y = "Confirmed Cases per million")+
geom_line(size=1.2) + geom_point(size=5) +
geom_segment(aes(xend=max(Date)+1, yend=Deaths), linetype=2) +
geom_text(aes(x=max(Date)+5,
label=paste0(comma(Deaths, accuracy=1))), size=7) +
theme(legend.position=c(0.3, 0.8), text=element_text(size=25),
plot.margin=margin(10, 30, 10, 10)) +
transition_reveal(Date) + view_follow(fixed_y=T) +
coord_cartesian(clip='off'))
animate(result, 300, fps=10, duration=30, end_pause=100, width=500, height=400,
renderer=gifski_renderer("CIKDeaths.gif"))
| /20 이탈리아현황.R | permissive | seeun1203/COVID-19 | R | false | false | 7,110 | r | #### 1. 데이터 전처리 ####
library(tidyverse); library(reshape2); library(tibble); library(stringr)
rdata <- list()
#### 가. 전세계 확진자와 사망자 등 ####
rdata$url <- c("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv",
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv",
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv")
# 날짜 및 나라별 확진자 수
rdata$confirmedCases <- read_csv(rdata$url[1]) %>% select(-c(Lat,Long)) %>%
melt(id=c('Country/Region','Province/State')) %>%
rename("Country"=1, "State"=2, "Variable"=3, "Confirmed"=4) %>%
group_by(Country, Variable) %>% summarise(Confirmed=sum(Confirmed)) %>%
rename("Country"=1,"Date"=2,"Confirmed"=3)
# 날짜 및 나라별 사망자 수
rdata$DeathCases <- read_csv(rdata$url[2]) %>% select(-c(Lat,Long)) %>%
melt(id=c('Country/Region','Province/State')) %>%
rename("Country"=1,State=2, "Variable"=3, "Deaths"=4) %>%
group_by(Country, Variable) %>% summarise(Confirmed=sum(Deaths)) %>%
rename("Country"=1,"Date"=2,"Deaths"=3)
# 날짜 및 나라별 완치자 수
rdata$recoveredCases <- read_csv(rdata$url[3]) %>% select(-c(Lat,Long)) %>%
melt(id=c('Country/Region','Province/State')) %>%
rename("Country"=1,State=2, "Variable"=3, "Recovered"=4) %>%
group_by(Country, Variable) %>% summarise(Confirmed=sum(Recovered)) %>%
rename("Country"=1,"Date"=2,"Recovered"=3)
# 확진자, 사망자, 완치자 합치기
rdata$World <- merge(merge(rdata$confirmedCases, rdata$DeathCases,
by.y=c("Country","Date")), rdata$recoveredCases, by.y=c("Country","Date")) %>%
mutate(Date=as.Date(.$Date, "%m/%d/%y"))
# 타이완에 *표 없애기, 우리나라 Korea로 표현하기, 국가 이름 일치시키기
rdata$World$Country <- gsub("Taiwan\\*", "Taiwan", rdata$World$Country)
rdata$World$Country <- gsub("Korea\\, South", "Korea", rdata$World$Country)
# 사망률 계산하기
head(rdata$World <- rdata$World %>%
mutate(DeathRate=ifelse(Confirmed==0, 0, 100*Deaths/Confirmed)) %>%
arrange(Country, Date))
max(rdata$World$Date)
#### 나. 나라별 인구수 ####
rdata$Population <- read_csv("data/Population.csv") %>%
filter(Year=="2019") %>% select(c(1,3)) # 인구는 2019년 기준
# 국가 이름 확인
# setdiff(rdata$World$Country, rdata$Population$Country)
# unique(rdata$World$Country)
# rdata$Population$Country
# rdata$Population %>% filter(Country=="Timor")
# 이름 일치시키기(페로, 홍콩, 팔레스타인 제외)
rdata$Population$Country <- gsub("South Korea", "Korea", rdata$Population$Country)
rdata$Population$Country <- gsub("United States", "US", rdata$Population$Country)
rdata$Population$Country <- gsub("Czech Republic", "Czechia",
rdata$Population$Country)
rdata$Population$Country <- gsub("East Timorc", "Timorc", rdata$Population$Country)
rdata$World$Country <- gsub("Bahamas, The", "Bahamas", rdata$World$Country)
rdata$World$Country <- gsub("North Macedonia", "Macedonia", rdata$World$Country)
rdata$World$Country <- gsub("Gambia, The", "Gambia", rdata$World$Country)
rdata$World$Country <- gsub("East Timor", "Timor", rdata$World$Country)
# 검사자 수, 인구, 확진자, 사망자, 완치자 데이터 모두 합치기
head(data <- merge(rdata$Population, rdata$World, by='Country') %>%
arrange(Country, Date))
# 백만명당 확진자, 사망자 구하기
data <- data %>% mutate(ConfirmedperM=Confirmed*1000000/Population) %>%
mutate(DeathsperM=Deaths*1000000/Population)
#### 2. 변화 추세 비교 ####
#### 가. 중국, 한국, 이탈리아 확진자 수 비교 ####
library(gganimate); library(scales)
data %>% filter(Date==Sys.Date()-1)
# 전날 데이터가 없으면 Sys.Date()-1로, 있으면 Sys.Date()로 해주세요.
print(China <-data %>% filter(Country=="China" &
Date>="2020-01-23" & Date<Sys.Date()) %>% arrange(Country, Date))
print(Italy <-data %>% filter(Country=="Italy" &
Date>="2020-02-22" & Date<Sys.Date()) %>% arrange(Country, Date))
print(Korea <-data %>% filter(Country=="Korea" &
Date>="2020-02-18" & Date<Sys.Date()) %>% arrange(Country, Date))
China$Date <- c(1:nrow(China))
Italy$Date <- c(1:nrow(Italy))
Korea$Date <- c(1:nrow(Korea))
# 전날 데이터가 없으면 nrow(Iraq)-1로, 있으면 nrow(Iraq)로 수정해 주세요.
Line <- rbind(China[1:nrow(Italy),], Italy[1:nrow(Italy),],
Korea[1:nrow(Italy),])
print(result <- ggplot(Line, aes(x=Date, y=Confirmed, color=Country)) +
scale_y_continuous(labels=comma) + theme_classic() +
geom_line(size=1.2) + geom_point(size=5) +
geom_segment(aes(xend=max(Date)+1, yend=Confirmed), linetype=2) +
geom_text(aes(x=max(Date)+5,
label=paste0(comma(Confirmed, accuracy=1))), size=7) +
theme(legend.position=c(0.3, 0.8), text=element_text(size=25),
plot.margin=margin(10, 30, 10, 10)) +
transition_reveal(Date) + view_follow(fixed_y=T) +
coord_cartesian(clip='off'))
animate(result, 300, fps=10, duration=30, end_pause=100, width=500, height=400,
renderer=gifski_renderer("ChinaItalyKorea.gif"))
#### 나. 백만명당 확진자 수 비교 ####
print(result <- ggplot(Line, aes(x=Date, y=ConfirmedperM, color=Country)) +
scale_y_continuous(labels=comma) + theme_classic() +
labs(y = "Confirmed Cases per million")+
geom_line(size=1.2) + geom_point(size=5) +
geom_segment(aes(xend=max(Date)+1, yend=ConfirmedperM), linetype=2) +
geom_text(aes(x=max(Date)+5,
label=paste0(comma(ConfirmedperM, accuracy=1))), size=7) +
theme(legend.position=c(0.3, 0.8), text=element_text(size=25),
plot.margin=margin(10, 30, 10, 10)) +
transition_reveal(Date) + view_follow(fixed_y=T) +
coord_cartesian(clip='off'))
animate(result, 300, fps=10, duration=30, end_pause=100, width=500, height=400,
renderer=gifski_renderer("CIKConfirmedperM.gif"))
#### 다. 사망자 수 비교 ####
print(result <- ggplot(Line, aes(x=Date, y=Deaths, color=Country)) +
scale_y_continuous(labels=comma) + theme_classic() +
labs(y = "Confirmed Cases per million")+
geom_line(size=1.2) + geom_point(size=5) +
geom_segment(aes(xend=max(Date)+1, yend=Deaths), linetype=2) +
geom_text(aes(x=max(Date)+5,
label=paste0(comma(Deaths, accuracy=1))), size=7) +
theme(legend.position=c(0.3, 0.8), text=element_text(size=25),
plot.margin=margin(10, 30, 10, 10)) +
transition_reveal(Date) + view_follow(fixed_y=T) +
coord_cartesian(clip='off'))
animate(result, 300, fps=10, duration=30, end_pause=100, width=500, height=400,
renderer=gifski_renderer("CIKDeaths.gif"))
|
# Hierarchical clustering
## libraries
require(scorecard) # split_df
require(FSA)
require(factoextra) # fviz_dend
require(fields) # image.plot
require(dplyr) # %>%
require(class)
require(caret)
require(dendextend) #circle dendogram
require(circlize) #circle dendogram
require(cluster)
## seed
seed = 123
set.seed(seed)
## split ratio
split.ratio = c(0.7, 0.3)
## number of classes in target variable
n.groups = 2
## functions
accFromCm = function(pred, true) { confusionMatrix(pred, true)$overall[1] }
factorizefeatures = function(dataset){
dataset$gender = as.factor(dataset$gender)
dataset$choles = as.factor(dataset$choles)
dataset$glucose = as.factor(dataset$glucose)
dataset$smoke = as.factor(dataset$smoke)
dataset$alcohol = as.factor(dataset$alcohol)
dataset$active = as.factor(dataset$active)
dataset$cardio = as.factor(dataset$cardio)
return(dataset)
}
unfactorizefeatures = function(dataset){
dataset$gender = as.numeric(as.character(dataset$gender))
dataset$choles = as.numeric(as.character(dataset$choles))
dataset$glucose = as.numeric(as.character(dataset$glucose))
dataset$smoke = as.numeric(as.character(dataset$smoke))
dataset$alcohol = as.numeric(as.character(dataset$alcohol))
dataset$active = as.numeric(as.character(dataset$active))
dataset$cardio = as.numeric(as.character(dataset$cardio))
return(dataset)
}
reduce.data.set = function(dataset, leng, seed){
set.seed(seed)
dataset$cardio = as.factor(dataset$cardio)
dataset = dataset %>%
group_by(cardio) %>%
sample_n(size = (leng/2) )
return(dataset)
}
standardize.data.set = function(dataset){
# cannot have factorized data, must be numeric
dataset$cardio = as.numeric(as.character(dataset$cardio))
# standardization (imperative)
dataset = scale(dataset)
return(dataset) # return(data.frame(dataset))
}
feature.selection.with.t.stat = function(dataset){
dataset = data.frame(dataset)
s=c(rep(0,11)) # vector to store the values of t statistic
for(i in 1:11){
s[i] = t.test(dataset[dataset$cardio==0,i],
dataset[dataset$cardio==1,i],
var.equal=TRUE)$statistic
}
# we want the biggest t statistic
b = order(abs(s))
print(names(dataset[,b[1:3]])) # removed ones
return(dataset[,b[4:11]]) #removing the 3 lowest
}
# Turn a hierarchical clustering into a classifier: cut the dendrogram into
# n.groups clusters, then 1-NN-assign train and test points to those cluster
# labels and score accuracy against the true labels.
# `label` (new, default NULL, backward compatible) is an optional description
# prepended to the result, so the result matches a (method, train, test) row;
# without it the original list(train_acc, test_acc) is returned unchanged.
# Based on https://stackoverflow.com/questions/21064315
get.hclust.train.test.error <- function(model, n.groups, x.train, x.test,
                                        y.train, y.test, label = NULL) {
  # cutree labels start at 1; shift to 0-based to line up with cardio coding
  groups <- cutree(model, k = n.groups) - 1
  pred.train <- knn(train = x.train, test = x.train, cl = groups, k = 1)
  pred.test  <- knn(train = x.train, test = x.test,  cl = groups, k = 1)
  accs <- list(accFromCm(pred.train, y.train), accFromCm(pred.test, y.test))
  if (is.null(label)) accs else c(list(label), accs)
}
# Heatmap of a numeric matrix via fields::image.plot, transposed so that rows
# of `x` (patients) run along the y axis and columns along x.
plot.image.plot <- function(x, xlab, main) {
  image.plot(seq_len(ncol(x)), seq_len(nrow(x)),
             t(x),
             col = tim.colors(500),
             xlab = xlab,
             ylab = "Patients",
             main = main,
             cex.lab = 1)
}
#############################################
#############################################
# Euclidean distance analysis: load the cardio data, draw a small balanced
# subsample, split train/test, and z-score standardize the features.
# NOTE(review): absolute, user-specific working directory — breaks on any
# other machine; prefer project-relative paths (e.g. the here package).
setwd("C:/Users/mjlav/MEOCloud/Universidade/mestrado_up/ano1/statistic_data_analysis/project/sda_project")
## read data (no transformations on the data)
data.set= read.csv("./data/cardio_data.csv")
headtail(data.set)
## dimension reduction: balanced subsample of 50 rows (25 per cardio class)
data.set = reduce.data.set(data.set, 50, seed)
## split data into train/test by split.ratio (defined at the top of the script)
tts = split_df(data.set, ratio = split.ratio, seed = seed)
## standardize the data for euclidean distance
std.train = standardize.data.set(tts$train)
std.test = standardize.data.set(tts$test)
###
## complete model
x.train.1 = std.train[,-12]
x.test.1 = std.test[,-12]
# plot the data, diseases in rows and predictors in columns
plot.image.plot(x.train.1,
"age,gender,height,weight,aphi,aplo,choles,glucose,smoke,alcohol,active",
"main" )
heatmap(x.train.1)
# The rows are ordered based on the order of the hierarchical clustering.
# The colored bar indicates the cardio category each row belongs to.
# The color in the heatmap indicates the length of each measurement
# (from light yellow to dark red).
# plot dendograms for different methods
# label colors represent true value
colors = c("#00AFBB","#FC4E07")
maped.true.values = as.numeric(tts$train$cardio) # as.numeric because colors must be positive
df = data.frame(0,0,0,0)
names(df) = c("method", "order", "dendogram", "accuracy")
methods.list = list("ward.D", "ward.D2", "single", "complete", "average", "mcquitty", "median", "centroid")
dist = daisy(x.train.1, metric="euclidean")
for(m in methods.list){
print(m)
hier.mod = hclust(dist, method=m)
label.colors = colors[maped.true.values[hier.mod$order]]
d = fviz_dend(hier.mod, k=n.groups, cex=0.5,
#k_colors = colors,
label_cols = label.colors,
#horiz = T,
ggtheme=theme_minimal(),
main=m)
print(d)
df[nrow(df)+1,] = list(m,
list(hier.mod$order),
list(d),
list(get.hclust.train.test.error(hier.mod, n.groups,
x.train.1, x.test.1,
as.factor(tts$train$cardio),as.factor(tts$test$cardio))))
}
# by looking at plots (data.set size = 50), ward and ward.d2 are the best
df$method[3]
df$accuracy[3]
# the accuracy of Ward.D2 is the best
euclidean.dist = daisy(x.train.1, metric ="euclidean")
hier.mod = hclust(euclidean.dist, method="ward.D2")
# color using kmeans cluster
km.clust = kmeans(x.train.1, n.groups)$cluster
label.colors = colors[km.clust[hier.mod$order]]
fviz_dend(hier.mod, k = n.groups,
k_colors = colors,
label_cols = label.colors,
cex = 0.6, main="Ward.D2 - k means coloring")
# do hierarchical classification using the ward.D2 method
# patients order
patients.order = hier.mod$order
label.colors = colors[maped.true.values[hier.mod$order]]
# draw the dendrogram.
fviz_dend(hier.mod, k=n.groups, cex=0.5,
#k_colors = colors,
label_cols = label.colors,
ggtheme=theme_minimal(), main="Ward.D2 - true coloring")
dend = as.dendrogram(hier.mod)
par(mar = rep(0,4))
circlize_dendrogram(dend)
# heatmap
plot.image.plot(x.train.1[patients.order,],
"age,gender,height,weight,aphi,aplo,choles,glucose,smoke,alcohol,active",
"patients order" )
# predictors order
euclid.dist.pred.1 = dist(t(x.train.1)) # euclidean distance
hier.mod.pred.1 = hclust(euclid.dist.pred.1, method="average")
predictors.order = hier.mod.pred.1$order
# draw the dendrogram.
fviz_dend(hier.mod.pred.1, k=n.groups, cex=0.5, k_colors = c("#00AFBB","#FC4E07"),
color_labels_by_k=TRUE, ggtheme=theme_minimal())
dend.pred.1 = as.dendrogram(hier.mod.pred.1)
par(mar = rep(0,4))
circlize_dendrogram(dend.pred.1)
# heatmap
plot.image.plot(x.train.1[,predictors.order],
"aplo,aphi,age,choles,glucose,active,weight,gender,height,smoke,alcohol",
"predictors order" )
# xlab must be equal to names(data.set[,predictors.order])
# patients and predictors order
plot.image.plot(x.train.1[patients.order,predictors.order],
"aplo,aphi,age,choles,glucose,active,weight,gender,height,smoke,alcohol",
"patients and predictors order" )
######################################################################
######################################################################
# Gowers distance
setwd("C:/Users/mjlav/MEOCloud/Universidade/mestrado_up/ano1/statistic_data_analysis/project/sda_project")
## read data - no transformations on the data)
data.set= read.csv("./data/cardio_data.csv")
headtail(data.set)
## dimension reduction
data.set = reduce.data.set(data.set, 50, seed)
## factorize the data for gowers distance (so the categorical variables are treated with nominal scale)
data.set = factorizefeatures(data.set)
## split data
tts = split_df(data.set, ratio = split.ratio, seed = seed)
###
## complete model
x.train.1 = tts$train[,-12]
x.test.1 = tts$test[,-12]
# plot the data, diseases in rows and predictors in columns
# plot.image.plot(x.train.1,
# "age,gender,height,weight,aphi,aplo,choles,glucose,smoke,alcohol,active",
# "main" )
# heatmap(x.train.1)
# The rows are ordered based on the order of the hierarchical clustering.
# The colored bar indicates the cardio category each row belongs to.
# The color in the heatmap indicates the length of each measurement
# (from light yellow to dark red).
# plot dendograms for different methods
colors = c("#00AFBB","#FC4E07")
maped.true.values = as.numeric(tts$train$cardio) # as.numeric because colors must be positive
label.colors = colors[maped.true.values[hier.mod$order]]
df = data.frame(0,0,0,0)
names(df) = c("method", "order", "dendogram", "accuracy")
methods.list = list("ward.D", "ward.D2", "single", "complete", "average", "mcquitty", "median", "centroid")
gower.dist = daisy(x.train.1, metric ="gower")
for(m in methods.list){
print(m)
hier.mod = hclust(gower.dist, method=m)
d = fviz_dend(hier.mod, k=n.groups, cex=0.5,
#k_colors = colors,
label_cols = label.colors,
#horiz = T,
ggtheme=theme_minimal(),
main=m)
print(d)
df[nrow(df)+1,] = list(m,
list(hier.mod$order),
list(d),
list(get.hclust.train.test.error(hier.mod, n.groups,
x.train.1, x.test.1,
as.factor(tts$train$cardio),as.factor(tts$test$cardio))))
}
df$accuracy
df$method[3]
df$accuracy[3]
df$method[5]
df$accuracy[5]
method = "ward.D2"
# color using kmeans cluster
km.clust = kmeans(x.train.1, n.groups)$cluster
gower.dist = daisy(x.train.1, metric ="gower")
hier.mod = hclust(gower.dist, method=method)
fviz_dend(hier.mod, k = n.groups,
k_colors = c("#00AFBB","#FC4E07"),
label_cols = km.clust[hier.mod$order], cex = 0.6)
# do hierarchical classification using the ward.D2 link
# patients order
gower.dist.pat.1 = daisy(x.train.1, metric ="gower")
hier.mod.pat.1 = hclust(gower.dist.pat.1, method=method)
patients.order = hier.mod.pat.1$order
# draw the dendrogram.
fviz_dend(hier.mod.pat.1, k=n.groups, cex=0.5,
#k_colors = colors,
label_cols = label.colors,
#horiz = T,
ggtheme=theme_minimal(),
main=paste(method, " - true coloring"))
# dend.pat.1 = as.dendrogram(hier.mod.pat.1)
# par(mar = rep(0,4))
# circlize_dendrogram(dend.pat.1)
# heatmap
# works better with standardized variables...
plot.image.plot(unfactorizefeatures(x.train.1[patients.order,]),
"age,gender,height,weight,aphi,aplo,choles,glucose,smoke,alcohol,active",
"patients order" )
# # predictors order
# euclid.dist.pred.1 = dist(t(x.train.1)) # euclidean distance
# hier.mod.pred.1 = hclust(gower.dist.pred.1, method="average")
# predictors.order = hier.mod.pred.1$order
#
# # draw the dendrogram.
# fviz_dend(hier.mod.pred.1, k=n.groups, cex=0.5, k_colors = c("#00AFBB","#FC4E07"),
# color_labels_by_k=TRUE, ggtheme=theme_minimal())
#
# dend.pred.1 = as.dendrogram(hier.mod.pred.1)
# par(mar = rep(0,4))
# circlize_dendrogram(dend.pred.1)
#
# # heatmap
# plot.image.plot(x.train.1[,predictors.order],
# "aplo,aphi,age,choles,glucose,active,weight,gender,height,smoke,alcohol",
# "predictors order" )
#
#
# # xlab must be equal to names(data.set[,predictors.order])
# # patients and predictors order
# plot.image.plot(x.train.1[patients.order,predictors.order],
# "aplo,aphi,age,choles,glucose,active,weight,gender,height,smoke,alcohol",
# "patients and predictors order" )
## to be checked
###
## feature selection - based on EDA of cardio.r
# remove gender, smoke and alcohol
x.train.2 = tts$train[, -c(2, 9, 10, 12)]
x.test.2 = tts$test[, -c(2, 9, 10, 12)]
# plot the data, diseases in rows and predictors in columns
image.plot(1:ncol(x.train.2), 1:nrow(x.train.2), t(x.train.2), # t(x) matrix transpose
col=tim.colors(8),
xlab="alcohol,glucose,smoke,age,weight,choles,aplo,aphi", ylab="No. cardio disease",
cex.lab=1)
# Do hierarchical classification using the average link
euclid.dist.2 = dist(x.train.2) # euclidean distance
hier.mod.2 = hclust(euclid.dist.2, method="average")
# draw the dendrogram.
fviz_dend(hier.mod.2, k =n.groups, cex = 0.5, k_colors = c("#00AFBB","#FC4E07"),
color_labels_by_k = TRUE, ggtheme = theme_minimal())
###
## feature selection - based on t statistics
x.train.3 = feature.selection.with.t.stat(tts$train)
# removed "alcohol" "gender" "glucose"
names(x.train.3)
t = data.frame(tts$test)
x.test.3 = t[, names(x.train.3)]
# plot the data, diseases in rows and predictors in columns
image.plot(1:ncol(x.train.3), 1:nrow(x.train.3), t(x.train.3), # t(x) matrix transpose
col=tim.colors(8),
xlab="alcohol,glucose,smoke,age,weight,choles,aplo,aphi", ylab="No. cardio disease",
cex.lab=1)
# do hierarchical classification using the average link
euclid.dist.3 = dist(x.train.3) # euclidean distance
hier.mod.3 = hclust(euclid.dist.3, method="average")
# draw the dendrogram.
fviz_dend(hier.mod.3, k =n.groups, cex = 0.5, k_colors = c("#00AFBB","#FC4E07"),
color_labels_by_k = TRUE, ggtheme = theme_minimal())
### train test error
# NOTE(review): the original calls referenced an undefined object
# `hier.mod.1` and passed a 7th description argument that
# get.hclust.train.test.error() does not accept, so this section could not
# run.  The complete-model clustering computed above is `hier.mod.pat.1`
# (confirm that is the intended model); the description is attached here
# while building the result row instead.
# NOTE(review): at this point x.train.1 holds factor columns (Gower section);
# knn() requires numeric features — confirm the intended inputs.
hier.tt.res <- data.frame(method = character(0), train.error = numeric(0),
                          test.error = numeric(0), stringsAsFactors = FALSE)
# Score one fitted hierarchical model and append a labeled row of accuracies.
add.hier.result <- function(res.df, model, x.train, x.test, label) {
  acc <- get.hclust.train.test.error(model, n.groups, x.train, x.test,
                                     as.factor(tts$train$cardio),
                                     as.factor(tts$test$cardio))
  rbind(res.df,
        data.frame(method = label, train.error = acc[[1]],
                   test.error = acc[[2]], stringsAsFactors = FALSE))
}
hier.tt.res <- add.hier.result(hier.tt.res, hier.mod.pat.1, x.train.1, x.test.1,
                               'with outliers - complete model')
hier.tt.res <- add.hier.result(hier.tt.res, hier.mod.2, x.train.2, x.test.2,
                               'with outliers - EDA feature selection')
hier.tt.res <- add.hier.result(hier.tt.res, hier.mod.3, x.train.3, x.test.3,
                               'with outliers - t stats feature selection')
hier.tt.res
| /1st_project/hierarchical_clust.R | no_license | mariajoaolavoura/sda_project | R | false | false | 15,495 | r | # Hierarchical clustering
## libraries
require(scorecard) # split_df
require(FSA)
require(factoextra) # fviz_dend
require(fields) # image.plot
require(dplyr) # %>%
require(class)
require(caret)
require(dendextend) #circle dendogram
require(circlize) #circle dendogram
require(cluster)
## seed
seed = 123
set.seed(seed)
## split ratio
split.ratio = c(0.7, 0.3)
## number of classes in target variable
n.groups = 2
## functions
# Overall accuracy of predicted vs. true labels, taken from the named
# "Accuracy" element of caret::confusionMatrix()$overall.
accFromCm <- function(pred, true) {
  cm <- confusionMatrix(pred, true)
  cm$overall[1]
}
# Convert the categorical columns of the cardio data set to factors so that
# distance measures (e.g. Gower) treat them on a nominal scale.  All other
# columns are left untouched; the input must contain every listed column.
factorizefeatures <- function(dataset) {
  cat_cols <- c("gender", "choles", "glucose", "smoke", "alcohol", "active", "cardio")
  dataset[cat_cols] <- lapply(dataset[cat_cols], as.factor)
  dataset
}
# Inverse of factorizefeatures(): decode the categorical factor columns back
# to their numeric level labels (via as.character to recover the original
# codes, not the internal factor integer codes).
unfactorizefeatures <- function(dataset) {
  cat_cols <- c("gender", "choles", "glucose", "smoke", "alcohol", "active", "cardio")
  dataset[cat_cols] <- lapply(dataset[cat_cols],
                              function(x) as.numeric(as.character(x)))
  dataset
}
# Reproducibly subsample the cardio data set down to `leng` rows, balanced
# across the two cardio classes (leng/2 rows drawn per class).
# Note: the result remains grouped by `cardio` (group_by without ungroup),
# exactly as before.
reduce.data.set <- function(dataset, leng, seed) {
  set.seed(seed)
  dataset$cardio <- as.factor(dataset$cardio)
  dataset %>%
    group_by(cardio) %>%
    sample_n(size = leng / 2)
}
# Z-score standardize every column (required before Euclidean distances).
# scale() needs numeric input, so the factor target is decoded first.
# Returns a numeric matrix carrying scale()'s center/scale attributes,
# not a data.frame.
standardize.data.set <- function(dataset) {
  dataset$cardio <- as.numeric(as.character(dataset$cardio))
  scale(dataset)
}
# Univariate feature selection via pooled two-sample t statistics of each
# predictor between the cardio==0 and cardio==1 groups.  The `n_drop`
# predictors with the smallest |t| are printed and removed; the rest are
# returned (without the target column).  Generalized from the original
# hard-coded 11 predictors: the target `cardio` is assumed to be the LAST
# column, all preceding columns are treated as predictors.
feature.selection.with.t.stat <- function(dataset, n_drop = 3) {
  dataset <- data.frame(dataset)
  p <- ncol(dataset) - 1  # predictors; target 'cardio' assumed last
  stopifnot(p > n_drop)
  s <- vapply(seq_len(p), function(i) {
    t.test(dataset[dataset$cardio == 0, i],
           dataset[dataset$cardio == 1, i],
           var.equal = TRUE)$statistic
  }, numeric(1))
  # rank predictors by |t|, ascending: weakest discriminators come first
  b <- order(abs(s))
  print(names(dataset[, b[seq_len(n_drop)]]))  # the removed predictors
  dataset[, b[(n_drop + 1):p]]
}
# Turn a hierarchical clustering into a classifier: cut the dendrogram into
# n.groups clusters, then 1-NN-assign train and test points to those cluster
# labels and score accuracy against the true labels.
# `label` (new, default NULL, backward compatible) is an optional description
# prepended to the result, so the result matches a (method, train, test) row;
# without it the original list(train_acc, test_acc) is returned unchanged.
# Based on https://stackoverflow.com/questions/21064315
get.hclust.train.test.error <- function(model, n.groups, x.train, x.test,
                                        y.train, y.test, label = NULL) {
  # cutree labels start at 1; shift to 0-based to line up with cardio coding
  groups <- cutree(model, k = n.groups) - 1
  pred.train <- knn(train = x.train, test = x.train, cl = groups, k = 1)
  pred.test  <- knn(train = x.train, test = x.test,  cl = groups, k = 1)
  accs <- list(accFromCm(pred.train, y.train), accFromCm(pred.test, y.test))
  if (is.null(label)) accs else c(list(label), accs)
}
# Heatmap of a numeric matrix via fields::image.plot, transposed so that rows
# of `x` (patients) run along the y axis and columns along x.
plot.image.plot <- function(x, xlab, main) {
  image.plot(seq_len(ncol(x)), seq_len(nrow(x)),
             t(x),
             col = tim.colors(500),
             xlab = xlab,
             ylab = "Patients",
             main = main,
             cex.lab = 1)
}
#############################################
#############################################
# Euclidean distance
setwd("C:/Users/mjlav/MEOCloud/Universidade/mestrado_up/ano1/statistic_data_analysis/project/sda_project")
## read data - no transformations on the data)
data.set= read.csv("./data/cardio_data.csv")
headtail(data.set)
## dimension reduction
data.set = reduce.data.set(data.set, 50, seed)
## split data
tts = split_df(data.set, ratio = split.ratio, seed = seed)
## standardize the data for eucidean distance
std.train = standardize.data.set(tts$train)
std.test = standardize.data.set(tts$test)
###
## complete model
x.train.1 = std.train[,-12]
x.test.1 = std.test[,-12]
# plot the data, diseases in rows and predictors in columns
plot.image.plot(x.train.1,
"age,gender,height,weight,aphi,aplo,choles,glucose,smoke,alcohol,active",
"main" )
heatmap(x.train.1)
# The rows are ordered based on the order of the hierarchical clustering.
# The colored bar indicates the cardio category each row belongs to.
# The color in the heatmap indicates the length of each measurement
# (from light yellow to dark red).
# plot dendograms for different methods
# label colors represent true value
colors = c("#00AFBB","#FC4E07")
maped.true.values = as.numeric(tts$train$cardio) # as.numeric because colors must be positive
df = data.frame(0,0,0,0)
names(df) = c("method", "order", "dendogram", "accuracy")
methods.list = list("ward.D", "ward.D2", "single", "complete", "average", "mcquitty", "median", "centroid")
dist = daisy(x.train.1, metric="euclidean")
for(m in methods.list){
print(m)
hier.mod = hclust(dist, method=m)
label.colors = colors[maped.true.values[hier.mod$order]]
d = fviz_dend(hier.mod, k=n.groups, cex=0.5,
#k_colors = colors,
label_cols = label.colors,
#horiz = T,
ggtheme=theme_minimal(),
main=m)
print(d)
df[nrow(df)+1,] = list(m,
list(hier.mod$order),
list(d),
list(get.hclust.train.test.error(hier.mod, n.groups,
x.train.1, x.test.1,
as.factor(tts$train$cardio),as.factor(tts$test$cardio))))
}
# by looking at plots (data.set size = 50), ward and ward.d2 are the best
df$method[3]
df$accuracy[3]
# the accuracy of Ward.D2 is the best
euclidean.dist = daisy(x.train.1, metric ="euclidean")
hier.mod = hclust(euclidean.dist, method="ward.D2")
# color using kmeans cluster
km.clust = kmeans(x.train.1, n.groups)$cluster
label.colors = colors[km.clust[hier.mod$order]]
fviz_dend(hier.mod, k = n.groups,
k_colors = colors,
label_cols = label.colors,
cex = 0.6, main="Ward.D2 - k means coloring")
# do hierarchical classification using the ward.D2 method
# patients order
patients.order = hier.mod$order
label.colors = colors[maped.true.values[hier.mod$order]]
# draw the dendrogram.
fviz_dend(hier.mod, k=n.groups, cex=0.5,
#k_colors = colors,
label_cols = label.colors,
ggtheme=theme_minimal(), main="Ward.D2 - true coloring")
dend = as.dendrogram(hier.mod)
par(mar = rep(0,4))
circlize_dendrogram(dend)
# heatmap
plot.image.plot(x.train.1[patients.order,],
"age,gender,height,weight,aphi,aplo,choles,glucose,smoke,alcohol,active",
"patients order" )
# predictors order
euclid.dist.pred.1 = dist(t(x.train.1)) # euclidean distance
hier.mod.pred.1 = hclust(euclid.dist.pred.1, method="average")
predictors.order = hier.mod.pred.1$order
# draw the dendrogram.
fviz_dend(hier.mod.pred.1, k=n.groups, cex=0.5, k_colors = c("#00AFBB","#FC4E07"),
color_labels_by_k=TRUE, ggtheme=theme_minimal())
dend.pred.1 = as.dendrogram(hier.mod.pred.1)
par(mar = rep(0,4))
circlize_dendrogram(dend.pred.1)
# heatmap
plot.image.plot(x.train.1[,predictors.order],
"aplo,aphi,age,choles,glucose,active,weight,gender,height,smoke,alcohol",
"predictors order" )
# xlab must be equal to names(data.set[,predictors.order])
# patients and predictors order
plot.image.plot(x.train.1[patients.order,predictors.order],
"aplo,aphi,age,choles,glucose,active,weight,gender,height,smoke,alcohol",
"patients and predictors order" )
######################################################################
######################################################################
# Gowers distance
setwd("C:/Users/mjlav/MEOCloud/Universidade/mestrado_up/ano1/statistic_data_analysis/project/sda_project")
## read data - no transformations on the data)
data.set= read.csv("./data/cardio_data.csv")
headtail(data.set)
## dimension reduction
data.set = reduce.data.set(data.set, 50, seed)
## factorize the data for gowers distance (so the categorical variables are treated with nominal scale)
data.set = factorizefeatures(data.set)
## split data
tts = split_df(data.set, ratio = split.ratio, seed = seed)
###
## complete model
x.train.1 = tts$train[,-12]
x.test.1 = tts$test[,-12]
# plot the data, diseases in rows and predictors in columns
# plot.image.plot(x.train.1,
# "age,gender,height,weight,aphi,aplo,choles,glucose,smoke,alcohol,active",
# "main" )
# heatmap(x.train.1)
# The rows are ordered based on the order of the hierarchical clustering.
# The colored bar indicates the cardio category each row belongs to.
# The color in the heatmap indicates the length of each measurement
# (from light yellow to dark red).
# plot dendograms for different methods
colors = c("#00AFBB","#FC4E07")
maped.true.values = as.numeric(tts$train$cardio) # as.numeric because colors must be positive
label.colors = colors[maped.true.values[hier.mod$order]]
df = data.frame(0,0,0,0)
names(df) = c("method", "order", "dendogram", "accuracy")
methods.list = list("ward.D", "ward.D2", "single", "complete", "average", "mcquitty", "median", "centroid")
gower.dist = daisy(x.train.1, metric ="gower")
for(m in methods.list){
print(m)
hier.mod = hclust(gower.dist, method=m)
d = fviz_dend(hier.mod, k=n.groups, cex=0.5,
#k_colors = colors,
label_cols = label.colors,
#horiz = T,
ggtheme=theme_minimal(),
main=m)
print(d)
df[nrow(df)+1,] = list(m,
list(hier.mod$order),
list(d),
list(get.hclust.train.test.error(hier.mod, n.groups,
x.train.1, x.test.1,
as.factor(tts$train$cardio),as.factor(tts$test$cardio))))
}
df$accuracy
df$method[3]
df$accuracy[3]
df$method[5]
df$accuracy[5]
method = "ward.D2"
# color using kmeans cluster
km.clust = kmeans(x.train.1, n.groups)$cluster
gower.dist = daisy(x.train.1, metric ="gower")
hier.mod = hclust(gower.dist, method=method)
fviz_dend(hier.mod, k = n.groups,
k_colors = c("#00AFBB","#FC4E07"),
label_cols = km.clust[hier.mod$order], cex = 0.6)
# do hierarchical classification using the ward.D2 link
# patients order
gower.dist.pat.1 = daisy(x.train.1, metric ="gower")
hier.mod.pat.1 = hclust(gower.dist.pat.1, method=method)
patients.order = hier.mod.pat.1$order
# draw the dendrogram.
fviz_dend(hier.mod.pat.1, k=n.groups, cex=0.5,
#k_colors = colors,
label_cols = label.colors,
#horiz = T,
ggtheme=theme_minimal(),
main=paste(method, " - true coloring"))
# dend.pat.1 = as.dendrogram(hier.mod.pat.1)
# par(mar = rep(0,4))
# circlize_dendrogram(dend.pat.1)
# heatmap
# works better with standardized variables...
plot.image.plot(unfactorizefeatures(x.train.1[patients.order,]),
"age,gender,height,weight,aphi,aplo,choles,glucose,smoke,alcohol,active",
"patients order" )
# # predictors order
# euclid.dist.pred.1 = dist(t(x.train.1)) # euclidean distance
# hier.mod.pred.1 = hclust(gower.dist.pred.1, method="average")
# predictors.order = hier.mod.pred.1$order
#
# # draw the dendrogram.
# fviz_dend(hier.mod.pred.1, k=n.groups, cex=0.5, k_colors = c("#00AFBB","#FC4E07"),
# color_labels_by_k=TRUE, ggtheme=theme_minimal())
#
# dend.pred.1 = as.dendrogram(hier.mod.pred.1)
# par(mar = rep(0,4))
# circlize_dendrogram(dend.pred.1)
#
# # heatmap
# plot.image.plot(x.train.1[,predictors.order],
# "aplo,aphi,age,choles,glucose,active,weight,gender,height,smoke,alcohol",
# "predictors order" )
#
#
# # xlab must be equal to names(data.set[,predictors.order])
# # patients and predictors order
# plot.image.plot(x.train.1[patients.order,predictors.order],
# "aplo,aphi,age,choles,glucose,active,weight,gender,height,smoke,alcohol",
# "patients and predictors order" )
## to be checked
###
## feature selection - based on EDA of cardio.r
# remove gender, smoke and alcohol
x.train.2 = tts$train[, -c(2, 9, 10, 12)]
x.test.2 = tts$test[, -c(2, 9, 10, 12)]
# plot the data, diseases in rows and predictors in columns
image.plot(1:ncol(x.train.2), 1:nrow(x.train.2), t(x.train.2), # t(x) matrix transpose
col=tim.colors(8),
xlab="alcohol,glucose,smoke,age,weight,choles,aplo,aphi", ylab="No. cardio disease",
cex.lab=1)
# Do hierarchical classification using the average link
euclid.dist.2 = dist(x.train.2) # euclidean distance
hier.mod.2 = hclust(euclid.dist.2, method="average")
# draw the dendrogram.
fviz_dend(hier.mod.2, k =n.groups, cex = 0.5, k_colors = c("#00AFBB","#FC4E07"),
color_labels_by_k = TRUE, ggtheme = theme_minimal())
###
## feature selection - based on t statistics
x.train.3 = feature.selection.with.t.stat(tts$train)
# removed "alcohol" "gender" "glucose"
names(x.train.3)
t = data.frame(tts$test)
x.test.3 = t[, names(x.train.3)]
# plot the data, diseases in rows and predictors in columns
image.plot(1:ncol(x.train.3), 1:nrow(x.train.3), t(x.train.3), # t(x) matrix transpose
col=tim.colors(8),
xlab="alcohol,glucose,smoke,age,weight,choles,aplo,aphi", ylab="No. cardio disease",
cex.lab=1)
# do hierarchical classification using the average link
euclid.dist.3 = dist(x.train.3) # euclidean distance
hier.mod.3 = hclust(euclid.dist.3, method="average")
# draw the dendrogram.
fviz_dend(hier.mod.3, k =n.groups, cex = 0.5, k_colors = c("#00AFBB","#FC4E07"),
color_labels_by_k = TRUE, ggtheme = theme_minimal())
### train test error
# NOTE(review): the original calls referenced an undefined object
# `hier.mod.1` and passed a 7th description argument that
# get.hclust.train.test.error() does not accept, so this section could not
# run.  The complete-model clustering computed above is `hier.mod.pat.1`
# (confirm that is the intended model); the description is attached here
# while building the result row instead.
# NOTE(review): at this point x.train.1 holds factor columns (Gower section);
# knn() requires numeric features — confirm the intended inputs.
hier.tt.res <- data.frame(method = character(0), train.error = numeric(0),
                          test.error = numeric(0), stringsAsFactors = FALSE)
# Score one fitted hierarchical model and append a labeled row of accuracies.
add.hier.result <- function(res.df, model, x.train, x.test, label) {
  acc <- get.hclust.train.test.error(model, n.groups, x.train, x.test,
                                     as.factor(tts$train$cardio),
                                     as.factor(tts$test$cardio))
  rbind(res.df,
        data.frame(method = label, train.error = acc[[1]],
                   test.error = acc[[2]], stringsAsFactors = FALSE))
}
hier.tt.res <- add.hier.result(hier.tt.res, hier.mod.pat.1, x.train.1, x.test.1,
                               'with outliers - complete model')
hier.tt.res <- add.hier.result(hier.tt.res, hier.mod.2, x.train.2, x.test.2,
                               'with outliers - EDA feature selection')
hier.tt.res <- add.hier.result(hier.tt.res, hier.mod.3, x.train.3, x.test.3,
                               'with outliers - t stats feature selection')
hier.tt.res
|
#!/usr/bin/env Rscript
###---PACKAGES---###
# Bootstrap the pacman package manager, then use it to install/load
# everything else: CRAN/Bioconductor packages via p_load, GitHub packages
# ("owner/repo" slugs) via p_load_gh.
if (!require("pacman")) { install.packages("pacman", repos='http://cran.us.r-project.org') }
library(pacman)
# required CRAN packages
required_packages = c(
"tidyverse",
"grid",
"ggplotify",
"svglite"
)
# GitHub-only packages (dev ggrepel, for the label-repel options used below)
github_packages = c(
"slowkow/ggrepel"
)
# load packages (installing/updating as needed)
pacman::p_load(
char=required_packages,
install=TRUE,
character.only=TRUE,
try.bioconductor=TRUE,
update.bioconductor=TRUE
)
# load github packages
pacman::p_load_gh(
char = github_packages,
update = getOption("pac_update"),
dependencies = TRUE
)
###---GLOBAL CONFIG---###
# Significance cutoffs: Irwin-Hall p-value for BART TF predictions; adjusted
# p-value and |log2FC| for the RNA-seq differential-expression filter.
ih_pvalue_threshold = 0.01
padj_threshold = 0.01
lfc_rna_threshold = 1
###---FUNCTIONS---###
# Project-relative I/O locations (script assumed to run from its own folder).
# NOTE(review): `plot` shadows base graphics::plot() — works here, but rename
# (e.g. plot.dir) to avoid surprises.
data.in = "../data/in/"
data.in.long = "../data/in/dge/featureCounts_deseq2/table/result_lfcShrink/standardized/sirt5_kd_over_sirt5_nt/"
data.out = "../data/out/"
plot = "../plot/"
# BART output tables for the down-/up-regulated DE gene sets
down_input=paste0(data.out,"down_genes_bart_results.txt")
up_input=paste0(data.out,"up_genes_bart_results.txt")
# DE genes (SIRT5 itself excluded) passing the padj and |log2FC| thresholds
result_filtered = read.csv(paste0(data.in.long,"result-lfcShrink_stndrd-filt_anno-basic_padj1_lfc0.csv")) %>%
dplyr::filter(external_gene_name != "SIRT5") %>%
dplyr::filter(padj < padj_threshold) %>%
dplyr::filter(abs(log2FoldChange) > lfc_rna_threshold)
# oncogenes (ONGene database export)
oncogenes_data = read.table(file = paste0(data.in,"ongene_human.txt"), sep = '\t', header = TRUE) %>%
as_tibble()
oncogenes_names = oncogenes_data$OncogeneName
# melanoma subtype signature genes
melanoma_subtype_signatures = read.csv(file = paste0(data.in,"subtype_signatures_updated.csv")) %>%
as_tibble()
melanoma_external_gene_names = melanoma_subtype_signatures$Gene
# TFs eligible for labeling on the plot: DE genes + melanoma signature genes
# + oncogenes; the DE genes themselves additionally get a bold label
genes=result_filtered$external_gene_name
genes=c(genes,melanoma_external_gene_names,oncogenes_names)
genes_bold=c(result_filtered$external_gene_name)
set.seed(42)  # reproducible ggrepel label placement
# Read the BART result tables for the up- and/or down-regulated gene sets and
# annotate them for plotting: facet label (`geneset`), significance-based
# point color (`color_group`), optional gene label (`label`, only for TFs in
# `genes` passing the Irwin-Hall cutoff), and label fontface (bold.italic for
# TFs in `genes.bold`).  Rows with zscore <= 0 are dropped.  Returns a tibble
# (both sets row-bound when both inputs are given), or NULL if neither input
# is supplied.  Uses the global `ih_pvalue_threshold`.
# NOTE(review): the original body read the global `genes_bold` instead of the
# `genes.bold` argument; the argument is honored here (the sole call site
# passes genes.bold = genes_bold, so behavior is unchanged).
clean_data <- function(up_input=NULL, down_input=NULL, genes=c(), genes.bold=c()) {
  # Annotate one BART table with the plotting columns described above.
  annotate <- function(path, set_label) {
    read.table(header=TRUE, file=path, sep='\t') %>%
      dplyr::mutate(geneset = set_label) %>%
      dplyr::mutate(color_group = ifelse(irwin_hall_pvalue < ih_pvalue_threshold,
                                         set_label, "Not Significant")) %>%
      dplyr::mutate(label = ifelse((TF %in% genes) & (irwin_hall_pvalue < ih_pvalue_threshold), TF, NA)) %>%
      dplyr::mutate(fontface = ifelse(TF %in% genes.bold, "bold.italic", "italic")) %>%
      dplyr::filter(zscore > 0)
  }
  up <- if (!is.null(up_input)) annotate(up_input, "Factors Predicted From Up-Regulated Genes") else NULL
  down <- if (!is.null(down_input)) annotate(down_input, "Factors Predicted From Down-Regulated Genes") else NULL
  if (!is.null(up) && !is.null(down)) {
    as_tibble(rbind(up, down))
  } else if (!is.null(up)) {
    as_tibble(up)
  } else if (!is.null(down)) {
    as_tibble(down)
  } else {
    NULL  # neither input supplied
  }
}
x = clean_data(up_input = up_input, down_input = down_input,genes=genes,genes.bold=genes_bold)
# Build the BART significance-vs-effect figure: z-score on x vs.
# -log10(Irwin-Hall p) on y, faceted into down-/up-regulated panels, with a
# shaded band above the significance cutoff, repelled gene labels, and the
# facet strips recolored to match each gene set.  Returns a gtable (not a
# ggplot object), so draw it with grid::grid.draw() or pass it to ggsave().
signif_ovr_effect = function(cleaned_input) {
# axis titles with mathematical notation
expression1=expression(italic(z)-score)
expression2=expression(-log[10](Irwin~Hall~italic(p)-value))
# point/strip colors and the pale panel-background fills, keyed by group name
cols=c("Factors Predicted From Down-Regulated Genes" = "#234463","Factors Predicted From Up-Regulated Genes" = "#781e1e", "Not Significant" = "gray50")
cols2=c("Factors Predicted From Down-Regulated Genes" = "#f1f8ff", "Factors Predicted From Up-Regulated Genes" = "#fff6f6", "Not Significant" = "gray50")
# near-significant subset used only to size the axes (1.85 on -log10 scale)
df = cleaned_input %>% dplyr::filter(-log10(irwin_hall_pvalue) > 1.85)
max_zscore=max(df$zscore)
median_zscore=median(df$zscore)  # computed but unused
max_ih=max(-log10(df$irwin_hall_pvalue))
tmpplot=ggplot(data=cleaned_input, mapping=aes(x=zscore,y=-log10(irwin_hall_pvalue))) +
# shaded band covering the significant region (y >= 2, i.e. p <= 0.01)
geom_rect(
mapping=aes(fill = geneset),
xmin = -Inf,
xmax = Inf,
ymin = 2,
ymax = Inf
) +
geom_point(mapping=aes(color=color_group,fill=color_group),alpha=0.5) +
# dashed line at the significance cutoff (p = 0.01)
geom_hline(yintercept=range(-log10(0.01)), color='black', size=0.5, linetype = "dashed") +
# NOTE(review): guide=FALSE is deprecated in ggplot2 >= 3.3 (use
# guide = "none"), and size= for lines/borders is linewidth= in
# ggplot2 >= 3.4 — still functional here.
scale_color_manual(values=cols,guide=FALSE) +
scale_fill_manual(values=cols2,guide=FALSE) +
scale_x_continuous(limits = c(0,max_zscore+0.25), expand = c(0, 0)) +
scale_y_continuous(limits = c(0,(max_ih+1)), expand = c(0, 0)) +
labs(x=expression1,y=expression2) +
# repelled gene labels, confined to the significant region (ylim lower bound)
ggrepel::geom_label_repel(
nudge_x = -0.3,
ylim = c(-log10(0.01),NA),
hjust=0.5,
min.segment.length = 0,
segment.square = TRUE,
segment.inflect = TRUE,
segment.curvature = -1e-20,
segment.ncp = 3,
fill=alpha("white",0.85),
mapping = aes(label = label, fontface = fontface),
box.padding = unit(0.1, "lines"),
point.padding = unit(0.3, "lines"),
size = 2,
max.iter = 1e7,
max.time = 2
) +
theme_classic() +
theme(
axis.title=element_text(size=12),
strip.text=element_text(size=12, color = "white", face="bold"),
axis.text=element_text(size=12),
axis.line = element_blank(),
panel.border = element_rect(color = "black", fill = NA, size = 1.)
) +
facet_grid(cols = vars(geneset))
# strip colors: recolor the facet strip rectangles by editing the built
# gtable directly, since ggplot2 has no per-strip fill API; `fills` must be
# in facet (alphabetical geneset) order: Down first, Up second.
g = ggplot_gtable(ggplot_build(tmpplot))
striprt = which( grepl('strip-t', g$layout$name) )
fills = c("#234463","#781e1e")
k = 1
for (i in striprt) {
j = which(grepl('rect', g$grobs[[i]]$grobs[[1]]$childrenOrder))
g$grobs[[i]]$grobs[[1]]$children[[j]]$gp$fill = fills[k]
k = k+1
}
return(g)
}
# Build the final figure (a gtable) and save it in three formats to `plot`.
bart_plot = signif_ovr_effect(cleaned_input=x)
# plot output basename
name="BART_sirt5_kd_over_sirt5_nt"
ggsave(filename=paste0(plot,name,".png"),plot=bart_plot,device="png",dpi=320,width=10,height=7)
ggsave(filename=paste0(plot,name,".svg"),plot=bart_plot,device="svg",dpi=320,width=10,height=7)
ggsave(filename=paste0(plot,name,".pdf"),plot=bart_plot,device="pdf",dpi=320,width=10,height=7)
| /figures/(5) BART Transcription Factors/R/2_bart_plot.R | no_license | monovich/giblin-sirt5-melanoma | R | false | false | 6,575 | r | #!/usr/bin/env Rscript
###---PACKAGES---###
if (!require("pacman")) { install.packages("pacman", repos='http://cran.us.r-project.org') }
library(pacman)
#required packages
required_packages = c(
"tidyverse",
"grid",
"ggplotify",
"svglite"
)
github_packages = c(
"slowkow/ggrepel"
)
#load packages
pacman::p_load(
char=required_packages,
install=TRUE,
character.only=TRUE,
try.bioconductor=TRUE,
update.bioconductor=TRUE
)
#load github packages
pacman::p_load_gh(
char = github_packages,
update = getOption("pac_update"),
dependencies = TRUE
)
###---GLOBAL CONFIG---###
ih_pvalue_threshold = 0.01
padj_threshold = 0.01
lfc_rna_threshold = 1
###---FUNCTIONS---###
data.in = "../data/in/"
data.in.long = "../data/in/dge/featureCounts_deseq2/table/result_lfcShrink/standardized/sirt5_kd_over_sirt5_nt/"
data.out = "../data/out/"
plot = "../plot/"
down_input=paste0(data.out,"down_genes_bart_results.txt")
up_input=paste0(data.out,"up_genes_bart_results.txt")
result_filtered = read.csv(paste0(data.in.long,"result-lfcShrink_stndrd-filt_anno-basic_padj1_lfc0.csv")) %>%
dplyr::filter(external_gene_name != "SIRT5") %>%
dplyr::filter(padj < padj_threshold) %>%
dplyr::filter(abs(log2FoldChange) > lfc_rna_threshold)
#oncogenes
oncogenes_data = read.table(file = paste0(data.in,"ongene_human.txt"), sep = '\t', header = TRUE) %>%
as_tibble()
oncogenes_names = oncogenes_data$OncogeneName
#melanoma subtype signatures
melanoma_subtype_signatures = read.csv(file = paste0(data.in,"subtype_signatures_updated.csv")) %>%
as_tibble()
melanoma_external_gene_names = melanoma_subtype_signatures$Gene
genes=result_filtered$external_gene_name
genes=c(genes,melanoma_external_gene_names,oncogenes_names)
genes_bold=c(result_filtered$external_gene_name)
set.seed(42)
# Read the BART result tables for the up- and/or down-regulated gene sets and
# annotate them for plotting: facet label (`geneset`), significance-based
# point color (`color_group`), optional gene label (`label`, only for TFs in
# `genes` passing the Irwin-Hall cutoff), and label fontface (bold.italic for
# TFs in `genes.bold`).  Rows with zscore <= 0 are dropped.  Returns a tibble
# (both sets row-bound when both inputs are given), or NULL if neither input
# is supplied.  Uses the global `ih_pvalue_threshold`.
# NOTE(review): the original body read the global `genes_bold` instead of the
# `genes.bold` argument; the argument is honored here (the sole call site
# passes genes.bold = genes_bold, so behavior is unchanged).
clean_data <- function(up_input=NULL, down_input=NULL, genes=c(), genes.bold=c()) {
  # Annotate one BART table with the plotting columns described above.
  annotate <- function(path, set_label) {
    read.table(header=TRUE, file=path, sep='\t') %>%
      dplyr::mutate(geneset = set_label) %>%
      dplyr::mutate(color_group = ifelse(irwin_hall_pvalue < ih_pvalue_threshold,
                                         set_label, "Not Significant")) %>%
      dplyr::mutate(label = ifelse((TF %in% genes) & (irwin_hall_pvalue < ih_pvalue_threshold), TF, NA)) %>%
      dplyr::mutate(fontface = ifelse(TF %in% genes.bold, "bold.italic", "italic")) %>%
      dplyr::filter(zscore > 0)
  }
  up <- if (!is.null(up_input)) annotate(up_input, "Factors Predicted From Up-Regulated Genes") else NULL
  down <- if (!is.null(down_input)) annotate(down_input, "Factors Predicted From Down-Regulated Genes") else NULL
  if (!is.null(up) && !is.null(down)) {
    as_tibble(rbind(up, down))
  } else if (!is.null(up)) {
    as_tibble(up)
  } else if (!is.null(down)) {
    as_tibble(down)
  } else {
    NULL  # neither input supplied
  }
}
x = clean_data(up_input = up_input, down_input = down_input,genes=genes,genes.bold=genes_bold)
# Build the two-panel BART plot: z-score vs -log10(Irwin-Hall p-value),
# faceted by gene set, with significant factors labeled via ggrepel.
# Returns a gtable (not a ggplot object) because the facet strip colors are
# patched at the grob level below; render with grid::grid.draw().
signif_ovr_effect = function(cleaned_input) {
  # axis titles with mathematical typesetting
  expression1=expression(italic(z)-score)
  expression2=expression(-log[10](Irwin~Hall~italic(p)-value))
  # point colors and pale background fills, keyed by gene-set name
  cols=c("Factors Predicted From Down-Regulated Genes" = "#234463","Factors Predicted From Up-Regulated Genes" = "#781e1e", "Not Significant" = "gray50")
  cols2=c("Factors Predicted From Down-Regulated Genes" = "#f1f8ff", "Factors Predicted From Up-Regulated Genes" = "#fff6f6", "Not Significant" = "gray50")
  # axis limits are derived from the near-significant points only
  # (NOTE(review): the 1.85 cutoff sits slightly below the plotted p=0.01
  # threshold line -- confirm this is intentional)
  df = cleaned_input %>% dplyr::filter(-log10(irwin_hall_pvalue) > 1.85)
  max_zscore=max(df$zscore)
  median_zscore=median(df$zscore)  # computed but not used below
  max_ih=max(-log10(df$irwin_hall_pvalue))
  tmpplot=ggplot(data=cleaned_input, mapping=aes(x=zscore,y=-log10(irwin_hall_pvalue))) +
    # shaded band above the significance threshold (y >= 2, i.e. p <= 0.01)
    geom_rect(
      mapping=aes(fill = geneset),
      xmin = -Inf,
      xmax = Inf,
      ymin = 2,
      ymax = Inf
    ) +
    geom_point(mapping=aes(color=color_group,fill=color_group),alpha=0.5) +
    # dashed line at p = 0.01 (range() is a no-op on the scalar here)
    geom_hline(yintercept=range(-log10(0.01)), color='black', size=0.5, linetype = "dashed") +
    scale_color_manual(values=cols,guide=FALSE) +
    scale_fill_manual(values=cols2,guide=FALSE) +
    scale_x_continuous(limits = c(0,max_zscore+0.25), expand = c(0, 0)) +
    scale_y_continuous(limits = c(0,(max_ih+1)), expand = c(0, 0)) +
    labs(x=expression1,y=expression2) +
    # labels are confined above the significance line (ylim), nudged left,
    # and drawn on a translucent white background
    ggrepel::geom_label_repel(
      nudge_x = -0.3,
      ylim = c(-log10(0.01),NA),
      hjust=0.5,
      min.segment.length = 0,
      segment.square = TRUE,
      segment.inflect = TRUE,
      segment.curvature = -1e-20,
      segment.ncp = 3,
      fill=alpha("white",0.85),
      mapping = aes(label = label, fontface = fontface),
      box.padding = unit(0.1, "lines"),
      point.padding = unit(0.3, "lines"),
      size = 2,
      max.iter = 1e7,
      max.time = 2
    ) +
    theme_classic() +
    theme(
      axis.title=element_text(size=12),
      strip.text=element_text(size=12, color = "white", face="bold"),
      axis.text=element_text(size=12),
      axis.line = element_blank(),
      panel.border = element_rect(color = "black", fill = NA, size = 1.)
    ) +
    facet_grid(cols = vars(geneset))
  #strip colors
  # Recolor each facet strip rectangle by editing the built gtable's grobs
  # (ggplot2 has no public API for per-strip fills). Fills are applied in
  # facet order: down-regulated panel first, then up-regulated.
  g = ggplot_gtable(ggplot_build(tmpplot))
  striprt = which( grepl('strip-t', g$layout$name) )
  fills = c("#234463","#781e1e")
  k = 1
  for (i in striprt) {
    j = which(grepl('rect', g$grobs[[i]]$grobs[[1]]$childrenOrder))
    g$grobs[[i]]$grobs[[1]]$children[[j]]$gp$fill = fills[k]
    k = k+1
  }
  return(g)
}
# Render the final figure and export it in raster and vector formats.
# `plot` is used as an output path prefix here -- presumably a directory
# string defined earlier in the script; TODO confirm.
bart_plot = signif_ovr_effect(cleaned_input=x)
#plot
name="BART_sirt5_kd_over_sirt5_nt"
ggsave(filename=paste0(plot,name,".png"),plot=bart_plot,device="png",dpi=320,width=10,height=7)
ggsave(filename=paste0(plot,name,".svg"),plot=bart_plot,device="svg",dpi=320,width=10,height=7)
ggsave(filename=paste0(plot,name,".pdf"),plot=bart_plot,device="pdf",dpi=320,width=10,height=7)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as_tibble.R
\name{as_tibble.gtsummary}
\alias{as_tibble.gtsummary}
\title{Convert gtsummary object to a tibble}
\usage{
\method{as_tibble}{gtsummary}(
x,
include = everything(),
col_labels = TRUE,
return_calls = FALSE,
exclude = NULL,
...
)
}
\arguments{
\item{x}{Object created by a function from the gtsummary package
(e.g. \link{tbl_summary} or \link{tbl_regression})}
\item{include}{Commands to include in output. Input may be a vector of
quoted or unquoted names. tidyselect and gtsummary select helper
functions are also accepted.
Default is \code{everything()}, which includes all commands in \code{x$kable_calls}.}
\item{col_labels}{Logical argument adding column labels to output tibble.
Default is \code{TRUE}.}
\item{return_calls}{Logical. Default is \code{FALSE}. If \code{TRUE}, the calls are returned
as a list of expressions.}
\item{exclude}{DEPRECATED}
\item{...}{Not used}
}
\value{
a \link[tibble:tibble-package]{tibble}
}
\description{
Function converts gtsummary objects to tibbles. The formatting stored in
\code{x$kable_calls} is applied.
}
\examples{
tbl <-
trial \%>\%
dplyr::select(trt, age, grade, response) \%>\%
tbl_summary(by = trt)
as_tibble(tbl)
# without column labels
as_tibble(tbl, col_labels = FALSE)
}
\seealso{
Other gtsummary output types:
\code{\link{as_flextable}()},
\code{\link{as_gt}()},
\code{\link{as_huxtable.gtsummary}()},
\code{\link{as_kable_extra}()},
\code{\link{as_kable}()}
}
\author{
Daniel D. Sjoberg
}
\concept{gtsummary output types}
| /man/as_tibble.gtsummary.Rd | permissive | ClinicoPath/gtsummary | R | false | true | 1,592 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as_tibble.R
\name{as_tibble.gtsummary}
\alias{as_tibble.gtsummary}
\title{Convert gtsummary object to a tibble}
\usage{
\method{as_tibble}{gtsummary}(
x,
include = everything(),
col_labels = TRUE,
return_calls = FALSE,
exclude = NULL,
...
)
}
\arguments{
\item{x}{Object created by a function from the gtsummary package
(e.g. \link{tbl_summary} or \link{tbl_regression})}
\item{include}{Commands to include in output. Input may be a vector of
quoted or unquoted names. tidyselect and gtsummary select helper
functions are also accepted.
Default is \code{everything()}, which includes all commands in \code{x$kable_calls}.}
\item{col_labels}{Logical argument adding column labels to output tibble.
Default is \code{TRUE}.}
\item{return_calls}{Logical. Default is \code{FALSE}. If \code{TRUE}, the calls are returned
as a list of expressions.}
\item{exclude}{DEPRECATED}
\item{...}{Not used}
}
\value{
a \link[tibble:tibble-package]{tibble}
}
\description{
Function converts gtsummary objects to tibbles. The formatting stored in
\code{x$kable_calls} is applied.
}
\examples{
tbl <-
trial \%>\%
dplyr::select(trt, age, grade, response) \%>\%
tbl_summary(by = trt)
as_tibble(tbl)
# without column labels
as_tibble(tbl, col_labels = FALSE)
}
\seealso{
Other gtsummary output types:
\code{\link{as_flextable}()},
\code{\link{as_gt}()},
\code{\link{as_huxtable.gtsummary}()},
\code{\link{as_kable_extra}()},
\code{\link{as_kable}()}
}
\author{
Daniel D. Sjoberg
}
\concept{gtsummary output types}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dasl.R
\docType{data}
\name{dasl.oecd_economic_regulations}
\alias{dasl.oecd_economic_regulations}
\title{OECD economic regulations}
\format{24 observations}
\source{
DASL – The Data And Story Library: \href{https://dasl.datadescription.com/datafile/oecd-economic-regulations/?sf_paged=29}{OECD economic regulations}
}
\description{
A study by the U.S. Small Business Administration used historical data to model the GDP per capita of 24 of the countries in the Organization for Economic Cooperation and Development (OECD). The researchers hoped to show that more regulation leads to lower GDP/Capita. The multiple regression with all terms does have a significant P-value for Economic Regulation Index. However, Primary Education is not a significant predictor. If it is removed from the model, then OECD Regulation is no longer significant at .05. Was it added to the model just to nudge the P-value of OECD regulation down to permit a publication that claimed an effect? Check to see whether you think there is such an effect.
}
\details{
\url{https://github.com/sigbertklinke/wwwdata/tree/master/wwwdata/dasl}
}
\references{
Crain, M. W., The Impact of Regulatory Costs on Small Firms, available at www.sba.gov/advocacy/7540/49291
}
\concept{Multiple Regression}
| /man/dasl.oecd_economic_regulations.Rd | no_license | sigbertklinke/mmstat.data | R | false | true | 1,346 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dasl.R
\docType{data}
\name{dasl.oecd_economic_regulations}
\alias{dasl.oecd_economic_regulations}
\title{OECD economic regulations}
\format{24 observations}
\source{
DASL – The Data And Story Library: \href{https://dasl.datadescription.com/datafile/oecd-economic-regulations/?sf_paged=29}{OECD economic regulations}
}
\description{
A study by the U.S. Small Business Administration used historical data to model the GDP per capita of 24 of the countries in the Organization for Economic Cooperation and Development (OECD). The researchers hoped to show that more regulation leads to lower GDP/Capita. The multiple regression with all terms does have a significant P-value for Economic Regulation Index. However, Primary Education is not a significant predictor. If it is removed from the model, then OECD Regulation is no longer significant at .05. Was it added to the model just to nudge the P-value of OECD regulation down to permit a publication that claimed an effect? Check to see whether you think there is such an effect.
}
\details{
\url{https://github.com/sigbertklinke/wwwdata/tree/master/wwwdata/dasl}
}
\references{
Crain, M. W., The Impact of Regulatory Costs on Small Firms, available at www.sba.gov/advocacy/7540/49291
}
\concept{Multiple Regression}
|
################
# analyzing wave 6 (ie singapore) data
# with randomforest and only top 50 features of what is found by
# rf_feature_elim_WV6.R
################
source('~/GitHub/World_Values_Survey/WVS_lib.R')
# load data
source('~/GitHub/World_Values_Survey/load_WV6_for_caret.R')
# load feature elimination result, result in rfProfile
load(file=file.path(datapath, "rf_rfe_WV6.Rdata"))
#get top 50 features
xnames <- rfProfile$optVariables
#construct formula
formulaList <- paste("Happiness ~",paste(xnames, collapse="+"))
#alternate apporach: reduce dataframe size
d50 <- dnontrain[, which(names(dnontrain) %in% c(xnames, "Happiness"))]
# train setting: set in WVS_lib.R
#tuneLength <- 10
##############
# enable parallel processing
##############
# library() rather than require(): a missing package should error loudly
# here instead of returning FALSE and failing later inside train().
library(doSNOW)
cl <- makeCluster(4, type = "SOCK")
registerDoSNOW(cl)
# train 1: basic random forest on the reduced (top-50 feature) data frame
cat("Random forest top 50 features")
set.seed(12345) # same seed across trainings so resampling folds match
ptm <- proc.time()
# Formula-interface alternative on the full data frame, kept for reference:
# fitRf50 <- train(as.formula(formulaList), data = dnontrain, method = "rf",
#                  trControl = fitControl)
fitRf50 <- train(Happiness ~ .,
                 data = d50,
                 method = "rf",
                 trControl = fitControl
                 # tuneLength = tuneLength
)
time1 <- proc.time()-ptm
cat(time1)
# train 2: Boruta random forest - too long
# cat("Boruta")
# set.seed(12345) # need to set same seed for all training to have same fold separation?
# ptm <- proc.time()
# fitBoruta <- train(Happiness ~ ., data = dnontrain,
# method = "Boruta",
# trControl = fitControl
# # tuneLength = tuneLength
# )
# time2 <- proc.time()-ptm
# cat(time2)
#
# resampsRfWV6 <- resamples(list(rfcleannonanswer = fitRf,
# borutacleannonanswer = fitBoruta))
#save(fitRf50, file=file.path(datapath, "rf50train_WV6.Rdata"))
# save(resampsRfWV6, fitRf, file=file.path(datapath, "rftrain_WV6.Rdata"))
#######################
# stop parallel processing
#######################
stopCluster(cl)
| /rf50_wave6.R | no_license | sonicrick/World_Values_Survey | R | false | false | 2,250 | r | ################
# analyzing wave 6 (ie singapore) data
# with randomforest and only top 50 features of what is found by
# rf_feature_elim_WV6.R
################
source('~/GitHub/World_Values_Survey/WVS_lib.R')
# load data
source('~/GitHub/World_Values_Survey/load_WV6_for_caret.R')
# load feature elimination result, result in rfProfile
load(file=file.path(datapath, "rf_rfe_WV6.Rdata"))
#get top 50 features
xnames <- rfProfile$optVariables
#construct formula
formulaList <- paste("Happiness ~",paste(xnames, collapse="+"))
#alternate apporach: reduce dataframe size
d50 <- dnontrain[, which(names(dnontrain) %in% c(xnames, "Happiness"))]
# train setting: set in WVS_lib.R
#tuneLength <- 10
##############
# enable parallel processing
##############
require(doSNOW)
cl <- makeCluster(4, type = "SOCK")
registerDoSNOW(cl)
# train 1: basic random forest
cat("Random forest top 50 features")
set.seed(12345) # need to set same seed for all training to have same fold separation?
ptm <- proc.time()
# fitRf50 <- train(as.formula(formulaList),
# data = dnontrain,
# method = "rf",
# trControl = fitControl
# # tuneLength = tuneLength
# )
#alternate apporach: reduce dataframe size
fitRf50 <- train(Happiness ~ .,
data = d50,
method = "rf",
trControl = fitControl
# tuneLength = tuneLength
)
time1 <- proc.time()-ptm
cat(time1)
# train 2: Boruta random forest - too long
# cat("Boruta")
# set.seed(12345) # need to set same seed for all training to have same fold separation?
# ptm <- proc.time()
# fitBoruta <- train(Happiness ~ ., data = dnontrain,
# method = "Boruta",
# trControl = fitControl
# # tuneLength = tuneLength
# )
# time2 <- proc.time()-ptm
# cat(time2)
#
# resampsRfWV6 <- resamples(list(rfcleannonanswer = fitRf,
# borutacleannonanswer = fitBoruta))
#save(fitRf50, file=file.path(datapath, "rf50train_WV6.Rdata"))
# save(resampsRfWV6, fitRf, file=file.path(datapath, "rftrain_WV6.Rdata"))
#######################
# stop parallel processing
#######################
stopCluster(cl)
|
margEff.censReg <- function( object, calcVCov = TRUE, returnJacobian = FALSE,
   ... ) {
   ## Marginal effects on E[y] of a censored (tobit) regression, evaluated
   ## at the sample means of the explanatory variables (object$xMean).
   ##
   ## object: fitted model of class 'censReg'
   ## calcVCov: attach a delta-method variance-covariance matrix ("vcov")?
   ## returnJacobian: attach the Jacobian d(margEff)/d(coef) ("jacobian")?
   allPar <- coef( object, logSigma = FALSE )
   # panel models carry an extra variance component 'sigmaMu'; the formulas
   # below do not cover that case, so refuse it explicitly
   isPanel <- "sigmaMu" %in% names( allPar )
   if( isPanel ) {
      stop( "the margEff() method for objects of class 'censReg'",
         " can not yet be used for panel data models" )
   }
   sigma <- allPar[ "sigma" ]
   beta <- allPar[ ! names( allPar ) %in% c( "sigma" ) ]
   if( length( object$xMean ) != length( beta ) ){
      print( beta )
      print( object$xMean )
      stop( "cannot calculate marginal effects due to an internal error:",
         " please contact the maintainer of this package" )
   }
   xBeta <- crossprod( object$xMean, beta )
   # standardized distances of the censoring limits from the linear predictor
   zRight <- ( object$right - xBeta ) / sigma
   zLeft <- ( object$left - xBeta ) / sigma
   # hoist the normal CDF/PDF differences: they are invariant inside the
   # Jacobian double loop below (previously recomputed on every iteration)
   probDiff <- pnorm( zRight ) - pnorm( zLeft )
   densDiff <- dnorm( zRight ) - dnorm( zLeft )
   result <- beta[ ! names( beta ) %in% c( "(Intercept)" ) ] * probDiff
   names( result ) <-
      names( beta )[ ! names( beta ) %in% c( "(Intercept)" ) ]
   if( calcVCov || returnJacobian ){
      # Jacobian of the marginal effects wrt all estimated coefficients;
      # last column corresponds to sigma
      jac <- matrix( 0, nrow = length( result ), ncol = length( allPar ) )
      rownames( jac ) <- names( result )
      colnames( jac ) <- names( allPar )
      # common part of d(margEff_j)/d(sigma); dnorm(z)*z would be NaN for an
      # infinite censoring limit (0 * Inf), hence the is.finite() guards
      sigmaGrad <- 0
      if( is.finite( object$right ) ) {
         sigmaGrad <- sigmaGrad - dnorm( zRight ) * zRight
      }
      if( is.finite( object$left ) ) {
         sigmaGrad <- sigmaGrad + dnorm( zLeft ) * zLeft
      }
      for( j in names( result ) ) {
         for( k in names( allPar )[ -length( allPar ) ] ) {
            jac[ j, k ] <-
               ( j == k ) * probDiff -
               ( beta[ j ] * object$xMean[ k ] / sigma ) * densDiff
         }
         jac[ j, "sigma" ] <- ( beta[ j ] / sigma ) * sigmaGrad
      }
      if( calcVCov ) {
         # delta method: V = J %*% Vcov(coef) %*% t(J)
         attr( result, "vcov" ) <-
            jac %*% vcov( object, logSigma = FALSE ) %*% t( jac )
      }
      if( returnJacobian ) {
         attr( result, "jacobian" ) <- jac
      }
   }
   # degrees of freedom of the residuals
   attr( result, "df.residual" ) <- object$df.residual
   class( result ) <- c( "margEff.censReg", class( result ) )
   return( result )
}
} | /censReg/R/margEff.censReg.R | no_license | ingted/R-Examples | R | false | false | 2,467 | r | margEff.censReg <- function( object, calcVCov = TRUE, returnJacobian = FALSE,
... ) {
## calculate marginal effects on E[y] at the mean explanatory variables
allPar <- coef( object, logSigma = FALSE )
# check if the model was estimated with panel data
isPanel <- "sigmaMu" %in% names( allPar )
## (not for panel data)
if( isPanel ) {
stop( "the margEff() method for objects of class 'censReg'",
" can not yet be used for panel data models" )
}
sigma <- allPar[ "sigma" ]
beta <- allPar[ ! names( allPar ) %in% c( "sigma" ) ]
if( length( object$xMean ) != length( beta ) ){
print( beta )
print( object$xMean )
stop( "cannot calculate marginal effects due to an internal error:",
" please contact the maintainer of this package" )
}
xBeta <- crossprod( object$xMean, beta )
zRight <- ( object$right - xBeta ) / sigma
zLeft <- ( object$left - xBeta ) / sigma
result <- beta[ ! names( beta ) %in% c( "(Intercept)" ) ] *
( pnorm( zRight ) - pnorm( zLeft ) )
names( result ) <-
names( beta )[ ! names( beta ) %in% c( "(Intercept)" ) ]
if( calcVCov || returnJacobian ){
# compute Jacobian matrix
jac <- matrix( 0, nrow = length( result ), ncol = length( allPar ) )
rownames( jac ) <- names( result )
colnames( jac ) <- names( allPar )
for( j in names( result ) ) {
for( k in names( allPar )[ -length( allPar ) ] ) {
jac[ j, k ] <-
( j == k ) * ( pnorm( zRight ) - pnorm( zLeft ) ) -
( beta[ j ] * object$xMean[ k ] / sigma ) *
( dnorm( zRight ) - dnorm( zLeft ) )
}
jac[ j, "sigma"] <- 0
if( is.finite( object$right ) ) {
jac[ j, "sigma"] <- jac[ j, "sigma"] - ( beta[ j ] / sigma ) *
dnorm( zRight ) * zRight
}
if( is.finite( object$left ) ) {
jac[ j, "sigma"] <- jac[ j, "sigma"] + ( beta[ j ] / sigma ) *
dnorm( zLeft ) * zLeft
}
}
if( calcVCov ) {
attr( result, "vcov" ) <-
jac %*% vcov( object, logSigma = FALSE ) %*% t( jac )
}
if( returnJacobian ) {
attr( result, "jacobian" ) <- jac
}
}
# degrees of freedom of the residuals
attr( result, "df.residual" ) <- object$df.residual
class( result ) <- c( "margEff.censReg", class( result ) )
return( result )
} |
# getdata-008
# project requirements:
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each
# measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with
# the average of each variable for each activity and each subject.
# the LaF library allows us to quickly obtain a handle on the large fixed-width
# files in this data set. Using read.fwf takes too long.
library(LaF)
library(data.table)
# first download and unzip the data if we haven't already
local_file<-"getdata-projectfiles-UCI-HAR-Dataset.zip"
remote_file_url<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
unzip_directory<-"UCI HAR Dataset"
if (!file.exists(local_file)) {
download.file(remote_file_url,destfile=local_file,method="curl")
}
if(!file.exists(unzip_directory)) {
unzip(local_file)
}
####
# get feature names and activity names into vectors
feature_names_df<-fread(paste(unzip_directory,"/features.txt", sep = ""))
feature_names<-feature_names_df$V2
activity_names_df<-fread(paste(unzip_directory,"/activity_labels.txt", sep =""))
activity_names<-activity_names_df$V2
####
# part 2 of the project requires us to select only the mean and std measurements,
# so get a list of which indexes those are in the features list
feature_names_mean_std_idx<-grep("mean\\(|std\\(",feature_names)
####
# part 4 requires us to tidy up the variable names, so we'll clean that
# up here.
# first remove the ()'s
feature_names<-gsub("\\(\\)","",feature_names)
# replace - with _ as we do not want to confuse the subtraction operator with
# variable names. We will not be forcing all lower-case or removing the separator
# entirely because that makes the variables too long and unreadable.
feature_names<-gsub("-","_",feature_names)
# now fix the incorrectly-named variables with the dupe string BodyBody in them
feature_names<-gsub("BodyBody","Body",feature_names)
# replace initial "t" with "time" and "f" with "freq" to be more descriptive
feature_names<-sub("^t","time",feature_names)
feature_names<-sub("^f","freq",feature_names)
# now make it all lowercase
feature_names<-tolower(feature_names)
####
# Merge the training and the test sets to create one data frame, including named
# activities (as a factor) and a column for the subject.
# This next section effectively deals with parts 1-4 of the project. We take care
# of test data first then repeat for training data before combining them at the end.
####
# read in the fixed-width X_test.txt file and label the columns appropriately
# based on the names in the features.txt file read in and tidied up
# above (part 4 of the project)
# read in the fixed-width X_test.txt file and label the columns based on the
# tidied names from features.txt (part 4 of the project)
test_data_handle<-laf_open_fwf(paste(unzip_directory,"/test/X_test.txt",sep = ""),
                               column_widths=c(rep(16,561)),
                               column_types=rep("numeric", 561),
                               column_names=feature_names)
# keep only the mean/std measurements we care about (part 2 of the project)
test_data<-test_data_handle[,feature_names_mean_std_idx]
# add the integer subject identifier as a column
test_subjects<-fread(paste(unzip_directory,"/test/subject_test.txt",sep=""),
                     data.table=FALSE)
test_data<-cbind("subject"=test_subjects$V1,test_data)
# add the activity as a factor column (part 3 of the project).
# BUG FIX: was cut(x, 6, labels=...), which bins the codes into six
# equal-width intervals; that mapping silently mislabels activities if the
# codes do not span 1..6. factor() maps each code to its label explicitly.
test_activities<-fread(paste(unzip_directory,"/test/y_test.txt",sep=""),
                       data.table=FALSE)
test_data<-cbind("activityname"=factor(test_activities$V1,
                                       levels=seq_along(activity_names),
                                       labels=activity_names),
                 test_data)
# now do the same with the train data
train_data_handle<-laf_open_fwf(paste(unzip_directory,"/train/X_train.txt",sep = ""),
                                column_widths=c(rep(16,561)),
                                column_types=rep("numeric", 561),
                                column_names=feature_names)
train_data<-train_data_handle[,feature_names_mean_std_idx]
train_subjects<-fread(paste(unzip_directory,"/train/subject_train.txt",sep=""),
                      data.table=FALSE)
train_data<-cbind("subject"=train_subjects$V1,train_data)
train_activities<-fread(paste(unzip_directory,"/train/y_train.txt",sep=""),
                        data.table=FALSE)
train_data<-cbind("activityname"=factor(train_activities$V1,
                                        levels=seq_along(activity_names),
                                        labels=activity_names),
                  train_data)
# merge the two sets (train and test) into a single data frame (part 1
# of the project)
all_data<-rbind(test_data,train_data)
####
# part 5 - create an independent tidy data set with the average of each
# variable for each activity and each subject
library(reshape2)
# long format: one row per (subject, activityname, variable, value)
all_data_melt<-melt(all_data,id=c("subject","activityname"))
# back to wide, averaging every variable within each activity/subject pair
all_data_cast<-dcast(all_data_melt, activityname + subject ~ variable, mean)
# write out the table for uploading a project deliverable
write.table(all_data_cast, file="getdata-008-project-results.txt",sep=" ",
            row.names = FALSE)
| /run_analysis.R | no_license | mcodd/coursera-getdata-008 | R | false | false | 5,253 | r | # getdata-008
# project requirements:
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each
# measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with
# the average of each variable for each activity and each subject.
# the LaF library allows us to quickly obtain a handle on the large fixed-width
# files in this data set. Using read.fwf takes too long.
library(LaF)
library(data.table)
# first download and unzip the data if we haven't already
local_file<-"getdata-projectfiles-UCI-HAR-Dataset.zip"
remote_file_url<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
unzip_directory<-"UCI HAR Dataset"
if (!file.exists(local_file)) {
download.file(remote_file_url,destfile=local_file,method="curl")
}
if(!file.exists(unzip_directory)) {
unzip(local_file)
}
####
# get feature names and activity names into vectors
feature_names_df<-fread(paste(unzip_directory,"/features.txt", sep = ""))
feature_names<-feature_names_df$V2
activity_names_df<-fread(paste(unzip_directory,"/activity_labels.txt", sep =""))
activity_names<-activity_names_df$V2
####
# part 2 of the project requires us to select only the mean and std measurements,
# so get a list of which indexes those are in the features list
feature_names_mean_std_idx<-grep("mean\\(|std\\(",feature_names)
####
# part 4 requires us to tidy up the variable names, so we'll clean that
# up here.
# first remove the ()'s
feature_names<-gsub("\\(\\)","",feature_names)
# replace - with _ as we do not want to confuse the subtraction operator with
# variable names. We will not be forcing all lower-case or removing the separator
# entirely because that makes the variables too long and unreadable.
feature_names<-gsub("-","_",feature_names)
# now fix the incorrectly-named variables with the dupe string BodyBody in them
feature_names<-gsub("BodyBody","Body",feature_names)
# replace initial "t" with "time" and "f" with "freq" to be more descriptive
feature_names<-sub("^t","time",feature_names)
feature_names<-sub("^f","freq",feature_names)
# now make it all lowercase
feature_names<-tolower(feature_names)
####
# Merge the training and the test sets to create one data frame, including named
# activities (as a factor) and a column for the subject.
# This next section effectively deals with parts 1-4 of the project. We take care
# of test data first then repeat for training data before combining them at the end.
####
# read in the fixed-width X_test.txt file and label the columns appropriately
# based on the names in the features.txt file read in and tidied up
# above (part 4 of the project)
test_data_handle<-laf_open_fwf(paste(unzip_directory,"/test/X_test.txt",sep = ""),
column_widths=c(rep(16,561)),
column_types=rep("numeric", 561),
column_names=feature_names)
# create a data frame called test_data that includes only the mean/std variables
# we care about (part 2 of the project)
test_data<-test_data_handle[,feature_names_mean_std_idx]
# create a column in test_data with the integer representing the subject
test_subjects<-fread(paste(unzip_directory,"/test/subject_test.txt",sep=""),
data.table=FALSE)
test_data<-cbind("subject"=test_subjects$V1,test_data)
# create a factor column in the dataframe using the activity names (part 3 of
# the project)
test_activities<-fread(paste(unzip_directory,"/test/y_test.txt",sep=""),
data.table=FALSE)
test_data<-cbind("activityname"=cut(test_activities$V1,6,labels=activity_names),
test_data)
# now we do the same with train data that we just did with the test data above
train_data_handle<-laf_open_fwf(paste(unzip_directory,"/train/X_train.txt",sep = ""),
column_widths=c(rep(16,561)),
column_types=rep("numeric", 561),
column_names=feature_names)
train_data<-train_data_handle[,feature_names_mean_std_idx]
train_subjects<-fread(paste(unzip_directory,"/train/subject_train.txt",sep=""),
data.table=FALSE)
train_data<-cbind("subject"=train_subjects$V1,train_data)
train_activities<-fread(paste(unzip_directory,"/train/y_train.txt",sep=""),
data.table=FALSE)
train_data<-cbind("activityname"=cut(train_activities$V1,6,labels=activity_names),
train_data)
# now merge the two sets (train and test) into a single dataframe (part 1
# of the project)
all_data<-rbind(test_data,train_data)
####
# part 5 - create an independent tidy data set with the average of each
# variable for each activity and each subject
library(reshape2)
all_data_melt<-melt(all_data,id=c("subject","activityname"))
all_data_cast<-dcast(all_data_melt, activityname + subject ~ variable, mean)
# write out the table for uploading a project deliverable
write.table(all_data_cast, file="getdata-008-project-results.txt",sep=" ",
row.names = FALSE)
|
k <- 1
n <- 15
tp1 <- .1
tp2 <- .1
| /Models/Bayesian_Cognitive_Modeling/ParameterEstimation/Binomial/Rate_4/Rate_4.data.R | no_license | wmmurrah/cognitivemodeling | R | false | false | 35 | r | k <- 1
n <- 15
tp1 <- .1
tp2 <- .1
|
#
# Elementary Effects Study Design
#
rm(list=ls()) # clear workspace
R_LIBS= ("/home/R/library") # set path for R libraries
options(scipen=999) # turn scientific notation off
options(stringsAsFactors = FALSE) # turn off representing strings as factors
# Set parameters for each study
setwd ("~/GitHub/epa-biogas-rin/studies/FY18/ee trouble shoot/wesys studies/")
r <- 500 # number of trajectories for EE study
# Load libraries
library(sensitivity)
library(gdata)
library (dplyr)
library (data.table)
# 1. Load Excel Input
vars <- read.csv ("ca.500traj.ee.study.design.csv")
vars <- vars[,1:5]
vars <- mutate(vars, new.var.names = paste(factor, sep=":"))
vars$delta <- vars$max - vars$min
## 2. Generate a Study Design
set.seed(12340)
# Morris 2r sample points per input:
# total = r(k + 1) samples, where
# k = the number of inputs
# r = the number of trajectories (reps)
# N = number of factors
N <- nrow(vars)
# Dummy "model" passed to sensitivity::morris(): a constant response of one
# per factor. Only the generated design matrix ($X) is used downstream, so
# the model output itself is discarded.
# NOTE(review): morris() conventionally expects one output per design row
# (nrow(X)) -- confirm before reusing this helper elsewhere.
myGenerate <- function (X){
  rep.int(1, ncol(X))
}
# Generate the Morris Study Design
SA.design <- morris(model = myGenerate, factors = N, r = r,
design = list(type = "oat", levels = 6, grid.jump=1))$X
# Save the Morris Study Design
save(SA.design, file = "sa.design.ca.RDA")
# 3. Transform Data
# Each column of new.design represents a unique variable;
# rows represent the individual runs.
a <- t(vars$set)
b <- 1 : dim(SA.design)[2]
z <- matrix(a, nrow=length(b), ncol=length(a), byrow=TRUE)
# 0/1 indicator matrix mapping design columns to variable "sets"
zz <- apply(b == z, c(1,2), function(x) {if (x) 1 else 0})
w <- SA.design %*% zz
# rescale the unit-interval Morris design onto each variable's [min, max]
new.design <- w * matrix(vars$delta, nrow=dim(SA.design)[1], ncol=length(a), byrow=TRUE) + matrix(vars$min, nrow=dim(SA.design)[1], ncol=length(a), byrow=TRUE)
colnames(new.design) <- vars$new.var.names
# long format: one row per (variable, run, value)
ee.design <- melt(t(new.design))
colnames(ee.design) <- c("variable", "run", "value")
## 4. Save Transformed Outputs
# # These outputs are inputs for the WESyS model
# BUG FIX: `results_rda_filepath` was never defined and a stray closing
# brace followed the save() call, making the script unparseable. Define an
# explicit output path (mirroring the sa.design.ca.RDA naming above).
results_rda_filepath <- "ee.design.ca.RDA"
save (ee.design, file = results_rda_filepath)
| /WESyS/epa-biogas-rin-HTL-TEA/studies/FY18/ee trouble shoot/wesys studies/1.studydesign.R | no_license | irinatsiryapkina/work | R | false | false | 2,108 | r | #
# Elementary Effects Study Design
#
rm(list=ls()) # clear workspace
R_LIBS= ("/home/R/library") # set path for R libraries
options(scipen=999) # turn scientific notation off
options(stringsAsFactors = FALSE) # turn off representing strings as factors
# Set parameters for each study
setwd ("~/GitHub/epa-biogas-rin/studies/FY18/ee trouble shoot/wesys studies/")
r <- 500 # number of trajectories for EE study
# Load libraries
library(sensitivity)
library(gdata)
library (dplyr)
library (data.table)
# 1. Load Excel Input
vars <- read.csv ("ca.500traj.ee.study.design.csv")
vars <- vars[,1:5]
vars <- mutate(vars, new.var.names = paste(factor, sep=":"))
vars$delta <- vars$max - vars$min
## 2. Generate a Study Design
set.seed(12340)
# Morris 2r sample points per input:
# total = r(k + 1) samples, where
# k = the number of inputs
# r = the number of trajectories (reps)
# N = number of factors
N <- nrow(vars)
myGenerate <- function (X){
rep(1,dim(X)[2])
}
# Generate the Morris Study Design
SA.design <- morris(model = myGenerate, factors = N, r = r,
design = list(type = "oat", levels = 6, grid.jump=1))$X
# Save the Morris Study Design
save(SA.design, file = "sa.design.ca.RDA")
# 3. Transform Data
# Each column represents a unique Variable
# Rows represent the individual runs.
a <- t(vars$set)
b <- 1 : dim(SA.design)[2]
z <- matrix(a, nrow=length(b), ncol=length(a), byrow=TRUE)
zz <- apply(b == z, c(1,2), function(x) {if (x) 1 else 0})
w <- SA.design %*% zz
new.design <- w * matrix(vars$delta, nrow=dim(SA.design)[1], ncol=length(a), byrow=TRUE) + matrix(vars$min, nrow=dim(SA.design)[1], ncol=length(a), byrow=TRUE)
colnames(new.design) <- vars$new.var.names
ee.design <- melt(t(new.design))
colnames(ee.design) <- c("variable", "run", "value")
## 4. Save Transformed Outputs
# # These outputs are inputs for the WESyS model
save (ee.design, file = results_rda_filepath)
}
|
# Perform sensitivity analysis for the quantile used to estimate
# the top of the sphere simultaneously with the cutoff value k
# to determine whether someone has removed their device.
# Vary |H_i| from 0.9 T_i ... 0.99 T_i
# check angular change in upright orientation
# check change in classifications
###########################################################
# Check Angular Change in Upright Orientation
rm(list = ls())
data.dir <- file.path("Data/Data_SMASH_ZIO/OneMinute_Data_2021-06-21")
raw.file <- dir(data.dir,full.names = T)
## Packages
library(lubridate)
library(dplyr)
library(ggplot2)
library(gridExtra)
# Angle in degrees subtended by a chord of the unit sphere:
# theta = 2 * asin(chord / 2), converted from radians to degrees.
chord2theta <- function(chord) {
  2 * asin(chord / 2) / pi * 180
}
# For every one-minute summary file, compute per-clustering "device down"
# classifications and the pairwise agreement between the three clusterings.
df <- tibble()
for(i in raw.file){
  ## Grab one raw data file
  min.data <- read.csv(i)
  # down0: minute-level flag, device tilted past 45 degrees.
  # down.<method>: a minute counts as "down" if its own angle OR the median
  # angle of its cluster (within a wear bout) exceeds 45 degrees.
  # msc/chc/whc: fraction of minutes classified down per method.
  # <a>.<b>: agreement between two methods = P(both down) + P(both up).
  # NOTE(review): na.rm=T uses the reassignable alias T; prefer TRUE.
  df <-
    min.data %>%
    mutate(down0 = as.numeric(theta > 45)) %>%
    group_by(wear.bout, cluster.meanshift.14) %>%
    mutate(down.meanshift.14 = down0 | (median(theta)>45)) %>%
    ungroup %>%
    group_by(wear.bout, cluster.centroid7) %>%
    mutate(down.centroid7 = down0 | (median(theta)>45)) %>%
    ungroup %>%
    group_by(wear.bout, cluster.ward5) %>%
    mutate(down.ward5 = down0 | (median(theta)>45)) %>%
    ungroup %>%
    summarise(msc = mean(down.meanshift.14, na.rm=T),
              chc = mean(down.centroid7, na.rm=T),
              whc = mean(down.ward5, na.rm=T),
              msc.chc = mean(down.meanshift.14*down.centroid7, na.rm=T) +
                mean((1-down.meanshift.14)*(1-down.centroid7), na.rm=T),
              msc.whc = mean(down.meanshift.14*down.ward5, na.rm=T) +
                mean((1-down.meanshift.14)*(1-down.ward5), na.rm=T),
              chc.whc = mean(down.centroid7*down.ward5, na.rm=T) +
                mean((1-down.centroid7)*(1-down.ward5), na.rm=T)) %>%
    bind_rows(df)
}
# Distribution of agreement across files (results quoted in comments below).
summary(df$msc.chc)
summary(df$msc.whc)
summary(df$chc.whc)
#> summary(df$msc.chc)
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#0.8691 0.9818 0.9941 0.9821 0.9971 0.9995
#> summary(df$msc.whc)
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#0.8839 0.9949 0.9981 0.9882 0.9995 1.0000
#> summary(df$chc.whc)
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#0.8695 0.9853 0.9934 0.9741 0.9985 0.9996
| /Code/Building_Set_14/2c_clustering__Sensitivity_Analysis.R | no_license | etzkorn/postuR_analysis | R | false | false | 2,290 | r | # Perform sensitivity analysis for the quantile used to estimate
# the top of the sphere simultaneously with the cutoff value k
# to determine whether someone has removed their device.
# Vary |H_i| from 0.9 T_i ... 0.99 T_i
# check angular change in upright orientation
# check change in classifications
###########################################################
# Check Angular Change in Upright Orientation
rm(list = ls())
data.dir <- file.path("Data/Data_SMASH_ZIO/OneMinute_Data_2021-06-21")
raw.file <- dir(data.dir,full.names = T)
## Packages
library(lubridate)
library(dplyr)
library(ggplot2)
library(gridExtra)
chord2theta <- function(chord) 2*asin(chord/2)/pi*180
df <- tibble()
for(i in raw.file){
## Grab one raw data file
min.data <- read.csv(i)
df <-
min.data %>%
mutate(down0 = as.numeric(theta > 45)) %>%
group_by(wear.bout, cluster.meanshift.14) %>%
mutate(down.meanshift.14 = down0 | (median(theta)>45)) %>%
ungroup %>%
group_by(wear.bout, cluster.centroid7) %>%
mutate(down.centroid7 = down0 | (median(theta)>45)) %>%
ungroup %>%
group_by(wear.bout, cluster.ward5) %>%
mutate(down.ward5 = down0 | (median(theta)>45)) %>%
ungroup %>%
summarise(msc = mean(down.meanshift.14, na.rm=T),
chc = mean(down.centroid7, na.rm=T),
whc = mean(down.ward5, na.rm=T),
msc.chc = mean(down.meanshift.14*down.centroid7, na.rm=T) +
mean((1-down.meanshift.14)*(1-down.centroid7), na.rm=T),
msc.whc = mean(down.meanshift.14*down.ward5, na.rm=T) +
mean((1-down.meanshift.14)*(1-down.ward5), na.rm=T),
chc.whc = mean(down.centroid7*down.ward5, na.rm=T) +
mean((1-down.centroid7)*(1-down.ward5), na.rm=T)) %>%
bind_rows(df)
}
summary(df$msc.chc)
summary(df$msc.whc)
summary(df$chc.whc)
#> summary(df$msc.chc)
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#0.8691 0.9818 0.9941 0.9821 0.9971 0.9995
#> summary(df$msc.whc)
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#0.8839 0.9949 0.9981 0.9882 0.9995 1.0000
#> summary(df$chc.whc)
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#0.8695 0.9853 0.9934 0.9741 0.9985 0.9996
|
library(shiny)
library(shinymetrum)
# Metworx-branded Shiny app: a slider selects how many standard-normal draws
# to take, and the main panel shows a kernel density estimate of the sample.
ui <- metworxApp(
  metworxTitle = "Sample App",
  # Standard Shiny UI wrapped by the Metworx chrome.
  fluidPage(
    sidebarLayout(
      sidebarPanel(
        sliderInput('nDraws', '# of Draws', 5, 100, 50)
      ),
      mainPanel(
        plotOutput('randNorm')
      )
    )
  )
)
server <- function(input, output) {
  # Re-sample and re-plot whenever the slider moves.
  output$randNorm <- renderPlot({
    draws <- rnorm(input$nDraws)
    plot(density(draws))
  })
}
shinyApp(ui = ui, server = server)
| /tests/metworxApp/app.R | no_license | anhnguyendepocen/shinymetrum | R | false | false | 506 | r | library(shiny)
library(shinymetrum)
ui <- metworxApp(
metworxTitle = "Sample App",
#-- standard shiny UI code --#
fluidPage(
sidebarLayout(
sidebarPanel(
sliderInput('nDraws', '# of Draws', 5, 100, 50)
),
mainPanel(
plotOutput('randNorm')
)
)
)
#-- standard shiny UI code --#
)
server <- function(input, output) {
output$randNorm <- renderPlot({
plot(density(rnorm(input$nDraws)))
})
}
shinyApp(ui = ui, server = server)
|
library(tidyverse)
#set working directory
setwd("Y:/Julia Crunden/Results/Plate Reader/Optimising Cys + Met concentrations for Met3p repressor strain/2020-10-23/Comparison of nitrogen source for Radicicol potency")
#read in the tab-separated plate-reader export
df <- read.csv("radicicol potency with Am sulf or MSG.txt", header=TRUE, sep="\t")
#This uses tidyr to rearrange the data.
#Selects the time columns which start with X, drops the X prefix and puts the values in a column named "time"
#Makes a column named OD holding the readings
#Changes the type of the time data from character to numeric
df2 <- pivot_longer(df, starts_with("X"), names_to = "time", names_prefix = ("X"), values_to = "OD")
df2$time <- as.numeric(df2$time)
df2
#This prints a line plot of df2 with OD against time, one line per Sample
ggplot(df2, aes(x= time, y= OD, group = Sample)) +
#Draws one growth-curve line per sample, coloured by sample name
geom_line(aes(colour = Sample)) +
#Applies the light ggplot2 theme (white background, grey gridlines)
theme_light() +
#turns the x axis labels 90 degrees so they aren't overlapping
theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
# changes the X axis scale. From 0 to 48 in increments of 12
scale_x_continuous(breaks = seq(0, 48, by = 12)) +
#labels the x axis
labs(x = "Time (hours)")
#Saves a png file of the plot
ggsave("Cys and met data.png", width = 10, height = 5, dpi = 600)
| /Multiple growth curves on one plot/Multiple growth curves on one plot.R | no_license | jcrunden/Microbiology | R | false | false | 1,357 | r | library(tidyverse)
#set working directory
setwd("Y:/Julia Crunden/Results/Plate Reader/Optimising Cys + Met concentrations for Met3p repressor strain/2020-10-23/Comparison of nitrogen source for Radicicol potency")
#read in the file
df <- read.csv("radicicol potency with Am sulf or MSG.txt", header=TRUE, sep="\t")
#This uses tidyr to rearrange the data.
#Selects the time in seconds which start with X, disregards the X and makes it a column named "time"
#Makes column named OD
#Changes the type of data that the time is, from character to numeric
df2 <- pivot_longer(df, starts_with("X"), names_to = "time", names_prefix = ("X"), values_to = "OD")
df2$time <- as.numeric(df2$time)
df2
#This prints a line plot of df2 with OD against time and with each Sample name as a different name
ggplot(df2, aes(x= time, y= OD, group = Sample)) +
#Sets colour of line to red
geom_line(aes(colour = Sample)) +
#removes legend
theme_light() +
#turns the x acis labels 90 degrees so they aren't overlapping
theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
# changes the X axis scale. From 0 to 48 in increments of 12
scale_x_continuous(breaks = seq(0, 48, by = 12)) +
#labels the x axis
labs(x = "Time (hours)")
#Saves a png file of the plot
ggsave("Cys and met data.png", width = 10, height = 5, dpi = 600)
|
# Statistics for Linguists: An Introduction Using R
# Code presented inside Chapter 12
# --------------------------------------------------------
# 12.1. Theoretical background: Data-generating processes
# Applying the logistic function to a few values:
# plogis() is the standard logistic (inverse logit): it maps any real
# number (a log odds) onto a probability strictly between 0 and 1.
plogis(-2)
plogis(0)
plogis(2)
# Even if you enter extremely large or small values...
# ... the logistic function never goes beyond 0/1.
# --------------------------------------------------------
# 12.4. Speech errors and blood alcohol concentration
# Load tidyverse and broom package:
library(tidyverse)
library(broom)
# Load the speech error data:
alcohol <- read_csv('speech_errors.csv')
# Check:
alcohol
# Fit a logistic regression model:
# family = 'binomial' makes glm() fit logistic regression (logit link).
alcohol_mdl <- glm(speech_error ~ BAC,
data = alcohol, family = 'binomial')
# Check output:
tidy(alcohol_mdl)
# Extract intercept and coefficient:
intercept <- tidy(alcohol_mdl)$estimate[1]
slope <- tidy(alcohol_mdl)$estimate[2]
# Check:
intercept
slope
# Calculate log odds for 0 and 0.3 blood alcohol:
intercept + slope * 0 # BAC = 0
intercept + slope * 0.3 # BAC = 0.3
# Same, but apply logistic for probabilities:
plogis(intercept + slope * 0)
plogis(intercept + slope * 0.3)
# Create a sequence of BAC values for plotting the model:
BAC_vals <- seq(0, 0.4, 0.01)
# Calculate fitted values (predicted probabilities along the BAC grid):
y_preds <- plogis(intercept + slope * BAC_vals)
# Put this into a new tibble:
mdl_preds <- tibble(BAC_vals, y_preds)
mdl_preds
# Make a plot of data and model:
ggplot(alcohol, aes(x = BAC, y = speech_error)) +
geom_point(size = 4, alpha = 0.6) +
geom_line(data = mdl_preds,
aes(x = BAC_vals, y = y_preds)) +
theme_minimal()
# --------------------------------------------------------
# 12.5. Predicting the dative alternation:
# Get the dative dataset from the languageR package:
library(languageR)
# Check first two rows:
head(dative, 2)
# Tabulate the response:
table(dative$RealizationOfRecipient)
# Make a model of dative as a function of animacy:
dative_mdl <- glm(RealizationOfRecipient ~ AnimacyOfRec,
data = dative, family = 'binomial')
# Look at coefficients:
tidy(dative_mdl)
# Check the order of levels:
levels(dative$RealizationOfRecipient)
# Extract coefficients:
intercept <- tidy(dative_mdl)$estimate[1]
slope <- tidy(dative_mdl)$estimate[2]
# Check:
intercept
slope
# Calculate predictions for animates and inanimates:
plogis(intercept + slope * 0)
plogis(intercept + slope * 1)
# BUG FIX: the original referenced undefined variables b0/b1 here;
# the coefficients were stored above as `intercept` and `slope`.
animate_pred <- intercept + slope * 0
inanimate_pred <- intercept + slope * 1
# Log odds:
animate_pred
inanimate_pred
# Probabilities:
plogis(animate_pred)
plogis(inanimate_pred)
# --------------------------------------------------------
# 12.6. Analyzing gesture perception: Hassemer & Winter (2016)
# 12.6.1. Exploring the dataset:
# Load data and check:
ges <- read_csv('hassemer_winter_2016_gesture.csv')
ges
# Tabulate distribution of participants over conditions:
table(ges$pinkie_curl)
# Tabulate overall responses:
table(ges$choice)
# Proportion of choices:
table(ges$choice) / sum(table(ges$choice))
# Another way to compute proportions:
prop.table(table(ges$choice))
# Tabulate response choice against pinkie curl condition:
xtab <- table(ges$pinkie_curl, ges$choice)
xtab
# Row-wise proportions:
xtab / rowSums(xtab)
# Another way to compute row-wise proportions:
# (margin = 1 normalises within rows, so each row sums to 1)
round(prop.table(xtab, margin = 1), digits = 2)
# 12.6.2. Logistic regression analysis:
# The following yields an error (shown deliberately for teaching)...
ges_mdl <- glm(choice ~ pinkie_curl, data = ges) # error
# ...because the glm() doesn't know which GLM to run.
# Let's supply the family argument:
ges_mdl <- glm(choice ~ pinkie_curl,
data = ges, family = 'binomial') # error
# The 'choice' column is a character but needs to be factor.
# Convert it to factor:
ges <- mutate(ges, choice = factor(choice))
# Check:
class(ges$choice)
# Check order of levels:
levels(ges$choice)
# Fit the logistic regression model:
ges_mdl <- glm(choice ~ pinkie_curl, data = ges,
family = 'binomial')
# Interpret coefficients:
tidy(ges_mdl)
# Create tibble for predict():
ges_preds <- tibble(pinkie_curl = 1:9)
# Get predicted log odds:
predict(ges_mdl, ges_preds)
# Or probabilities:
plogis(predict(ges_mdl, ges_preds))
# Alternative way to get probabilities:
predict(ges_mdl, ges_preds, type = 'response')
# Extract predictions and compute 95% confidence interval:
# se.fit = TRUE returns standard errors; +/- 1.96 * SE on the log-odds
# scale gives a 95% CI, transformed to probabilities with plogis().
ges_preds <- as_tibble(predict(ges_mdl,
ges_preds,
se.fit = TRUE)[1:2]) %>%
mutate(prob = plogis(fit),
LB = plogis(fit - 1.96 * se.fit),
UB = plogis(fit + 1.96 * se.fit)) %>%
bind_cols(ges_preds)
# Make a plot of these predictions:
ges_preds %>% ggplot(aes(x = pinkie_curl, y = prob)) +
geom_point(size = 3) +
geom_errorbar(aes(ymin = LB, ymax = UB), width = 0.5) +
scale_x_continuous(breaks = 1:9) +
xlab('Pinkie curl') +
ylab('p(y = Shape)') +
theme_minimal()
| /textbook/scripts/chapter12.R | no_license | mahowak/LING104 | R | false | false | 5,017 | r | # Statistics for Linguists: An Introduction Using R
# Code presented inside Chapter 12
# --------------------------------------------------------
# 12.1. Theoretical background: Data-generating processes
# Applying the logistic function to a few values:
plogis(-2)
plogis(0)
plogis(2)
# Even if you enter extremely large or small values...
# ... the logistic function never goes beyond 0/1.
# --------------------------------------------------------
# 12.4. Speech errors and blood alcohol concentration
# Load tidyverse and broom package:
library(tidyverse)
library(broom)
# Load the speech error data:
alcohol <- read_csv('speech_errors.csv')
# Check:
alcohol
# Fit a logistic regression model:
alcohol_mdl <- glm(speech_error ~ BAC,
data = alcohol, family = 'binomial')
# Check output:
tidy(alcohol_mdl)
# Extract intercept and coefficient:
intercept <- tidy(alcohol_mdl)$estimate[1]
slope <- tidy(alcohol_mdl)$estimate[2]
# Check:
intercept
slope
# Calculate log odds for 0 and 0.3 blood alcohol:
intercept + slope * 0 # BAC = 0
intercept + slope * 0.3 # BAC = 0.3
# Same, but apply logistic for probabilities:
plogis(intercept + slope * 0)
plogis(intercept + slope * 0.3)
# Create a sequence of BAC values for plotting the model:
BAC_vals <- seq(0, 0.4, 0.01)
# Calculate fitted values:
y_preds <- plogis(intercept + slope * BAC_vals)
# Put this into a new tibble:
mdl_preds <- tibble(BAC_vals, y_preds)
mdl_preds
# Make a plot of data and model:
ggplot(alcohol, aes(x = BAC, y = speech_error)) +
geom_point(size = 4, alpha = 0.6) +
geom_line(data = mdl_preds,
aes(x = BAC_vals, y = y_preds)) +
theme_minimal()
# --------------------------------------------------------
# 12.5. Predicting the dative alternation:
# Get the dative dataset from the languageR package:
library(languageR)
# Check first two rows:
head(dative, 2)
# Tabulate the response:
table(dative$RealizationOfRecipient)
# Make a model of dative as a function of animacy:
dative_mdl <- glm(RealizationOfRecipient ~ AnimacyOfRec,
data = dative, family = 'binomial')
# Look at coefficients:
tidy(dative_mdl)
# Check the order of levels:
levels(dative$RealizationOfRecipient)
# Extract coefficients:
intercept <- tidy(dative_mdl)$estimate[1]
slope <- tidy(dative_mdl)$estimate[2]
# Check:
intercept
slope
# Calculate predictions for animates and inanimates:
plogis(intercept + slope * 0)
plogis(intercept + slope * 1)
animate_pred <- b0 + b1 * 0
inanimate_pred <- b0 + b1 * 1
# Log odds:
animate_pred
inanimate_pred
# Probabilities:
plogis(animate_pred)
plogis(inanimate_pred)
# --------------------------------------------------------
# 12.6. Analyzing gesture perception: Hassemer & Winter (2016)
# 12.6.1. Exploring the dataset:
# Load data and check:
ges <- read_csv('hassemer_winter_2016_gesture.csv')
ges
# Tabulate distribution of participants over conditions:
table(ges$pinkie_curl)
# Tabulate overall responses:
table(ges$choice)
# Proportion of choices:
table(ges$choice) / sum(table(ges$choice))
# Another way to compute proportions:
prop.table(table(ges$choice))
# Tabulate response choice against pinkie curl condition:
xtab <- table(ges$pinkie_curl, ges$choice)
xtab
# Row-wise proportions:
xtab / rowSums(xtab)
# Another way to compute row-wise proportions:
round(prop.table(xtab, margin = 1), digits = 2)
# 12.6.2. Logistic regression analysis:
# The following yields an error...
ges_mdl <- glm(choice ~ pinkie_curl, data = ges) # error
# ...because the glm() doesn't know which GLM t run.
# Let's supply the family argument:
ges_mdl <- glm(choice ~ pinkie_curl,
data = ges, family = 'binomial') # error
# The 'choice' column is a character but needs to be factor.
# Convert it to factor:
ges <- mutate(ges, choice = factor(choice))
# Check:
class(ges$choice)
# Check order of levels:
levels(ges$choice)
# Fit the logistic regression model:
ges_mdl <- glm(choice ~ pinkie_curl, data = ges,
family = 'binomial')
# Interpret coefficients:
tidy(ges_mdl)
# Create tibble for predict():
ges_preds <- tibble(pinkie_curl = 1:9)
# Get predicted log odds:
predict(ges_mdl, ges_preds)
# Or probabilities:
plogis(predict(ges_mdl, ges_preds))
# Alternative way to get probabilities:
predict(ges_mdl, ges_preds, type = 'response')
# Extract predictions and compute 95% confidence interval:
ges_preds <- as_tibble(predict(ges_mdl,
ges_preds,
se.fit = TRUE)[1:2]) %>%
mutate(prob = plogis(fit),
LB = plogis(fit - 1.96 * se.fit),
UB = plogis(fit + 1.96 * se.fit)) %>%
bind_cols(ges_preds)
# Make a plot of these predictions:
ges_preds %>% ggplot(aes(x = pinkie_curl, y = prob)) +
geom_point(size = 3) +
geom_errorbar(aes(ymin = LB, ymax = UB), width = 0.5) +
scale_x_continuous(breaks = 1:9) +
xlab('Pinkie curl') +
ylab('p(y = Shape)') +
theme_minimal()
|
# Summary method for CAvariants objects: prints the names of the output
# components, the total inertia and its decomposition (with polynomial
# components for the ordered variants), the tau predictability index and
# C-statistic for the non-symmetrical variants, and the row/column standard
# and principal coordinates truncated to at most `printdims` axes.
summary.CAvariants <-
function(object,printdims = 3,digits = 3,...) {
cat("\n SUMMARY",object$catype, "Correspondence Analysis\n")
cat("\n Names of output objects\n")
print(names(object))
# Number of axes to display: at most printdims, never more than the rank r.
# (The original's redundant `d <- object$r` pre-assignment was removed.)
d <- min(printdims, object$r)
#---------------------------------------------------------------------------
# Scalar conditions use || (short-circuit) rather than elementwise |.
if ((object$catype=="CA")||(object$catype=="NSCA") ){
cat("\n Total inertia ", round(object$inertiasum,digits = digits), "\n\n")
cat("Inertias, percent inertias and cumulative percent inertias of the row and column space\n\n")
print(round(data.frame(object$inertias),digits=digits))
}
#----------------------------------------------------------------------------------------------
if ((object$catype=="DONSCA")||(object$catype=="DOCA") ){
cat("\n Total inertia ", round(object$inertiasum,digits=digits), "\n\n")
cat("Inertias, percent inertias and cumulative percent inertias of the row space\n\n")
print(round(data.frame(object$inertias),digits=digits))
cat("Inertias, percent inertias and cumulative percent inertias of the column space \n\n")
print(round(data.frame(object$inertias2),digits=digits))
cat("\n Polynomial Components of Inertia \n
** Row Components ** \n")
print(round(object$comps$compsR,digits=digits))
cat("\n** Column Components ** \n")
print(round(object$comps$compsC,digits=digits))
}
#-----------------------------------------------------------------------------------------------
if ((object$catype=="SONSCA")||(object$catype=="SOCA") ){
cat("\n Total inertia ", round(object$inertiasum,digits=digits), "\n\n")
cat("Inertias, percent inertias and cumulative percent inertias of the row space\n\n")
print(round(data.frame(object$inertias),digits=digits))
cat("Inertias, percent inertias and cumulative percent inertias of the column space \n\n")
print(round(data.frame(object$inertias2),digits=digits))
cat("\n Polynomial Components of Inertia \n
** Column Components ** \n")
print(round(object$comps$comps,digits=digits))
}
#############################################################
if ((object$catype=="NSCA")||(object$catype=="DONSCA")||(object$catype=="SONSCA")){
cat("\n Predictability Index for Variants of Non symmetrical Correspondence Analysis:\n")
cat("\nTau Index predicting from column \n\n")
print(round(object$tau,digits=digits))
Cstatistic<-(sum(object$Xtable)-1)*(nrow(object$Xtable)-1)*object$tau
pvalueC<-1 - pchisq(Cstatistic, (nrow(object$Xtable)-1)*(ncol(object$Xtable)-1))
cat("\n C-statistic", round(Cstatistic,digits=digits), "and p-value", pvalueC, "\n")
}
# Coordinates: doubly-ordered variants have polynomial row AND column
# coordinates; singly-ordered variants have polynomial column coordinates
# only; all other variants print plain standard/principal coordinates.
# BUG FIX: the original wrote `if (DOCA...) {A}` followed by
# `if (SOCA...) {B} else {C}`, so for DOCA/DONSCA the else block C was
# ALSO executed, printing the plain coordinates a second time. The chain
# is now a proper if / else if / else.
if ((object$catype=="DOCA")||(object$catype=="DONSCA")){
cat("\n Column standard polynomial coordinates \n")
print(round(data.frame(object$Cstdcoord[,1:d], row.names=object$collabels), digits=digits))
cat("\n Row standard polynomial coordinates \n")
print(round(data.frame(object$Rstdcoord[,1:d], row.names=object$rowlabels), digits=digits))
cat("\n Column principal polynomial coordinates \n")
print(round(data.frame(object$Cprinccoord[,1:d], row.names=object$collabels), digits=digits))
cat("\n Row principal polynomial coordinates \n")
print(round(data.frame(object$Rprinccoord[,1:d], row.names=object$rowlabels), digits=digits))
}
else if ((object$catype=="SOCA")||(object$catype=="SONSCA")){
cat("\n Column standard polynomial coordinates \n")
print(round(data.frame(object$Cstdcoord[,1:d], row.names=object$collabels), digits=digits))
cat("\n Row standard coordinates \n")
print(round(data.frame(object$Rstdcoord[,1:d], row.names=object$rowlabels), digits=digits))
cat("\n Column principal coordinates \n")
print(round(data.frame(object$Cprinccoord[,1:d], row.names=object$collabels), digits=digits))
cat("\n Row principal polynomial coordinates \n")
print(round(data.frame(object$Rprinccoord[,1:d], row.names=object$rowlabels), digits=digits))
}
else{
cat("\n Column standard coordinates \n")
print(round(data.frame(object$Cstdcoord[,1:d], row.names=object$collabels), digits=digits))
cat("\n Row standard coordinates \n")
print(round(data.frame(object$Rstdcoord[,1:d], row.names=object$rowlabels), digits=digits))
cat("\n Column principal coordinates \n")
print(round(data.frame(object$Cprinccoord[,1:d], row.names=object$collabels), digits=digits))
cat("\n Row principal coordinates \n")
print(round(data.frame(object$Rprinccoord[,1:d], row.names=object$rowlabels), digits=digits))
}
}
| /R/summary.CAvariants.R | no_license | cran/CAvariants | R | false | false | 4,515 | r | summary.CAvariants <-
function(object,printdims = 3,digits = 3,...) {
cat("\n SUMMARY",object$catype, "Correspondence Analysis\n")
cat("\n Names of output objects\n")
print(names(object))
d <- object$r
d <- min(printdims, object$r)
#---------------------------------------------------------------------------
if ((object$catype=="CA")|(object$catype=="NSCA") ){
cat("\n Total inertia ", round(object$inertiasum,digits = digits), "\n\n")
cat("Inertias, percent inertias and cumulative percent inertias of the row and column space\n\n")
print(round(data.frame(object$inertias),digits=digits))
}
#----------------------------------------------------------------------------------------------
if ((object$catype=="DONSCA")|(object$catype=="DOCA") ){
cat("\n Total inertia ", round(object$inertiasum,digits=digits), "\n\n")
cat("Inertias, percent inertias and cumulative percent inertias of the row space\n\n")
print(round(data.frame(object$inertias),digits=digits))
cat("Inertias, percent inertias and cumulative percent inertias of the column space \n\n")
print(round(data.frame(object$inertias2),digits=digits))
cat("\n Polynomial Components of Inertia \n
** Row Components ** \n")
print(round(object$comps$compsR,digits=digits))
cat("\n** Column Components ** \n")
print(round(object$comps$compsC,digits=digits))
}
#-----------------------------------------------------------------------------------------------
if ((object$catype=="SONSCA")|(object$catype=="SOCA") ){
cat("\n Total inertia ", round(object$inertiasum,digits=digits), "\n\n")
cat("Inertias, percent inertias and cumulative percent inertias of the row space\n\n")
print(round(data.frame(object$inertias),digits=digits))
cat("Inertias, percent inertias and cumulative percent inertias of the column space \n\n")
print(round(data.frame(object$inertias2),digits=digits))
cat("\n Polynomial Components of Inertia \n
** Column Components ** \n")
print(round(object$comps$comps,digits=digits))
}
#############################################################
if ((object$catype=="NSCA")||(object$catype=="DONSCA")||(object$catype=="SONSCA")){
cat("\n Predictability Index for Variants of Non symmetrical Correspondence Analysis:\n")
cat("\nTau Index predicting from column \n\n")
print(round(object$tau,digits=digits))
Cstatistic<-(sum(object$Xtable)-1)*(nrow(object$Xtable)-1)*object$tau
#browser()
pvalueC<-1 - pchisq(Cstatistic, (nrow(object$Xtable)-1)*(ncol(object$Xtable)-1))
cat("\n C-statistic", round(Cstatistic,digits=digits), "and p-value", pvalueC, "\n")
}
if ((object$catype=="DOCA")|(object$catype=="DONSCA")){
cat("\n Column standard polynomial coordinates \n")
print(round(data.frame(object$Cstdcoord[,1:d], row.names=object$collabels), digits=digits))
cat("\n Row standard polynomial coordinates \n")
print(round(data.frame(object$Rstdcoord[,1:d], row.names=object$rowlabels), digits=digits))
cat("\n Column principal polynomial coordinates \n")
print(round(data.frame(object$Cprinccoord[,1:d], row.names=object$collabels), digits=digits))
cat("\n Row principal polynomial coordinates \n")
print(round(data.frame(object$Rprinccoord[,1:d], row.names=object$rowlabels), digits=digits))
}
if ((object$catype=="SOCA")|(object$catype=="SONSCA")){
cat("\n Column standard polynomial coordinates \n")
print(round(data.frame(object$Cstdcoord[,1:d], row.names=object$collabels), digits=digits))
cat("\n Row standard coordinates \n")
print(round(data.frame(object$Rstdcoord[,1:d], row.names=object$rowlabels), digits=digits))
cat("\n Column principal coordinates \n")
print(round(data.frame(object$Cprinccoord[,1:d], row.names=object$collabels), digits=digits))
cat("\n Row principal polynomial coordinates \n")
print(round(data.frame(object$Rprinccoord[,1:d], row.names=object$rowlabels), digits=digits))
}
else{
cat("\n Column standard coordinates \n")
print(round(data.frame(object$Cstdcoord[,1:d], row.names=object$collabels), digits=digits))
cat("\n Row standard coordinates \n")
print(round(data.frame(object$Rstdcoord[,1:d], row.names=object$rowlabels), digits=digits))
cat("\n Column principal coordinates \n")
print(round(data.frame(object$Cprinccoord[,1:d], row.names=object$collabels), digits=digits))
cat("\n Row principal coordinates \n")
print(round(data.frame(object$Rprinccoord[,1:d], row.names=object$rowlabels), digits=digits))
}
#cat("\n Inner product of coordinates (first two axes) \n")
#print(round(object$Trend,digits=digits))
}
|
testlist <- list(type = -819920896L, z = NaN)
result <- do.call(esreg::G1_fun,testlist)
str(result) | /esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609893750-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 99 | r | testlist <- list(type = -819920896L, z = NaN)
result <- do.call(esreg::G1_fun,testlist)
str(result) |
# Amelia provides missmap() for visualising missing data; install it only
# when absent instead of unconditionally re-installing on every run.
if (!requireNamespace("Amelia", quietly = TRUE)) {
  install.packages("Amelia")
}
library(ggplot2)
library(dplyr)
library(ggthemes)
library(corrgram)
library(corrplot)
library(caTools)
library(Amelia)
#Goal: Predict Survival of passengers onboard titanic (Current accuracy 76.67%)
#Data: https://www.kaggle.com/c/titanic/data
filepath <- "D:\\Git_DataScience_Projects\\DataScience\\Datasets"
titanic_train <- read.csv(paste(filepath, "Kaggle_Titanic\\Data\\train.csv", sep = "\\"))
titanic_test <- read.csv(paste(filepath, "Kaggle_Titanic\\Data\\test.csv", sep = "\\"))
# Age has many missing values in the training data.
missmap(titanic_train, main = "Missing Map", col = c("Yellow", "Black"), legend = FALSE)
# Univariate looks at the candidate predictors.
ggplot(titanic_train, aes(x = Pclass)) + geom_bar(aes(fill = factor(Pclass)))
ggplot(titanic_train, aes(x = Sex)) + geom_bar(aes(fill = factor(Sex)))
# 177 rows with missing Age are dropped from this histogram.
ggplot(titanic_train, aes(x = Age)) + geom_histogram(bins = 20, alpha = 0.5, fill = "blue")
ggplot(titanic_train, aes(x = SibSp)) + geom_bar()
ggplot(titanic_train, aes(x = Parch)) + geom_bar()
ggplot(titanic_train, aes(x = Fare)) + geom_histogram(bins = 10, alpha = 0.5, fill = "green", color = "black")
# Age by passenger class: the per-class medians read off this plot are used
# below to impute missing ages.
# BUG FIX: the original breaks argument was seq(min(0), max(80, by = 2)),
# which evaluates to seq(0, 80) -- `by` was swallowed by max().
ggplot(titanic_train, aes(x = Pclass, y = Age)) +
  geom_boxplot(aes(group = Pclass, fill = factor(Pclass)), alpha = 0.4) +
  scale_y_continuous(breaks = seq(0, 80, by = 2)) + theme_bw()
#####
# Impute missing ages with the typical age of the passenger's class
# (medians read off the Age-by-Pclass boxplot: 1st = 37, 2nd = 29, 3rd = 24).
# age:   numeric vector of ages, possibly containing NA
# class: passenger class (1, 2 or 3) for each element of `age`
# Returns `age` with every NA replaced by its class's typical age.
#####
calc_age <- function(age, class) {
  # Vectorised replacement; also handles zero-length input, which the
  # original 1:length() loop did not (1:0 iterates over c(1, 0)).
  fill <- ifelse(class == 1, 37, ifelse(class == 2, 29, 24))
  ifelse(is.na(age), fill, age)
}
fixed.ages <- calc_age(titanic_train$Age, titanic_train$Pclass)
titanic_train$Age <- fixed.ages
# Confirm no missing values remain after imputation.
missmap(titanic_train, main = "Check", col = c("Yellow", "Black"), legend = FALSE)
#Feature Engineering ideas (not implemented in this first pass):
#Grab Titles from name, Grab Cabin name letter, etc
# Drop identifier-like columns. (BUG FIX: the original select() listed
# -Cabin twice.)
titanic_train_mod <- select(titanic_train, -Cabin, -PassengerId, -Ticket, -Name)
titanic_train_mod$Survived <- factor(titanic_train_mod$Survived)
titanic_train_mod$Pclass <- factor(titanic_train_mod$Pclass)
titanic_train_mod$Parch <- factor(titanic_train_mod$Parch)
titanic_train_mod$SibSp <- factor(titanic_train_mod$SibSp)
str(titanic_train_mod)
################# Predict Survival ##################
# Logistic regression: binomial GLM with logit link on all remaining columns.
log.model <- glm(Survived ~ ., family = binomial(link = "logit"), data = titanic_train_mod)
summary(log.model)
# The test set also has many missing Age values.
missmap(titanic_test, main = "Missing Map", col = c("Yellow", "Black"), legend = FALSE)
# Age by class in the test set; per-class medians: 1st ~42, 2nd ~26, 3rd ~24.
# (Same seq() fix as above: `by` was previously swallowed by max().)
ggplot(titanic_test, aes(x = Pclass, y = Age)) +
  geom_boxplot(aes(group = Pclass, fill = factor(Pclass)), alpha = 0.4) +
  scale_y_continuous(breaks = seq(0, 80, by = 2)) + theme_bw()
# class 1 <- 42 , class 2 <- 26 , class 3 <- 24
#####
# Impute missing test-set ages with the typical age per passenger class
# (medians from the test-set boxplot: 1st = 42, 2nd = 26, 3rd = 24).
# Same contract as calc_age(), with test-set class medians.
#####
calc_age_testset <- function(age, class) {
  # Vectorised; safe on zero-length input (the original 1:length() loop
  # iterated over c(1, 0) for empty vectors).
  fill <- ifelse(class == 1, 42, ifelse(class == 2, 26, 24))
  ifelse(is.na(age), fill, age)
}
fixed.ages <- calc_age_testset(titanic_test$Age, titanic_test$Pclass)
titanic_test$Age <- fixed.ages
# Fare by passenger class, used to pick an imputation value for the single
# missing fare (a 3rd-class passenger).
ggplot(titanic_test, aes(x = Pclass, y = Fare)) +
  geom_boxplot(aes(group = Pclass, fill = factor(Pclass)), alpha = 0.4) +
  scale_y_continuous(breaks = seq(0, 600, 10)) + theme_bw()
# BUG FIX: without na.rm = TRUE, mean() of a vector containing NA is NA, so
# the original assignment left the missing fare unchanged (NA <- NA).
titanic_test[is.na(titanic_test$Fare), "Fare"] <- mean(titanic_test$Fare, na.rm = TRUE)
titanic_test_mod <- select(titanic_test, -Cabin, -PassengerId, -Ticket, -Name)
titanic_test_mod$Pclass <- factor(titanic_test_mod$Pclass)
titanic_test_mod$Parch <- factor(titanic_test_mod$Parch)
titanic_test_mod$SibSp <- factor(titanic_test_mod$SibSp)
# Recode the Parch level 9 (absent from the training data) to the nearest
# training level so predict() does not fail on an unseen factor level.
titanic_test_mod[titanic_test_mod$Parch == 9, "Parch"] <- 6
fitted.probablities <- predict(log.model, titanic_test_mod, type = "response")
# Classify as survived when the predicted probability exceeds 0.5.
fitted.results <- ifelse(fitted.probablities > 0.5, 1, 0)
submission <- cbind(titanic_test$PassengerId, fitted.results)
submission <- as.data.frame(submission)
colnames(submission) <- c("PassengerId", "Survived")
# row.names = FALSE keeps the expected two-column Kaggle format (the
# original wrote an extra row-name column).
write.csv(submission, file = "mysub1.csv", row.names = FALSE)
#######################
###
# 2. Submission
###
#######################
#######################
###
# Different Feature Engineering
###
#######################
# NOTE(review): these filenames ("titanic_train.csv"/"titanic_test.csv")
# differ from the Kaggle paths used above -- confirm both point at the
# same underlying data.
titanic_train <- read.csv(paste(filepath , "titanic_train.csv", sep = "\\"))
titanic_test <- read.csv(paste(filepath, "titanic_test.csv", sep = "\\"))
#Lot of missing Age values
missmap(titanic_train, main = "Missing Map", col = c("Yellow", "Black"), legend = FALSE)
#######################
###
# FIX MISSING AGE VALUES BASED ON PCLASS AVERAGE AGE
###
#######################
fixed.ages <- calc_age(titanic_train$Age , titanic_train$Pclass)
titanic_train$Age <- fixed.ages
#######################
###
# Age Groups
###
#######################
############################
###
#1. We can see from the following table, a higher preference was given to females overall when choosing to save a passenger
#2. Out of Total 1st class female passenger, only 3% were killed while 97% were saved / survived
# As compared with that of total 1st class male passengers, 63% were killed while only 37% were saved / survived
#3. Similar trend is seen with both female and male 2nd class passengers
#4. However, an interesting stat is observed with 3rd class female passengers with only 50-50 chances of survival
# This is interesting as it might indicate that 3rd class female passengers were not treated fairly as compared to that
# of 1st and 2nd class female passengers who have death rate of only 3% and 7% respectively whereas for
# 3rd class female passengers the death rate is extremely high of 50%
###
# Survival percentage within each Pclass x Sex group.
titanic_train %>% group_by(Pclass, Sex, Survived) %>% summarise(Total_Passengers = n()) %>%
mutate(Percent = Total_Passengers / sum(Total_Passengers) * 100)
#############################
#TODO: 20s, 30s, 40s, etc...
# Map a single age to a decade-style label; used with sapply() to build the
# AgeGroup feature. Bins are half-open [lower, upper) throughout.
# BUG FIX: the original fifties branch used `age >= 50 & age <= 60`, which
# inconsistently placed age 60 in "Fifties" while every other bin was
# half-open; 60 now falls in "Above 60".
age_groups <- function(age){
  if (age < 20) {
    return("Below 20")
  } else if (age < 30) {
    return("Twenties")
  } else if (age < 40) {
    return("Thirties")
  } else if (age < 50) {
    return("Fourties")
  } else if (age < 60) {
    return("Fifties")
  } else {
    return("Above 60")
  }
}
#Lot of missing Age values
missmap(titanic_test, main = "Missing Map", col = c("Yellow", "Black"), legend = FALSE)
##Making same changes to test dataset
fixed.ages <- calc_age_testset(titanic_test$Age, titanic_test$Pclass)
titanic_test$Age <- fixed.ages
titanic_train$AgeGroup <- as.factor(sapply(titanic_train$Age, age_groups))
titanic_test$AgeGroup <- as.factor(sapply(titanic_test$Age, age_groups))
titanic_train <- mutate(titanic_train, FamilySize = SibSp + Parch)
titanic_test <- mutate(titanic_test, FamilySize = SibSp + Parch)
titanic_train$Survived <- factor(titanic_train$Survived)
titanic_train$Pclass <- factor(titanic_train$Pclass)
titanic_train$FamilySize <- factor(titanic_train$FamilySize)
titanic_test$Pclass <- factor(titanic_test$Pclass)
titanic_test$FamilySize <- factor(titanic_test$FamilySize)
titanic_train <- select(titanic_train, -SibSp, -Parch, -Name, -Age, -Ticket, -Cabin, -PassengerId)
titanic_test <- select(titanic_test, -SibSp, -Parch, -Name, -Age, -Ticket, -Cabin, -PassengerId)
log.model <- glm(Survived ~ . , family = binomial(link = "logit"), data = titanic_train)
summary(log.model)
#Null value of fare from test
titanic_test[is.na(titanic_test$Fare), "Fare"] <- 10
fitted.probablities <- predict(log.model, titanic_test, type = "response")
fitted.results <- ifelse(fitted.probablities > 0.5, 1 , 0)
temp <- read.csv(paste(filepath , "titanic_test.csv", sep = "\\"))
submission <- cbind(temp$PassengerId, fitted.results)
submission <- as.data.frame(submission)
colnames(submission) <- c("PassengerId", "Survived")
write.csv(submission, file = "mysub2.csv", row.names = F)
| /Datasets/Kaggle_Titanic/RScripts/Titanic_logistic.R | no_license | pixelmaster11/DataScience | R | false | false | 8,717 | r | #Useful for knowing missing data
install.packages("Amelia")
library(ggplot2)
library(dplyr)
library(ggthemes)
library(corrgram)
library(corrplot)
library(caTools)
library(Amelia)
#Goal: Predict Survival of passengers onboard titanic (Current accuracy 76.67%)
#Data: https://www.kaggle.com/c/titanic/data
filepath <- "D:\\Git_DataScience_Projects\\DataScience\\Datasets"
titanic_train <- read.csv(paste(filepath , "Kaggle_Titanic\\Data\\train.csv", sep = "\\"))
titanic_test <- read.csv(paste(filepath, "Kaggle_Titanic\\Data\\test.csv", sep = "\\"))
#Lot of missing Age values
missmap(titanic_train, main = "Missing Map", col = c("Yellow", "Black"), legend = FALSE)
ggplot(titanic_train, aes(x = Pclass)) + geom_bar(aes(fill = factor(Pclass)))
ggplot(titanic_train, aes(x = Sex)) + geom_bar(aes(fill = factor(Sex)))
#177 na values removed
ggplot(titanic_train, aes(x = Age)) + geom_histogram(bins = 20, alpha = 0.5, fill = "blue")
ggplot(titanic_train, aes(x = SibSp)) + geom_bar()
ggplot(titanic_train, aes(x = Parch)) + geom_bar()
ggplot(titanic_train, aes(x = Fare)) + geom_histogram(bins = 10, alpha = 0.5, fill = "green", color = "black")
#Plot Age according to Pclass to compute age based on class to fill na values in age
ggplot(titanic_train, aes(x = Pclass, y = Age)) +
geom_boxplot(aes(group = Pclass, fill = factor(Pclass)), alpha = 0.4) +
scale_y_continuous(breaks = seq(min(0), max(80, by = 2))) + theme_bw()
#####
#Calculate avg ages for missing age values based on average age per class
#####
calc_age <- function(age , class) {
out <- age
for(i in 1:length(age)) {
if(is.na(age[i])){
if(class[i] == 1){
out[i] <- 37
} else if(class[i] == 2){
out[i] <- 29
} else{
out[i] <- 24
}
} else {
out[i] <- age[i]
}
}
return (out)
}
fixed.ages <- calc_age(titanic_train$Age , titanic_train$Pclass)
titanic_train$Age <- fixed.ages
missmap(titanic_train, main = "Check", col = c("Yellow", "Black"), legend = F)
#Feature Engineering
#Grab Titles from name, Grab Cabin name letter, etc
titanic_train_mod <- select(titanic_train, -Cabin, -PassengerId, -Ticket, -Cabin, -Name)
titanic_train_mod$Survived <- factor(titanic_train_mod$Survived)
titanic_train_mod$Pclass <- factor(titanic_train_mod$Pclass)
titanic_train_mod$Parch <- factor(titanic_train_mod$Parch)
titanic_train_mod$SibSp <- factor(titanic_train_mod$SibSp)
str(titanic_train_mod)
################# Predict Survival ##################
#Generalized linear model
log.model <- glm(Survived ~ . , family = binomial(link = "logit"), data = titanic_train_mod)
summary(log.model)
#Lot of missing Age values
missmap(titanic_test, main = "Missing Map", col = c("Yellow", "Black"), legend = FALSE)
#Plot Age according to Pclass to compute age based on class to fill na values in age
ggplot(titanic_test, aes(x = Pclass, y = Age)) +
geom_boxplot(aes(group = Pclass, fill = factor(Pclass)), alpha = 0.4) +
scale_y_continuous(breaks = seq(min(0), max(80, by = 2))) + theme_bw()
# class 1 <- 42 , class 2 <- 26 , class 3 <- 24
#####
#Calculate avg ages for missing age values based on average age per class
#####
calc_age_testset <- function(age , class) {
out <- age
for(i in 1:length(age)) {
if(is.na(age[i])){
if(class[i] == 1){
out[i] <- 42
} else if(class[i] == 2){
out[i] <- 26
} else{
out[i] <- 24
}
} else {
out[i] <- age[i]
}
}
return (out)
}
fixed.ages <- calc_age_testset(titanic_test$Age, titanic_test$Pclass)
titanic_test$Age <- fixed.ages
#Get mean per passenger class for fares
ggplot(titanic_test, aes(x = Pclass, y = Fare)) +
geom_boxplot(aes(group = Pclass, fill = factor(Pclass)), alpha = 0.4) +
scale_y_continuous(breaks = seq(0, 600, 10)) + theme_bw()
#Class 3 mean is 10 so assign that to the NA value
titanic_test[is.na(titanic_test$Fare), "Fare"] <- mean(titanic_test$Fare)
titanic_test_mod <- select(titanic_test, -Cabin, -PassengerId, -Ticket, -Cabin, -Name)
titanic_test_mod$Pclass <- factor(titanic_test_mod$Pclass)
titanic_test_mod$Parch <- factor(titanic_test_mod$Parch)
titanic_test_mod$SibSp <- factor(titanic_test_mod$SibSp)
#Remove 2 rows with 9 Parch values as it does not match with train dataset
titanic_test_mod[titanic_test_mod$Parch == 9, "Parch"] <- 6
fitted.probablities <- predict(log.model, titanic_test_mod, type = "response")
fitted.results <- ifelse(fitted.probablities > 0.5, 1 , 0)
submission <- cbind(titanic_test$PassengerId, fitted.results)
submission <- as.data.frame(submission)
colnames(submission) <- c("PassengerId", "Survived")
write.csv(submission, file = "mysub1.csv")
#######################
###
# 2. Submission
###
#######################
#######################
###
# Different Feature Engineering
###
#######################
titanic_train <- read.csv(paste(filepath , "titanic_train.csv", sep = "\\"))
titanic_test <- read.csv(paste(filepath, "titanic_test.csv", sep = "\\"))
#Lot of missing Age values
missmap(titanic_train, main = "Missing Map", col = c("Yellow", "Black"), legend = FALSE)
#######################
###
# FIX MISSING AGE VALUES BASED ON PCLASS AVERAGE AGE
###
#######################
fixed.ages <- calc_age(titanic_train$Age , titanic_train$Pclass)
titanic_train$Age <- fixed.ages
#######################
###
# Age Groups
###
#######################
############################
###
#1. We can see from the following table, a higher preference was given to females overall when choosing to save a passenger
#2. Out of Total 1st class female passenger, only 3% were killed while 97% were saved / survived
# As compared with that of total 1st class male passengers, 63% were killed while only 37% were saved / survived
#3. Similar trend is seen with both female and male 2nd class passengers
#4. However, an interesting stat is observed with 3rd class female passengers with only 50-50 chances of survival
# This is interesting as it might indicate that 3rd class female passengers were not treated fairly as compared to that
# of 1st and 2nd class female passengers who have death rate of only 3% and 7% respectively whereas for
# 3rd class female passengers the death rate is extremely high of 50%
###
titanic_train %>% group_by(Pclass, Sex, Survived) %>% summarise(Total_Passengers = n()) %>%
mutate(Percent = Total_Passengers / sum(Total_Passengers) * 100)
#############################
#TODO: 20s, 30s, 40s, etc...
age_groups <- function(age){
if(age < 20){
return("Below 20")
} else if(age >= 20 & age < 30){
return("Twenties")
} else if(age >= 30 & age < 40){
return("Thirties")
} else if(age >= 40 & age < 50){
return("Fourties")
} else if(age >= 50 & age <= 60){
return("Fifties")
} else {
return("Above 60")
}
}
#Lot of missing Age values
missmap(titanic_test, main = "Missing Map", col = c("Yellow", "Black"), legend = FALSE)
##Making same changes to test dataset
fixed.ages <- calc_age_testset(titanic_test$Age, titanic_test$Pclass)
titanic_test$Age <- fixed.ages
titanic_train$AgeGroup <- as.factor(sapply(titanic_train$Age, age_groups))
titanic_test$AgeGroup <- as.factor(sapply(titanic_test$Age, age_groups))
titanic_train <- mutate(titanic_train, FamilySize = SibSp + Parch)
titanic_test <- mutate(titanic_test, FamilySize = SibSp + Parch)
titanic_train$Survived <- factor(titanic_train$Survived)
titanic_train$Pclass <- factor(titanic_train$Pclass)
titanic_train$FamilySize <- factor(titanic_train$FamilySize)
titanic_test$Pclass <- factor(titanic_test$Pclass)
titanic_test$FamilySize <- factor(titanic_test$FamilySize)
titanic_train <- select(titanic_train, -SibSp, -Parch, -Name, -Age, -Ticket, -Cabin, -PassengerId)
titanic_test <- select(titanic_test, -SibSp, -Parch, -Name, -Age, -Ticket, -Cabin, -PassengerId)
log.model <- glm(Survived ~ . , family = binomial(link = "logit"), data = titanic_train)
summary(log.model)
#Null value of fare from test
titanic_test[is.na(titanic_test$Fare), "Fare"] <- 10
fitted.probablities <- predict(log.model, titanic_test, type = "response")
fitted.results <- ifelse(fitted.probablities > 0.5, 1 , 0)
temp <- read.csv(paste(filepath , "titanic_test.csv", sep = "\\"))
submission <- cbind(temp$PassengerId, fitted.results)
submission <- as.data.frame(submission)
colnames(submission) <- c("PassengerId", "Survived")
write.csv(submission, file = "mysub2.csv", row.names = F)
|
## > source('cachematrix.R')
## > m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), c(2, 2)))
## > cacheSolve(m)
## [,1] [,2]
## [1,] 0.5 0.0
## [2,] 0.0 0.5
## Create a special "matrix", which is a list containing
## a function to
## - set the value of the matrix
## - get the value of the matrix
## - set the value of the inverse matrix
## - get the value of the inverse matrix
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inv) i <<- inv
getinverse <- function() i
list(
set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse
)
}
## Calculate the inverse of the special "matrix" created with the above
## function, reusing cached result if it is available
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
m <- x$get()
i <- solve(m, ...)
x$setinverse(i)
i
}
| /cachematrix.R | no_license | chaitanyavmf/ProgrammingAssignment2 | R | false | false | 1,069 | r |
## > source('cachematrix.R')
## > m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), c(2, 2)))
## > cacheSolve(m)
## [,1] [,2]
## [1,] 0.5 0.0
## [2,] 0.0 0.5
## Create a special "matrix", which is a list containing
## a function to
## - set the value of the matrix
## - get the value of the matrix
## - set the value of the inverse matrix
## - get the value of the inverse matrix
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inv) i <<- inv
getinverse <- function() i
list(
set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse
)
}
## Calculate the inverse of the special "matrix" created with the above
## function, reusing cached result if it is available
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
m <- x$get()
i <- solve(m, ...)
x$setinverse(i)
i
}
|
options(stringsAsFactors = FALSE)
# Load data of spot and 1-month forward exchange rates
data = read.csv("proj15_spot_forward_exchange_rate.csv")
# Data setup
colnames(data) = c('date','AUD','AUD_f','JPY','JPY_f','GBP','GBP_f')
data$date = as.Date(data$date,"%m/%d/%Y")
# Convert the quoted form to US dollar/currency
data[,2:5] = 1/data[,2:5]
# Take logarithm
data[,2:7] = log(data[,2:7])
# Extract spot and forward exchange rate
spot = data.frame(date=data$date,AUD=data$AUD,JPY=data$JPY,GBP=data$GBP)
forward = data.frame(date=data$date,AUD_f=data$AUD_f,JPY_f=data$JPY_f,GBP_f=data$GBP_f)
# Plot log spot exchange rate
plot(spot[,1:2],typ='l',ylab='AUD',main='Exchange Rate')
plot(spot[,c(1,3)],typ='l',ylab='JPY',main='Exchange Rate')
plot(spot[,c(1,4)],typ='l',ylab='GBP',main='Exchange Rate')
par(mfrow=c(1,2))
# ACF and PACF
acf(spot$AUD);pacf(spot$AUD)
acf(spot$JPY);pacf(spot$JPY)
acf(spot$GBP);pacf(spot$GBP)
par(mfrow=c(1,1))
# ADF test
library(fUnitRoots)
adfTest(spot$AUD,type=c("nc"));adfTest(spot$JPY,type=c("nc"));adfTest(spot$GBP,type=c("nc"))
# Extract data before 2013-07-01
AUD_diff = diff(spot[spot$date<="2013-07-01",2])
JPY_diff = diff(spot[spot$date<="2013-07-01",3])
GBP_diff = diff(spot[spot$date<="2013-07-01",4])
date = spot[spot$date<="2013-07-01",1]
# Plot log return of exchange rate
plot(date[-1],AUD_diff,typ='l',ylab='AUD',main='Difference of Log Exchange Rate (AUD)')
plot(JPY_diff,typ='l',ylab='JPY',main='Difference of Log Exchange Rate (JPY)')
plot(GBP_diff,typ='l',ylab='GBP',main='Difference of Log Exchange Rate (GBP)')
# ACF and PACF of log return of exchange rate
par(mfrow=c(1,2))
acf(AUD_diff,main='Log Return of Exchange Rate(AUD)');pacf(AUD_diff,main='Log Return of Exchange Rate(AUD)')
acf(JPY_diff,main='Log Return of Exchange Rate(JPY)');pacf(JPY_diff,main='Log Return of Exchange Rate(JPY)')
acf(GBP_diff,main='Log Return of Exchange Rate(GBP)');pacf(GBP_diff,main='Log Return of Exchange Rate(GBP)')
par(mfrow=c(1,1))
# ADF test after differencing ## no unit root
adfTest(AUD_diff);adfTest(JPY_diff);adfTest(GBP_diff)
hist(AUD_diff,breaks=20,ylab='Frequency',xlab='',main='Difference of Log Exchange Rate')
install.packages('TSA')
library(TSA)
# Identify a good ARMA order using EACF
eacf(AUD_diff, ar.max = 8, ma.max = 8)
eacf(JPY_diff, ar.max = 8, ma.max = 8)
eacf(GBP_diff, ar.max = 8, ma.max = 8)
## ARCH effect analysis
t.test(AUD_diff)
t.test(JPY_diff)
t.test(GBP_diff)
# Null hypothesis cannot be rejected. The true mean should be equal to zero
# ACF of diff and squared diff of log exchange rate
par(mfrow=c(1,2))
acf(AUD_diff,main='Log Return of Exchange Rate(AUD)',ylim=c(-0.2,0.4))
acf(AUD_diff^2,main='Squared Log Return of Exchange Rate(AUD)',ylim=c(-0.2,0.4))
acf(JPY_diff,main='Log Return of Exchange Rate(JPY)',ylim=c(-0.2,0.4))
acf(JPY_diff^2,main='Squared Log Return of Exchange Rate(JPY)',ylim=c(-0.2,0.4)) ## not too much dependence
acf(GBP_diff,main='Difference of Exchange Rate(GBP)',ylim=c(-0.2,0.4))
acf(GBP_diff^2,main='Squared Log Return of Exchange Rate(GBP)',ylim=c(-0.2,0.4))
par(mfrow=c(1,1))
# Ljung-Box test
Box.test(AUD_diff,lag=12,type=("Ljung-Box"))
Box.test(AUD_diff^2,lag=12,type=("Ljung-Box"))
Box.test(JPY_diff,lag=12,type=("Ljung-Box"))
Box.test(JPY_diff^2,lag=12,type=("Ljung-Box"))
Box.test(GBP_diff,lag=12,type=("Ljung-Box"))
Box.test(GBP_diff^2,lag=12,type=("Ljung-Box"))
library(fGarch)
# Fit data into Garch(1,1), mean equation is a constant
m1 = garchFit(~garch(1,1),data=AUD_diff,trace=F)
m2 = garchFit(~garch(1,1),data=JPY_diff,trace=F)
m3 = garchFit(~garch(1,1),data=GBP_diff,trace=F)
summary(m1);summary(m2);summary(m3)
library(rugarch)
# IGARCH for AUD
spec = ugarchspec(variance.model=list(model="iGARCH",garchOrder=c(1,1)),
mean.model=list(armaOrder=c(0,0))) # mean equation=constant
m1_I = ugarchfit(spec=spec,data=AUD_diff)
m1_I
# APARCH
m1_ap = garchFit(~1+aparch(1,1), data=AUD_diff, trace=F)
# EGARCH for AUD
egarch11.spec = ugarchspec(variance.model=list(model="eGARCH",garchOrder=c(1,1)),
mean.model=list(armaOrder=c(0,0)))# mean equation=constant
m1_E = ugarchfit(egarch11.spec,data= AUD_diff)
#m2_E = ugarchfit(egarch11.spec,data= JPY_diff)
#m3_E = ugarchfit(egarch11.spec,data= GBP_diff)
summary(m1_E)
m1_ged = garchFit(~garch(1,1),data=AUD_diff,trace=F,cond.dist=c("ged"))
m2_ged = garchFit(~garch(1,1),data=JPY_diff,trace=F,cond.dist=c("ged"))
m3_ged = garchFit(~garch(1,1),data=GBP_diff,trace=F,cond.dist=c("ged"))
summary(m1_ged);summary(m2_ged);summary(m3_ged)
# In the case of AUD/US, alpha1 and beta1 are significant at the level 0.01
# We think GARCH(1,1) model can be used to predcit log return of exchange rate
m1_res = m1@residuals
m1_res_std = m1@residuals/volatility(m1)
m2_res = m2@residuals
m2_res_std = m2@residuals/volatility(m2)
m3_res = m3@residuals
m3_res_std = m3@residuals/volatility(m3)
# Observe volatility and log return of exchange rate
plot(AUD_diff,typ='l',ylab='USD/AUD')
lines(volatility(m1),col='red')
legend(160,0.1,c('log return of USD/AUD','volatility'),col=c(1,2),lwd=c(2,2))
# ACFs
par(mfrow=c(2,2))
acf(m1_res,main='Residual (AUD)')
acf(m1_res^2,main='Residual Squared (AUD)')
acf(m1_res_std,main='GARCH(1,1) Std Residual (AUD)')
acf(m1_res_std^2,main='GARCH(1,1) Std Residual Squared (AUD)')
acf(m2_res,main='Residual (JPY)')
acf(m2_res^2,main='Residual Squared (JPY)')
acf(m2_res_std,main='GARCH(1,1) Std Residual (JPY)')
acf(m2_res_std^2,main='GARCH(1,1) Std Residual Squared (JPY)')
acf(m3_res,main='Residual (GBP)')
acf(m3_res^2,main='Residual Squared (GBP)')
acf(m3_res_std,main='GARCH(1,1) Std Residual (GBP)')
acf(m3_res_std^2,main='GARCH(1,1) Std Residual Squared (GBP)')
# PACFs
pacf(m1_res,main='Residual (AUD)',ylim=c(-0.1,0.5))
pacf(m1_res^2,main='Residual Squared (AUD)',ylim=c(-0.1,0.5))
pacf(m1_res_std,main='GARCH(1,1) Std Residual (AUD)',ylim=c(-0.1,0.5))
pacf(m1_res_std^2,main='GARCH(1,1) Std Residual Squared (AUD)',ylim=c(-0.1,0.5))
pacf(m2_res,main='Residual (JPY)',ylim=c(-0.15,0.5))
pacf(m2_res^2,main='Residual Squared (JPY)',ylim=c(-0.15,0.5))
pacf(m2_res_std,main='GARCH(1,1) Std Residual (JPY)',ylim=c(-0.15,0.5))
pacf(m2_res_std^2,main='GARCH(1,1) Std Residual Squared (JPY)',ylim=c(-0.15,0.5))
pacf(m3_res,main='Residual (GBP)',ylim=c(-0.1,0.5))
pacf(m3_res^2,main='Residual Squared (GBP)',ylim=c(-0.1,0.5))
pacf(m3_res_std,main='GARCH(1,1) Std Residual (GBP)',ylim=c(-0.1,0.5))
pacf(m3_res_std^2,main='GARCH(1,1) Std Residual Squared (GBP)',ylim=c(-0.1,0.5))
par(mfrow=c(1,1))
plot(m1_res_std,typ='l',ylab='',main='Standardized Residuals (AUD)')
plot(m2_res_std,typ='l',ylab='',main='Standardized Residuals (JPY)')
plot(m3_res_std,typ='l',ylab='',main='Standardized Residuals (GBP)')
n = nrow(spot) # number of full data
m = nrow(spot[spot$date<="2013-07-01",])
realized_return = as.data.frame(matrix(rep(0,(n-m)*3),nrow=n-m))
colnames(realized_return)=c('AUD','JPY','GBP')
data_temp = c()
for(j in 1:3)
{
for (i in 1:(n-m))
{
data_temp = diff(spot[1:m-1+i,j+1])
mdl = garchFit(~garch(1,1),data=data_temp,trace=F)
pred = predict(mdl,n.ahead = 1)[,3] # predict the volatility of the next day
mdl_res_std = mdl@residuals/volatility(mdl)
epsilon = mdl_res_std[length(mdl_res_std)]
if (mdl@fit$coef[1]+pred * epsilon > (forward[m-1+i,j+1] - spot[m-1+i,j+1]))
realized_return[i,j] = spot[m+i,j+1] - forward[m-1+i,j+1]
else
realized_return[i,j] = forward[m-1+i,j+1] - spot[m+i,j+1]
}
}
rm(i,j,mdl,pred,epsilon,data_temp)
colMeans(realized_return, na.rm = FALSE, dims = 1)
# Using Generalized Error as underlying distributions of epsilon_t
realized_return_ged = as.data.frame(matrix(rep(0,(n-m)*3),nrow=n-m))
colnames(realized_return_ged)=c('AUD','JPY','GBP')
data_temp=c()
for(j in 1:3)
{
for (i in 1:(n-m))
{
data_temp = diff(spot[1:m-1+i,j+1])
mdl = garchFit(~garch(1,1),data=data_temp,trace=F,cond.dist=c("ged"))
pred = predict(mdl,n.ahead = 1)[,3] # predict the volatility of the next day
mdl_res_std = mdl@residuals/volatility(mdl)
epsilon = sample(mdl_res_std, 1, replace = TRUE)
if (mdl@fit$coef[1]+pred * epsilon > (forward[m-1+i,j+1] - spot[m-1+i,j+1]))
realized_return_ged[i,j] = spot[m+i,j+1] - forward[m-1+i,j+1]
else
realized_return_ged[i,j] = forward[m-1+i,j+1] - spot[m+i,j+1]
}
}
rm(i,j,mdl,pred,epsilon,data_temp)
# OLS methods
realized_return_ols = as.data.frame(matrix(rep(0,(n-m)*3),nrow=n-m))
colnames(realized_return_ols)=c('AUD','JPY','GBP')
alpha_temp = c()
beta_temp = c()
for(j in 1:3)
{
for (i in 1:(n-m))
{
y = diff(spot[1:(m-1+i),j+1])
X = forward[1:(m-2+i),j+1]-spot[1:(m-2+i),j+1]
mdl = lm(y~X)
if (mdl$coefficients[1]+ mdl$coefficients[2]*((forward[m-1+i,j+1] - spot[m-1+i,j+1]))> (forward[m-1+i,j+1] - spot[m-1+i,j+1]))
realized_return_ols[i,j] = spot[m+i,j+1] - forward[m-1+i,j+1]
else
realized_return_ols[i,j] = forward[m-1+i,j+1] - spot[m+i,j+1]
}
}
rm(i,j,y,X,b)
colMeans(realized_return_ols, na.rm = FALSE, dims = 1)
## IGARCH
install.packages("rugarch")
library(rugarch)
realized_return_AUD = c()
for (i in 1:(n-m))
{
data_temp = diff(spot[1:m-1+i,2])
spec = ugarchspec(variance.model=list(model="iGARCH",garchOrder=c(1,1)),
mean.model=list(armaOrder=c(0,0))) # mean equation=constant
mdl = ugarchfit(spec=spec,data=data_temp)
# standardized residuals
residual_std = mdl@fit$residuals/mdl@fit$sigma
# predict the volatility of the next day
pred1 = as.numeric(sqrt(mdl@fit$coef[2]+mdl@fit$coef[3]*mdl@fit$residuals[length(mdl@fit$residuals)]^2
+mdl@fit$coef[4]*mdl@fit$sigma[length(mdl@fit$sigma)]^2))
# select the last standardized residual as the next one
epsilon = residual_std[length(residual_std)]
# sample(residual_std,size=1,replace=TRUE)
if (mdl@fit$coef[1]+pred1 * epsilon > (forward[m-1+i,2] - spot[m-1+i,2]))
realized_return_AUD[i] = spot[m+i,2] - forward[m-1+i,2]
else
realized_return_AUD[i] = forward[m-1+i,2] - spot[m+i,2]
}
rm(spec,mdl,residual_std,pred1,epsilon)
realized_return_AUD = data.frame(rtn = realized_return_AUD)
mean(realized_return_AUD$rtn)
# Relevant measures
TB = read.csv("TB30.csv")
SP500 = read.csv("sp500.csv")
TB30 = cbind(TB[,2],TB[,2],TB[,2])
excess_return_3 = realized_return-TB30
excess_return_mkt = SP500[,2] - TB[,2]
excess_return_aud = realized_return_AUD$rtn-TB$t30ret
colMeans(excess_return_3, na.rm = FALSE, dims = 1)
#t.test(excess_return_3[,1],mu=0)
#t.test(excess_return_3[,2],mu=0)
#t.test(excess_return_3[,3],mu=0)
sharpe_ratio_aud = mean(excess_return_3$AUD) /sd(excess_return_3$AUD)
sharpe_ratio_jpy = mean(excess_return_3$JPY) /sd(excess_return_3$JPY)
sharpe_ratio_gbp = mean(excess_return_3$GBP) /sd(excess_return_3$GBP)
sharpe_ratio_mkt = mean(excess_return_mkt) /sd(excess_return_mkt)
sharpe_ratio_aud_igarch = mean(excess_return_aud) /sd(excess_return_aud)
cat('Sharpe Ratios:',sharpe_ratio_aud,sharpe_ratio_jpy,sharpe_ratio_gbp)
# Count the winning months
win = c(sum(realized_return$AUD>0),sum(realized_return$JPY>0),sum(realized_return$GBP>0))
sum(realized_return_AUD$rtn>0)
# Count the lsoing months
lose = c(sum(realized_return$AUD<0),sum(realized_return$JPY<0),sum(realized_return$GBP<0))
sum(realized_return_AUD$rtn<0)
cat('USD/AUD',sharpe_ratio_aud,'USD/JPY',sharpe_ratio_jpy,'USD/GBP',sharpe_ratio_gbp,'MARKET',sharpe_ratio_mkt)
# Plot realized returns of GARCH(1,1) and OLS
par(mfrow=c(3,1))
plot(realized_return[,1],type='l',xlab='month',
ylab='Return of AUD',col='red',main='Return comparison of GARCH(1,1) and OLS')
lines(realized_return_ols[,1],type='l',col='blue')
legend(10,0.045,c('GARCH','OLS'),col=c(2,4),lwd=c(2,2),bty='n',cex=1)
plot(realized_return[,2],type='l',xlab='month',
ylab='Return of JPY',col='red',main='Return comparison of GARCH(1,1) and OLS')
lines(realized_return_ols[,2],type='l',col='blue')
legend(10,0.045,c('GARCH','OLS'),col=c(2,4),lwd=c(2,2),bty='n',cex=1)
plot(realized_return[,3],type='l',xlab='month',
ylab='Return of GBP',col='red',main='Return comparison of GARCH(1,1) and OLS')
lines(realized_return_ols[,3],type='l',col='blue')
legend(10,0.045,c('GARCH','OLS'),col=c(2,4),lwd=c(2,2),bty='n',cex=1)
par(mfrow=c(1,1))
## daily data
data_daily = read.csv("proj15_daily_exchange_rate.csv")
# Data setup
data_daily = data_daily[!is.na(data_daily[,2]),1:4]
colnames(data_daily) = c('date','AUD','JPY','GBP')
data_daily$date = as.Date(data_daily$date,"%d-%B-%y")
data_daily[,2:4] = 1/data_daily[,2:4]
data_daily[,2:4]=log(data_daily[,2:4])
# Extract data before 2013-07-01
AUD_diff = diff(data_daily[data_daily$date<="2013-07-01",2])
JPY_diff = diff(data_daily[data_daily$date<="2013-07-01",3])
GBP_diff = diff(data_daily[data_daily$date<="2013-07-01",4])
plot(AUD_diff,typ='l',ylab='',main='AUD/US')
plot(JPY_diff,typ='l',ylab='',main='JPY/US')
plot(GBP_diff,typ='l',ylab='',main='GBP/US')
eacf(AUD_diff)
library(rugarch)
egarch11.spec = ugarchspec(variance.model=list(model="eGARCH",garchOrder=c(1,1)),
mean.model=list(armaOrder=c(0,0)))# mean equation=constant
m1_E = ugarchfit(egarch11.spec,data= AUD_diff)
m2_E = ugarchfit(egarch11.spec,data= JPY_diff)
m3_E = ugarchfit(egarch11.spec,data= GBP_diff)
library(fGarch)
m1_ap = garchFit(~1+aparch(1,1), data=AUD_diff, trace=F)
m2_ap = garchFit(~1+aparch(1,1), data=JPY_diff, trace=F)
m3_ap = garchFit(~1+aparch(1,1), data=GBP_diff, trace=F)
residual_m1_E = m1_E@fit$residuals
std_residual_m1_E=m1_E@fit$residuals/m1_E@fit$sigma
plot(std_residual_m1_E,typ='l',ylab='Standardized Residuals')
par(mfrow=c(2,2))
acf(residual_m1_E,main='Residual');acf(residual_m1_E^2,main='Squared Residual')
acf(std_residual_m1_E,main='EGARCH(1,1) Standardized Residual');acf(std_residual_m1_E^2,main='EGARCH(1,1) Squared Standardized Residual')
Box.test(m1_E@fit$residuals,lag=12,type=("Ljung-Box"))
Box.test(std_residual_m1_E^2,lag=12,type=("Ljung-Box"))
| /proj15.r | no_license | derek1032/Time-Series-Project | R | false | false | 13,946 | r | options(stringsAsFactors = FALSE)
# Load data of spot and 1-month forward exchange rates
data = read.csv("proj15_spot_forward_exchange_rate.csv")
# Data setup
colnames(data) = c('date','AUD','AUD_f','JPY','JPY_f','GBP','GBP_f')
data$date = as.Date(data$date,"%m/%d/%Y")
# Convert the quoted form to US dollar/currency
data[,2:5] = 1/data[,2:5]
# Take logarithm
data[,2:7] = log(data[,2:7])
# Extract spot and forward exchange rate
spot = data.frame(date=data$date,AUD=data$AUD,JPY=data$JPY,GBP=data$GBP)
forward = data.frame(date=data$date,AUD_f=data$AUD_f,JPY_f=data$JPY_f,GBP_f=data$GBP_f)
# Plot the log spot exchange rate for each currency.
plot(spot[, 1:2], typ = 'l', ylab = 'AUD', main = 'Exchange Rate')
plot(spot[, c(1, 3)], typ = 'l', ylab = 'JPY', main = 'Exchange Rate')
plot(spot[, c(1, 4)], typ = 'l', ylab = 'GBP', main = 'Exchange Rate')

# ACF and PACF of each log spot series, shown side by side.
par(mfrow = c(1, 2))
acf(spot$AUD); pacf(spot$AUD)
acf(spot$JPY); pacf(spot$JPY)
acf(spot$GBP); pacf(spot$GBP)
par(mfrow = c(1, 1))

# Augmented Dickey-Fuller unit-root test; type "nc" = no constant, no trend.
library(fUnitRoots)
adfTest(spot$AUD, type = "nc"); adfTest(spot$JPY, type = "nc"); adfTest(spot$GBP, type = "nc")
# In-sample window: observations up to 2013-07-01 (later observations are
# held out for the out-of-sample exercise).
in_sample <- spot$date <= "2013-07-01"
AUD_diff <- diff(spot[in_sample, 2])
JPY_diff <- diff(spot[in_sample, 3])
GBP_diff <- diff(spot[in_sample, 4])
date <- spot[in_sample, 1]

# Plot the log returns (first differences of the log rate) against time.
# date[-1]: differencing drops the first observation.
plot(date[-1], AUD_diff, typ = 'l', ylab = 'AUD', main = 'Difference of Log Exchange Rate (AUD)')
plot(date[-1], JPY_diff, typ = 'l', ylab = 'JPY', main = 'Difference of Log Exchange Rate (JPY)')
plot(date[-1], GBP_diff, typ = 'l', ylab = 'GBP', main = 'Difference of Log Exchange Rate (GBP)')

# ACF and PACF of the log returns.
par(mfrow = c(1, 2))
acf(AUD_diff, main = 'Log Return of Exchange Rate(AUD)'); pacf(AUD_diff, main = 'Log Return of Exchange Rate(AUD)')
acf(JPY_diff, main = 'Log Return of Exchange Rate(JPY)'); pacf(JPY_diff, main = 'Log Return of Exchange Rate(JPY)')
acf(GBP_diff, main = 'Log Return of Exchange Rate(GBP)'); pacf(GBP_diff, main = 'Log Return of Exchange Rate(GBP)')
par(mfrow = c(1, 1))

# ADF test after differencing: no unit root remains.
adfTest(AUD_diff); adfTest(JPY_diff); adfTest(GBP_diff)

hist(AUD_diff, breaks = 20, ylab = 'Frequency', xlab = '',
     main = 'Difference of Log Exchange Rate (AUD)')
# Install TSA only when it is missing, so re-running the script does not
# reinstall the package every time.
if (!requireNamespace("TSA", quietly = TRUE)) install.packages("TSA")
library(TSA)

# Identify a plausible ARMA order for each return series via the extended ACF.
eacf(AUD_diff, ar.max = 8, ma.max = 8)
eacf(JPY_diff, ar.max = 8, ma.max = 8)
eacf(GBP_diff, ar.max = 8, ma.max = 8)
## ARCH effect analysis ----

# t-test of H0: mean log return = 0. The null cannot be rejected for any
# series, so the mean equation can be treated as (approximately) zero.
t.test(AUD_diff)
t.test(JPY_diff)
t.test(GBP_diff)

# ACF of the log returns vs their squares: serial dependence in the squares,
# with little in the levels, indicates conditional heteroskedasticity (ARCH).
par(mfrow = c(1, 2))
acf(AUD_diff, main = 'Log Return of Exchange Rate(AUD)', ylim = c(-0.2, 0.4))
acf(AUD_diff^2, main = 'Squared Log Return of Exchange Rate(AUD)', ylim = c(-0.2, 0.4))
acf(JPY_diff, main = 'Log Return of Exchange Rate(JPY)', ylim = c(-0.2, 0.4))
acf(JPY_diff^2, main = 'Squared Log Return of Exchange Rate(JPY)', ylim = c(-0.2, 0.4))  # not too much dependence
acf(GBP_diff, main = 'Log Return of Exchange Rate(GBP)', ylim = c(-0.2, 0.4))
acf(GBP_diff^2, main = 'Squared Log Return of Exchange Rate(GBP)', ylim = c(-0.2, 0.4))
par(mfrow = c(1, 1))

# Ljung-Box test (12 lags) on the returns and on their squares.
Box.test(AUD_diff, lag = 12, type = "Ljung-Box")
Box.test(AUD_diff^2, lag = 12, type = "Ljung-Box")
Box.test(JPY_diff, lag = 12, type = "Ljung-Box")
Box.test(JPY_diff^2, lag = 12, type = "Ljung-Box")
Box.test(GBP_diff, lag = 12, type = "Ljung-Box")
Box.test(GBP_diff^2, lag = 12, type = "Ljung-Box")
library(fGarch)

# Fit a GARCH(1,1) with a constant mean equation to each log-return series.
m1 <- garchFit(~ garch(1, 1), data = AUD_diff, trace = FALSE)
m2 <- garchFit(~ garch(1, 1), data = JPY_diff, trace = FALSE)
m3 <- garchFit(~ garch(1, 1), data = GBP_diff, trace = FALSE)
summary(m1); summary(m2); summary(m3)
library(rugarch)

# IGARCH(1,1) for AUD: integrated (unit-persistence) variance,
# constant mean equation.
spec <- ugarchspec(variance.model = list(model = "iGARCH", garchOrder = c(1, 1)),
                   mean.model = list(armaOrder = c(0, 0)))
m1_I <- ugarchfit(spec = spec, data = AUD_diff)
m1_I

# APARCH(1,1) for AUD (asymmetric power ARCH, fGarch interface).
m1_ap <- garchFit(~ 1 + aparch(1, 1), data = AUD_diff, trace = FALSE)

# EGARCH(1,1) for AUD, constant mean equation.
egarch11.spec <- ugarchspec(variance.model = list(model = "eGARCH", garchOrder = c(1, 1)),
                            mean.model = list(armaOrder = c(0, 0)))
m1_E <- ugarchfit(egarch11.spec, data = AUD_diff)
# m2_E <- ugarchfit(egarch11.spec, data = JPY_diff)
# m3_E <- ugarchfit(egarch11.spec, data = GBP_diff)
# rugarch's uGARCHfit objects are displayed through their show method;
# summary() has no method for this S4 class, so print the object directly.
show(m1_E)

# GARCH(1,1) with generalized-error-distribution (GED) innovations,
# to allow for heavier-than-normal tails.
m1_ged <- garchFit(~ garch(1, 1), data = AUD_diff, trace = FALSE, cond.dist = "ged")
m2_ged <- garchFit(~ garch(1, 1), data = JPY_diff, trace = FALSE, cond.dist = "ged")
m3_ged <- garchFit(~ garch(1, 1), data = GBP_diff, trace = FALSE, cond.dist = "ged")
summary(m1_ged); summary(m2_ged); summary(m3_ged)
# In the case of AUD/US, alpha1 and beta1 are significant at the level 0.01
# We think GARCH(1,1) model can be used to predcit log return of exchange rate
m1_res = m1@residuals
m1_res_std = m1@residuals/volatility(m1)
m2_res = m2@residuals
m2_res_std = m2@residuals/volatility(m2)
m3_res = m3@residuals
m3_res_std = m3@residuals/volatility(m3)
# Observe volatility and log return of exchange rate
plot(AUD_diff,typ='l',ylab='USD/AUD')
lines(volatility(m1),col='red')
legend(160,0.1,c('log return of USD/AUD','volatility'),col=c(1,2),lwd=c(2,2))
# ACFs
par(mfrow=c(2,2))
acf(m1_res,main='Residual (AUD)')
acf(m1_res^2,main='Residual Squared (AUD)')
acf(m1_res_std,main='GARCH(1,1) Std Residual (AUD)')
acf(m1_res_std^2,main='GARCH(1,1) Std Residual Squared (AUD)')
acf(m2_res,main='Residual (JPY)')
acf(m2_res^2,main='Residual Squared (JPY)')
acf(m2_res_std,main='GARCH(1,1) Std Residual (JPY)')
acf(m2_res_std^2,main='GARCH(1,1) Std Residual Squared (JPY)')
acf(m3_res,main='Residual (GBP)')
acf(m3_res^2,main='Residual Squared (GBP)')
acf(m3_res_std,main='GARCH(1,1) Std Residual (GBP)')
acf(m3_res_std^2,main='GARCH(1,1) Std Residual Squared (GBP)')
# PACFs
pacf(m1_res,main='Residual (AUD)',ylim=c(-0.1,0.5))
pacf(m1_res^2,main='Residual Squared (AUD)',ylim=c(-0.1,0.5))
pacf(m1_res_std,main='GARCH(1,1) Std Residual (AUD)',ylim=c(-0.1,0.5))
pacf(m1_res_std^2,main='GARCH(1,1) Std Residual Squared (AUD)',ylim=c(-0.1,0.5))
pacf(m2_res,main='Residual (JPY)',ylim=c(-0.15,0.5))
pacf(m2_res^2,main='Residual Squared (JPY)',ylim=c(-0.15,0.5))
pacf(m2_res_std,main='GARCH(1,1) Std Residual (JPY)',ylim=c(-0.15,0.5))
pacf(m2_res_std^2,main='GARCH(1,1) Std Residual Squared (JPY)',ylim=c(-0.15,0.5))
pacf(m3_res,main='Residual (GBP)',ylim=c(-0.1,0.5))
pacf(m3_res^2,main='Residual Squared (GBP)',ylim=c(-0.1,0.5))
pacf(m3_res_std,main='GARCH(1,1) Std Residual (GBP)',ylim=c(-0.1,0.5))
pacf(m3_res_std^2,main='GARCH(1,1) Std Residual Squared (GBP)',ylim=c(-0.1,0.5))
par(mfrow=c(1,1))
plot(m1_res_std,typ='l',ylab='',main='Standardized Residuals (AUD)')
plot(m2_res_std,typ='l',ylab='',main='Standardized Residuals (JPY)')
plot(m3_res_std,typ='l',ylab='',main='Standardized Residuals (GBP)')
n = nrow(spot) # number of full data
m = nrow(spot[spot$date<="2013-07-01",])
realized_return = as.data.frame(matrix(rep(0,(n-m)*3),nrow=n-m))
colnames(realized_return)=c('AUD','JPY','GBP')
data_temp = c()
for(j in 1:3)
{
for (i in 1:(n-m))
{
data_temp = diff(spot[1:m-1+i,j+1])
mdl = garchFit(~garch(1,1),data=data_temp,trace=F)
pred = predict(mdl,n.ahead = 1)[,3] # predict the volatility of the next day
mdl_res_std = mdl@residuals/volatility(mdl)
epsilon = mdl_res_std[length(mdl_res_std)]
if (mdl@fit$coef[1]+pred * epsilon > (forward[m-1+i,j+1] - spot[m-1+i,j+1]))
realized_return[i,j] = spot[m+i,j+1] - forward[m-1+i,j+1]
else
realized_return[i,j] = forward[m-1+i,j+1] - spot[m+i,j+1]
}
}
rm(i,j,mdl,pred,epsilon,data_temp)
colMeans(realized_return, na.rm = FALSE, dims = 1)
# Using Generalized Error as underlying distributions of epsilon_t
realized_return_ged = as.data.frame(matrix(rep(0,(n-m)*3),nrow=n-m))
colnames(realized_return_ged)=c('AUD','JPY','GBP')
data_temp=c()
for(j in 1:3)
{
for (i in 1:(n-m))
{
data_temp = diff(spot[1:m-1+i,j+1])
mdl = garchFit(~garch(1,1),data=data_temp,trace=F,cond.dist=c("ged"))
pred = predict(mdl,n.ahead = 1)[,3] # predict the volatility of the next day
mdl_res_std = mdl@residuals/volatility(mdl)
epsilon = sample(mdl_res_std, 1, replace = TRUE)
if (mdl@fit$coef[1]+pred * epsilon > (forward[m-1+i,j+1] - spot[m-1+i,j+1]))
realized_return_ged[i,j] = spot[m+i,j+1] - forward[m-1+i,j+1]
else
realized_return_ged[i,j] = forward[m-1+i,j+1] - spot[m+i,j+1]
}
}
rm(i,j,mdl,pred,epsilon,data_temp)
# OLS methods
realized_return_ols = as.data.frame(matrix(rep(0,(n-m)*3),nrow=n-m))
colnames(realized_return_ols)=c('AUD','JPY','GBP')
alpha_temp = c()
beta_temp = c()
for(j in 1:3)
{
for (i in 1:(n-m))
{
y = diff(spot[1:(m-1+i),j+1])
X = forward[1:(m-2+i),j+1]-spot[1:(m-2+i),j+1]
mdl = lm(y~X)
if (mdl$coefficients[1]+ mdl$coefficients[2]*((forward[m-1+i,j+1] - spot[m-1+i,j+1]))> (forward[m-1+i,j+1] - spot[m-1+i,j+1]))
realized_return_ols[i,j] = spot[m+i,j+1] - forward[m-1+i,j+1]
else
realized_return_ols[i,j] = forward[m-1+i,j+1] - spot[m+i,j+1]
}
}
rm(i,j,y,X,b)
colMeans(realized_return_ols, na.rm = FALSE, dims = 1)
## IGARCH
install.packages("rugarch")
library(rugarch)
realized_return_AUD = c()
for (i in 1:(n-m))
{
data_temp = diff(spot[1:m-1+i,2])
spec = ugarchspec(variance.model=list(model="iGARCH",garchOrder=c(1,1)),
mean.model=list(armaOrder=c(0,0))) # mean equation=constant
mdl = ugarchfit(spec=spec,data=data_temp)
# standardized residuals
residual_std = mdl@fit$residuals/mdl@fit$sigma
# predict the volatility of the next day
pred1 = as.numeric(sqrt(mdl@fit$coef[2]+mdl@fit$coef[3]*mdl@fit$residuals[length(mdl@fit$residuals)]^2
+mdl@fit$coef[4]*mdl@fit$sigma[length(mdl@fit$sigma)]^2))
# select the last standardized residual as the next one
epsilon = residual_std[length(residual_std)]
# sample(residual_std,size=1,replace=TRUE)
if (mdl@fit$coef[1]+pred1 * epsilon > (forward[m-1+i,2] - spot[m-1+i,2]))
realized_return_AUD[i] = spot[m+i,2] - forward[m-1+i,2]
else
realized_return_AUD[i] = forward[m-1+i,2] - spot[m+i,2]
}
rm(spec,mdl,residual_std,pred1,epsilon)
realized_return_AUD = data.frame(rtn = realized_return_AUD)
mean(realized_return_AUD$rtn)
# Relevant measures
TB = read.csv("TB30.csv")
SP500 = read.csv("sp500.csv")
TB30 = cbind(TB[,2],TB[,2],TB[,2])
excess_return_3 = realized_return-TB30
excess_return_mkt = SP500[,2] - TB[,2]
excess_return_aud = realized_return_AUD$rtn-TB$t30ret
colMeans(excess_return_3, na.rm = FALSE, dims = 1)
#t.test(excess_return_3[,1],mu=0)
#t.test(excess_return_3[,2],mu=0)
#t.test(excess_return_3[,3],mu=0)
sharpe_ratio_aud = mean(excess_return_3$AUD) /sd(excess_return_3$AUD)
sharpe_ratio_jpy = mean(excess_return_3$JPY) /sd(excess_return_3$JPY)
sharpe_ratio_gbp = mean(excess_return_3$GBP) /sd(excess_return_3$GBP)
sharpe_ratio_mkt = mean(excess_return_mkt) /sd(excess_return_mkt)
sharpe_ratio_aud_igarch = mean(excess_return_aud) /sd(excess_return_aud)
cat('Sharpe Ratios:',sharpe_ratio_aud,sharpe_ratio_jpy,sharpe_ratio_gbp)
# Count the winning months
win = c(sum(realized_return$AUD>0),sum(realized_return$JPY>0),sum(realized_return$GBP>0))
sum(realized_return_AUD$rtn>0)
# Count the lsoing months
lose = c(sum(realized_return$AUD<0),sum(realized_return$JPY<0),sum(realized_return$GBP<0))
sum(realized_return_AUD$rtn<0)
cat('USD/AUD',sharpe_ratio_aud,'USD/JPY',sharpe_ratio_jpy,'USD/GBP',sharpe_ratio_gbp,'MARKET',sharpe_ratio_mkt)
# Plot realized returns of GARCH(1,1) and OLS
par(mfrow=c(3,1))
plot(realized_return[,1],type='l',xlab='month',
ylab='Return of AUD',col='red',main='Return comparison of GARCH(1,1) and OLS')
lines(realized_return_ols[,1],type='l',col='blue')
legend(10,0.045,c('GARCH','OLS'),col=c(2,4),lwd=c(2,2),bty='n',cex=1)
plot(realized_return[,2],type='l',xlab='month',
ylab='Return of JPY',col='red',main='Return comparison of GARCH(1,1) and OLS')
lines(realized_return_ols[,2],type='l',col='blue')
legend(10,0.045,c('GARCH','OLS'),col=c(2,4),lwd=c(2,2),bty='n',cex=1)
plot(realized_return[,3],type='l',xlab='month',
ylab='Return of GBP',col='red',main='Return comparison of GARCH(1,1) and OLS')
lines(realized_return_ols[,3],type='l',col='blue')
legend(10,0.045,c('GARCH','OLS'),col=c(2,4),lwd=c(2,2),bty='n',cex=1)
par(mfrow=c(1,1))
## daily data
data_daily = read.csv("proj15_daily_exchange_rate.csv")
# Data setup
data_daily = data_daily[!is.na(data_daily[,2]),1:4]
colnames(data_daily) = c('date','AUD','JPY','GBP')
data_daily$date = as.Date(data_daily$date,"%d-%B-%y")
data_daily[,2:4] = 1/data_daily[,2:4]
data_daily[,2:4]=log(data_daily[,2:4])
# Extract data before 2013-07-01
AUD_diff = diff(data_daily[data_daily$date<="2013-07-01",2])
JPY_diff = diff(data_daily[data_daily$date<="2013-07-01",3])
GBP_diff = diff(data_daily[data_daily$date<="2013-07-01",4])
plot(AUD_diff,typ='l',ylab='',main='AUD/US')
plot(JPY_diff,typ='l',ylab='',main='JPY/US')
plot(GBP_diff,typ='l',ylab='',main='GBP/US')
eacf(AUD_diff)
library(rugarch)
egarch11.spec = ugarchspec(variance.model=list(model="eGARCH",garchOrder=c(1,1)),
mean.model=list(armaOrder=c(0,0)))# mean equation=constant
m1_E = ugarchfit(egarch11.spec,data= AUD_diff)
m2_E = ugarchfit(egarch11.spec,data= JPY_diff)
m3_E = ugarchfit(egarch11.spec,data= GBP_diff)
library(fGarch)
m1_ap = garchFit(~1+aparch(1,1), data=AUD_diff, trace=F)
m2_ap = garchFit(~1+aparch(1,1), data=JPY_diff, trace=F)
m3_ap = garchFit(~1+aparch(1,1), data=GBP_diff, trace=F)
residual_m1_E = m1_E@fit$residuals
std_residual_m1_E=m1_E@fit$residuals/m1_E@fit$sigma
plot(std_residual_m1_E,typ='l',ylab='Standardized Residuals')
par(mfrow=c(2,2))
acf(residual_m1_E,main='Residual');acf(residual_m1_E^2,main='Squared Residual')
acf(std_residual_m1_E,main='EGARCH(1,1) Standardized Residual');acf(std_residual_m1_E^2,main='EGARCH(1,1) Squared Standardized Residual')
Box.test(m1_E@fit$residuals,lag=12,type=("Ljung-Box"))
Box.test(std_residual_m1_E^2,lag=12,type=("Ljung-Box"))
|
#library(foreach)
#library(doParallel)
#registerDoParallel(makeCluster(2))
ti <- Sys.time()
binario <- function(d, l) {
b <- rep(FALSE, l)
while (l > 0 | d > 0) {
b[l] <- (d %% 2 == 1)
l <- l - 1
d <- bitwShiftR(d, 1)
}
return(b)
}
decimal <- function(bits, l) {
valor <- 0
for (pos in 1:l) {
valor <- valor + 2^(l - pos) * bits[pos]
}
return(valor)
}
modelos <- read.csv("digitos.modelo", sep=" ", header=FALSE, stringsAsFactors=F)
#modelos[modelos=='n'] <- 0.995
#modelos[modelos=='g'] <- 0.92
#modelos[modelos=='b'] <- 0.002
r <- 5
c <- 3
dim <- r * c
#t1 <-300
tasa <- 0.15
tranqui <- 0.99
tope <- 9
digitos <- 0:tope
k <- length(digitos)
contadores <- matrix(rep(0, k*(k+1)), nrow=k, ncol=(k+1))
rownames(contadores) <- 0:tope
colnames(contadores) <- c(0:tope, NA)
n <- floor(log(k-1, 2)) + 1
neuronas <- matrix(runif(n * dim), nrow=n, ncol=dim) # perceptrones
#no paralelizar
for (t in 1:5000) { # entrenamiento
d <- sample(0:tope, 1)
pixeles <- runif(dim) < modelos[d + 1,]
correcto <- binario(d, n)
for (i in 1:n) { # paralelizar
w <- neuronas[i,]
deseada <- correcto[i]
resultado <- sum(w * pixeles) >= 0
if (deseada != resultado) {
ajuste <- tasa * (deseada - resultado)
tasa <- tranqui * tasa
neuronas[i,] <- w + ajuste * pixeles
}
}
}
neu <- function(){
#for (t in 1:t1) { # prueba
d <- sample(0:tope, 1)
pixeles <- runif(dim) < modelos[d + 1,] # fila 1 contiene el cero, etc.
correcto <- binario(d, n)
salida <- rep(FALSE, n)
for (i in 1:n) { # paralelizar
w <- neuronas[i,]
deseada <- correcto[i]
resultado <- sum(w * pixeles) >= 0
salida[i] <- resultado
}
r <- min(decimal(salida, n), k) # todos los no-existentes van al final
return(r == d)
}
contadores <- foreach(t = 1:t1, .combine = c) %dopar% neu()
stopImplicitCluster()
con <- (sum(contadores)/t1)*100
print(con)
#tf <- Sys.time()
#t <- tf - ti
#print(t) | /p12/p12_3mcalor.R | no_license | PabloChavez94/Simulacion | R | false | false | 2,035 | r | #library(foreach)
#library(doParallel)
#registerDoParallel(makeCluster(2))
ti <- Sys.time()
binario <- function(d, l) {
b <- rep(FALSE, l)
while (l > 0 | d > 0) {
b[l] <- (d %% 2 == 1)
l <- l - 1
d <- bitwShiftR(d, 1)
}
return(b)
}
decimal <- function(bits, l) {
valor <- 0
for (pos in 1:l) {
valor <- valor + 2^(l - pos) * bits[pos]
}
return(valor)
}
modelos <- read.csv("digitos.modelo", sep=" ", header=FALSE, stringsAsFactors=F)
#modelos[modelos=='n'] <- 0.995
#modelos[modelos=='g'] <- 0.92
#modelos[modelos=='b'] <- 0.002
r <- 5
c <- 3
dim <- r * c
#t1 <-300
tasa <- 0.15
tranqui <- 0.99
tope <- 9
digitos <- 0:tope
k <- length(digitos)
contadores <- matrix(rep(0, k*(k+1)), nrow=k, ncol=(k+1))
rownames(contadores) <- 0:tope
colnames(contadores) <- c(0:tope, NA)
n <- floor(log(k-1, 2)) + 1
neuronas <- matrix(runif(n * dim), nrow=n, ncol=dim) # perceptrones
#no paralelizar
for (t in 1:5000) { # entrenamiento
d <- sample(0:tope, 1)
pixeles <- runif(dim) < modelos[d + 1,]
correcto <- binario(d, n)
for (i in 1:n) { # paralelizar
w <- neuronas[i,]
deseada <- correcto[i]
resultado <- sum(w * pixeles) >= 0
if (deseada != resultado) {
ajuste <- tasa * (deseada - resultado)
tasa <- tranqui * tasa
neuronas[i,] <- w + ajuste * pixeles
}
}
}
neu <- function(){
#for (t in 1:t1) { # prueba
d <- sample(0:tope, 1)
pixeles <- runif(dim) < modelos[d + 1,] # fila 1 contiene el cero, etc.
correcto <- binario(d, n)
salida <- rep(FALSE, n)
for (i in 1:n) { # paralelizar
w <- neuronas[i,]
deseada <- correcto[i]
resultado <- sum(w * pixeles) >= 0
salida[i] <- resultado
}
r <- min(decimal(salida, n), k) # todos los no-existentes van al final
return(r == d)
}
contadores <- foreach(t = 1:t1, .combine = c) %dopar% neu()
stopImplicitCluster()
con <- (sum(contadores)/t1)*100
print(con)
#tf <- Sys.time()
#t <- tf - ti
#print(t) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/digest.R
\name{readDigestFile}
\alias{readDigestFile}
\title{Read digest file}
\usage{
readDigestFile(opts, endpoint = "mc-all/grid/digest.txt")
}
\arguments{
\item{opts}{simulation options}
\item{endpoint}{Suffix of path for digest file
Default is : "mc-all/grid/digest.txt" added to opts$simDataPath}
}
\value{
list of 5 tables (begin, areas, middle, links lin., links quad.)
}
\description{
Read digest file
}
| /man/readDigestFile.Rd | no_license | cran/antaresRead | R | false | true | 512 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/digest.R
\name{readDigestFile}
\alias{readDigestFile}
\title{Read digest file}
\usage{
readDigestFile(opts, endpoint = "mc-all/grid/digest.txt")
}
\arguments{
\item{opts}{simulation options}
\item{endpoint}{Suffix of path for digest file
Default is : "mc-all/grid/digest.txt" added to opts$simDataPath}
}
\value{
list of 5 tables (begin, areas, middle, links lin., links quad.)
}
\description{
Read digest file
}
|
library(data.table)
library(ggplot2)
library(grid)
library(gridExtra)
library(cowplot)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
source("util.R")
stdize <- function(x, ...) {(x - min(x, ...)) / (max(x, ...) - min(x, ...))}
atom.counts <- data.table(read.csv("data/atom-counts_2018-09-05_for-debugging-emse.csv"))
#colnames(atom.counts) <- sapply(colnames(atom.counts), function(s) substr(s,3,99))
proj.order <- c("linux", "freebsd", "gecko-dev", "webkit",
"gcc", "clang", "mongo", "mysql-server", "subversion", "git",
"emacs", "vim", "httpd", "nginx")
proj.domain <- factor(c("os", "os", "browser", "browser", "compiler", "compiler", "db", "db", "vcs", "vcs", "editor", "editor", "webserver", "webserver"),
levels=domain.levels,
ordered=TRUE)
atom.counts <- atom.counts[match(proj.order, atom.counts$project),]
atom.counts$domain <- proj.domain
atom.count.nums <- atom.counts[, -c("project")][, order(-colSums(atom.counts[, -c("project", "domain")])), with=FALSE]
atom.rates.nums <- sapply(atom.count.nums, function(col) stdize(col / atom.counts$all.nodes))
atom.rates.wide <- data.table(cbind(atom.counts[, .(project, domain)], atom.rates.nums))[, -c("all.nodes")]
atom.key.order <- tail(names(atom.count.nums), -2)
atom.display.order <- unlist(atom.name.conversion[atom.key.order])
atom.rates <- data.table(melt(atom.rates.wide[,-c("non.atoms")], id.vars=c("project", "domain"), variable.name="atom", value.name = "rate"))
atom.rates[, atom := convert.atom.names(atom)]
atom.rates[atom=='Reversed Subscripts']
sum(atom.counts[, reversed.subscript])
atom.rate.per.project <- ggplot(data=atom.rates, aes(project, atom)) +
geom_point(colour="black", aes(size=1)) +
geom_point(colour="white", aes(size=0.8)) +
geom_point(aes(size = 0.81*rate, colour=domain)) +
scale_size_continuous(range = c(-.4,6)) +
scale_colour_manual(values = sap.qualitative.palette) +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.4), axis.ticks.x=element_blank()) +
theme(axis.ticks.y=element_blank(), axis.title.y=element_blank()) +
theme(axis.line=element_blank()) +
theme(legend.position="none") +
scale_y_discrete(limits=rev(atom.display.order)) +
scale_x_discrete(limits=proj.order) +
labs(x="Project") +
ggtitle("Atom Rate Per Project")
ggsave("img/atom_rate_per_project.pdf", atom.rate.per.project, width=(width<-132), height=width*0.92, units = "mm")
##################################
# Clustered Spot Matrix
##################################
proj.to.domain <- as.list(as.character(proj.domain))
names(proj.to.domain) <- proj.order
project.atoms.order <- cluster.long(atom.rates, 'atom', 'project', 'rate')
atom.rate.per.project.clustered <-
ggplot(data=atom.rates.clustered, aes(project, atom)) +
theme_classic() +
geom_point(colour="black", aes(size=1)) +
geom_point(colour="white", aes(size=0.8)) +
geom_point(aes(size = 0.81*rate, colour=domain)) +
scale_size_continuous(range = c(-.4,7)) +
scale_colour_manual(values = domain.colors) +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.4), axis.ticks.x=element_blank()) +
theme(axis.ticks.y=element_blank(), axis.title.y=element_blank()) +
theme(axis.line=element_blank()) +
theme(legend.position="none") +
scale_y_discrete(limits=project.atoms.order$rowName) +
scale_x_discrete(limits=project.atoms.order$colName) +
#, labels=paste(clustered.project.order, substring(proj.to.domain[clustered.project.order],1,3), sep=' - ')) +
labs(x="Project")
atom.rate.per.project.clustered
ggsave("img/atom_rate_per_project_clustered.pdf", atom.rate.per.project.clustered, width=(width<-128), height=width*0.91, units = "mm")
############################
# all projects combined
############################
library(dplyr)
all.atom.counts.by.project <- atom.counts[, .(project, all.atoms = Reduce(`+`, .SD)),.SDcols=atom.names.dot]
all.atom.counts <- atom.counts[, -c('project','domain')][, lapply(.SD, sum)]
all.atom.rates.wide <- all.atom.counts[, -c('all.nodes', 'non.atoms')] / all.atom.counts$all.nodes
all.atom.rates <- data.table(data.frame(atom = unlist(atom.name.conversion[names(all.atom.rates.wide)]), rate = t(all.atom.rates.wide)))
atom.occurrence.rate <- ggplot(all.atom.rates, aes(x = reorder(atom, rate), y = rate)) +
theme_classic() +
geom_bar(stat="identity", fill=colors2[1]) +
geom_text(aes(y=0.0015, label=formatC(signif(rate,digits=2), digits=2, flag="#"),
color=atom %in% c('Omitted Curly Brace','Operator Precedence')), angle=0, hjust=0) +
theme(#axis.text.x=element_text(angle=90, hjust=1, vjust=.4), axis.text.y = element_blank(),
axis.text.x=element_blank(),
axis.ticks = element_blank(), axis.line = element_blank()) +
scale_y_continuous(limits = c(0.0,0.0073)) +
guides(color=FALSE) +
coord_flip() +
scale_color_manual(values=c('black', 'white')) +
labs(x="Atom", y="Occurrence Rate")
atom.occurrence.rate
ggsave("img/atom_occurrence_rate.pdf", atom.occurrence.rate, width=(width<-140), height=width*0.7, units = "mm")
# overall atom rate for paper
all.atom.ast.rate <- all.atom.counts[, (all.nodes - non.atoms) / all.nodes]
1/all.atom.ast.rate
nodes.per.omitted.curly <- 1/all.atom.counts[, omitted.curly.braces / all.nodes]
#################################
# all atoms by effect size
##################################
atom.effect <- data.table(merge(all.atom.rates, atom.effect.sizes[, .(atom = convert.atom.names(atom), effect.size)]))
confusingness.vs.prevalence.correlation <- with(atom.effect, cor(rate, effect.size)) # correlation: -0.45
atom.effect$offset.x <- atom.effect$offset.y <- 0
atom.effect[atom=="Preprocessor in Statement", c("offset.x", "offset.y") := .(0, .15)]
atom.effect[atom=="Conditional Operator", c("offset.x", "offset.y") := .(-1, -1.5)]
atom.effect[atom=="Comma Operator", c("offset.x", "offset.y") := .(-.5, .5)]
atom.effect[atom=="Repurposed Variable", c("offset.x", "offset.y") := .(0, -.5)]
atom.effect[atom=="Type Conversion", c("offset.x", "offset.y") := .(-3.5, -.17)]
confusingness.vs.prevalence <-
ggplot(atom.effect, aes(effect.size, rate)) +
theme_classic() +
geom_point(size=2.5, color=colors2dark[2]) +
geom_smooth(method="lm", se=FALSE, fullrange=TRUE, color=colors2dark[1], size=1) + #, aes(color="Exp Model"), formula= (y ~ x^2+1)) +
scale_x_continuous(limits = c(0.2, 0.75)) +
scale_y_log10(limits = c(5*10^-8, 9*10^-3)) +
geom_text(aes(label=atom, x=.009+effect.size+.003*offset.x, y=rate+0.0001*offset.y), hjust=0, vjust=.6, angle=-15, size=3) +
theme(axis.text.x=element_text(angle=90, hjust=1)) +
annotate("text", x=0.35, y=3*10^-6, label=paste0("r = ", round(confusingness.vs.prevalence.correlation, 2))) +
#ggtitle("Confusingness vs Prevalence", subtitle="Do less confusing patterns occur more often?") +
labs(x="Effect Size", y="Occurrence Rate (log)")
confusingness.vs.prevalence
ggsave("img/confusingness_vs_prevalence.pdf", confusingness.vs.prevalence, width=(width<-150), height=width*0.6, units = "mm")
################################################
# all projects by raw confusion of C question
# (not, the difference between C/NC)
################################################
## from snippet_study/results.R
# dput(atom.contingencies[, .(atom.name, correct.rate.C = round((TT + TF) / (TT + TF + FT + FF), digits=2))][order(atom.name)][,correct.rate.C])
correct.rate.C <- c(0.45, 0.48, 0.76, 0.78, 0.25, 0.3, 0.57, 0.62, 0.75, 0.54, 0.64, 0.3, 0.47, 0.52, 0.58)
atom.correct.C <- merge(all.atom.rates, cbind.data.frame(atom = atom.names, correct.rate.C))
with(atom.correct.C, cor(rate, 1-correct.rate.C))
ggplot(atom.correct.C, aes(rate, correct.rate.C)) + geom_point() +
geom_text(aes(label=atom), hjust=-0.1, angle=45, size=2) +
#geom_smooth(method="lm", aes(color="Exp Model"), formula= (y ~ x^2+1)) +
theme(axis.text.x=element_text(angle=90, hjust=1))
################################################
# atom count vs LOC in project
################################################
# cat ~/atom-finder/file_sizes_sorted.txt | sed 's,/home/dgopstein/opt/src/atom-finder/\([^/]*\)/,\1 ,' | ruby -lane 'BEGIN{h=Hash.new{|x| 0}}; count, proj, _ = $_.split; h[proj] += count.to_i; END{ p h}'
proj.loc <- data.table(proj=c("clang", "freebsd", "gcc", "gecko-dev", "linux", "mongo", "webkit", "emacs", "git", "subversion", "vim", "mysql-server", "nginx", "httpd"),
loc=c(1969346, 20252205, 5450514, 11380215, 22626962, 3864455, 4954408, 480268, 253422, 707786, 451820, 2979215, 186760, 317717))
loc.rate <- merge(proj.loc, atom.counts, by.x="proj", by.y="project")
ggplot(loc.rate, aes(loc, atom.rate)) +
geom_point() +
scale_x_log10()
################################################
# average atoms per line, and lines per atom
################################################
# github.com/AlDanial/cloc v 1.80 T=801.49 s (640.5 files/s, 141127.1 lines/s)
# ----------------------------------------------------------------------------------------
# Language files blank comment code
# ----------------------------------------------------------------------------------------
# C 85648 5324180 5970700 29452420
# C++ 53421 2226119 2239534 12034944
# C/C++ Header 74057 2152838 4175043 11390728
atom.finder.corpus.sloc <- 29452420 + 12034944 + 11390728
total.n.atoms <- sum(all.atom.counts[, -c('all.nodes', 'non.atoms')])
total.n.atoms
# line rates for paper
atoms.per.line <- total.n.atoms/atom.finder.corpus.sloc
lines.per.atom <- 1/atoms.per.line
################################################
# combined atom counts per project
################################################
all.atom.proj.rates <- atom.counts[, -c('non.atoms')][, .(rate = (base::sum(.SD) - all.nodes) / all.nodes), by=c('project', 'domain')]
all.atom.proj.rates.plot <- ggplot(all.atom.proj.rates, aes(x = reorder(project, rate), y = rate)) +
theme_classic() +
theme(plot.margin = margin(l=18, unit="mm")) +
geom_bar(stat="identity", aes(fill=domain)) +
scale_fill_manual(values=domain.colors) +
geom_text(aes(y=0.0005, label=sprintf("%0.3f", round(rate, digits=3))),
color='black', angle=0, hjust=0, size=2.5) +
theme(axis.text.x=element_blank(), axis.ticks = element_blank(), axis.line = element_blank(), axis.title.x = element_blank()) +
theme(axis.text.y=element_text(margin=margin(r=-7,"pt"), vjust=0.4)) +
theme(legend.position = c(0.87, 0.36), legend.key.size = unit(0.58,"line")) +
guides(color=FALSE) +
coord_flip() +
labs(x="Project", fill="Domain")
all.atom.proj.rates.plot
ggsave("img/all_atom_proj_rates.pdf", all.atom.proj.rates.plot, width=(width<-130), height=width*.3, units = "mm")
########################################
# Plot of Atom Effect Size (for slide deck)
########################################
atom.effect
ggplot(atom.effect, aes(reorder(atom, effect.size), effect.size)) +
geom_bar(stat="identity", fill=colors2[1]) +
geom_text(aes(y=0.05, label=sprintf("%.02f", signif(effect.size, digits=2))), color="#FFFFFF", angle=0, hjust=0, fontface='bold') +
theme_classic() +
theme(axis.line=element_blank(), axis.ticks = element_blank()) +
theme(axis.text.y = element_text(hjust = 1, vjust=.4, size=16)) +
theme(axis.text.x = element_blank()) +
theme(axis.title = element_text(size=20)) +
theme(axis.title.y = element_text(margin = margin(t=0, r=20, b=0, l=0))) +
labs(x = 'Atom of Confusion', y = 'Effect Size (Confusingness)') +
coord_flip()
########################################
# Compare atom rates with regular node rates
########################################
all.node.counts <- data.table(read.csv('data/all-node-counts_2018-08-31_for-emse.csv'))
all.node.total <- all.node.counts[, sum(count)]
all.node.counts[, rate := count / all.node.total]
print(all.node.counts, nrows=200)
selected.node.counts <- all.node.counts[node.type %in% c('<IfStatement>', ':not', ':multiply', ':divide', ':multiplyAssign', ':divideAssign', ':throw')]
node.occurrence.rate <- ggplot(selected.node.counts, aes(x = reorder(node.type, rate), y = rate)) +
theme_classic() +
geom_bar(stat="identity", fill=colors2[1]) +
geom_text(aes(y=0.0010, label=formatC(signif(rate,digits=2), digits=2, flag="#"),
color=node.type %in% c('Omitted Curly Brace','Operator Precedence')), angle=0, hjust=0) +
theme(#axis.text.x=element_text(angle=90, hjust=1, vjust=.4), axis.text.y = element_blank(),
axis.text.x=element_blank(),
axis.ticks = element_blank(), axis.line = element_blank()) +
scale_y_continuous(limits = c(0.0,0.013)) +
guides(color=FALSE) +
coord_flip() +
scale_color_manual(values=c('black', 'white')) +
labs(x="Node", y="Occurrence Rate")
node.occurrence.rate
ggsave("img/node_occurrence_rate.pdf", node.occurrence.rate, width=(width<-140), height=width*0.7, units = "mm")
atom.node.rates <- rbind(selected.node.counts[, .(name = node.type, rate, type="node")],
all.atom.rates[, .(name = atom, rate, type="atom")])
atom.node.occurrence.rate <- ggplot(atom.node.rates, aes(x = reorder(name, rate), y = rate)) +
theme_classic() +
geom_bar(stat="identity", aes(fill=colors2[as.integer(as.factor(type))])) +
geom_text(aes(y=0.0010, label=formatC(signif(rate,digits=2), digits=2, flag="#"),
color=rate>0.001), angle=0, hjust=0) +
theme(#axis.text.x=element_text(angle=90, hjust=1, vjust=.4), axis.text.y = element_blank(),
axis.text.x=element_blank(),
axis.ticks = element_blank(), axis.line = element_blank()) +
scale_y_continuous(limits = c(0.0,0.013)) +
guides(color=FALSE, fill=FALSE) +
coord_flip() +
scale_color_manual(values=c('black', 'white')) +
labs(x="Node", y="Occurrence Rate")
atom.node.occurrence.rate
as.integer(factor(atom.node.rates$type))
| /src/analysis/atom_counts.R | permissive | dgopstein/atom-finder | R | false | false | 14,052 | r | library(data.table)
library(ggplot2)
library(grid)
library(gridExtra)
library(cowplot)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
source("util.R")
# Min-max normalize a numeric vector to [0, 1].
#
# Extra arguments (e.g. na.rm = TRUE) are forwarded to range().
# Uses a single range() call instead of separate min()/max() calls
# (the original computed min(x, ...) twice).
# NOTE: yields NaN/Inf when all values are equal (zero range).
stdize <- function(x, ...) {
  rng <- range(x, ...)
  (x - rng[1]) / (rng[2] - rng[1])
}
# --- Data setup ----
# Per-project counts of each "atom of confusion": one row per project,
# one count column per atom plus 'all.nodes' and 'non.atoms' totals.
atom.counts <- data.table(read.csv("data/atom-counts_2018-09-05_for-debugging-emse.csv"))
#colnames(atom.counts) <- sapply(colnames(atom.counts), function(s) substr(s,3,99))
# Fixed display order for the 14 projects, two projects per domain.
proj.order <- c("linux", "freebsd", "gecko-dev", "webkit",
"gcc", "clang", "mongo", "mysql-server", "subversion", "git",
"emacs", "vim", "httpd", "nginx")
# Domain label for each entry of proj.order, as an ordered factor.
# domain.levels comes from util.R -- assumed to cover all seven domains.
proj.domain <- factor(c("os", "os", "browser", "browser", "compiler", "compiler", "db", "db", "vcs", "vcs", "editor", "editor", "webserver", "webserver"),
levels=domain.levels,
ordered=TRUE)
# Reorder rows to proj.order, then append the domain column.  'domain'
# being the LAST column matters for the index arithmetic two lines down.
atom.counts <- atom.counts[match(proj.order, atom.counts$project),]
atom.counts$domain <- proj.domain
# Numeric columns sorted by descending corpus-wide total.  The ordering
# indices are computed on the table WITHOUT 'domain', so selecting those
# indices from the table with 'domain' (last column) implicitly drops it.
atom.count.nums <- atom.counts[, -c("project")][, order(-colSums(atom.counts[, -c("project", "domain")])), with=FALSE]
# Per-project rate (count / AST nodes), then min-max scaled to [0, 1]
# within each atom column so every atom uses the full dot-size range.
atom.rates.nums <- sapply(atom.count.nums, function(col) stdize(col / atom.counts$all.nodes))
atom.rates.wide <- data.table(cbind(atom.counts[, .(project, domain)], atom.rates.nums))[, -c("all.nodes")]
# Atom column keys: drop the first two columns of the sorted table
# (presumably all.nodes and non.atoms, which have the largest totals).
atom.key.order <- tail(names(atom.count.nums), -2)
atom.display.order <- unlist(atom.name.conversion[atom.key.order])  # util.R: key -> display name
# Long format: one row per (project, atom) with its scaled rate.
atom.rates <- data.table(melt(atom.rates.wide[,-c("non.atoms")], id.vars=c("project", "domain"), variable.name="atom", value.name = "rate"))
atom.rates[, atom := convert.atom.names(atom)]
# Interactive sanity checks for the 'reversed subscript' atom.
atom.rates[atom=='Reversed Subscripts']
sum(atom.counts[, reversed.subscript])
# Spot matrix: one dot per (project, atom).  Dot size encodes the scaled
# rate, colour encodes the project's domain.  The first two geom_point
# layers draw a fixed black ring with a white fill behind each data dot.
atom.rate.per.project <- ggplot(data=atom.rates, aes(project, atom)) +
geom_point(colour="black", aes(size=1)) +
geom_point(colour="white", aes(size=0.8)) +
geom_point(aes(size = 0.81*rate, colour=domain)) +
scale_size_continuous(range = c(-.4,6)) +
scale_colour_manual(values = sap.qualitative.palette) +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.4), axis.ticks.x=element_blank()) +
theme(axis.ticks.y=element_blank(), axis.title.y=element_blank()) +
theme(axis.line=element_blank()) +
theme(legend.position="none") +
scale_y_discrete(limits=rev(atom.display.order)) +
scale_x_discrete(limits=proj.order) +
labs(x="Project") +
ggtitle("Atom Rate Per Project")
# width<-132 assigns and reuses the width so height keeps a fixed aspect ratio.
ggsave("img/atom_rate_per_project.pdf", atom.rate.per.project, width=(width<-132), height=width*0.92, units = "mm")
##################################
# Clustered Spot Matrix
##################################
# Map project name -> domain string (only used by the commented-out axis
# labels below).
proj.to.domain <- as.list(as.character(proj.domain))
names(proj.to.domain) <- proj.order
# Cluster atoms (rows) and projects (columns) by rate so similar profiles
# sit adjacent; cluster.long() is defined in util.R.
project.atoms.order <- cluster.long(atom.rates, 'atom', 'project', 'rate')
# NOTE(review): 'atom.rates.clustered' is not defined anywhere in this file;
# presumably it should be 'atom.rates' (the axis limits from
# project.atoms.order already impose the clustered ordering) -- verify.
atom.rate.per.project.clustered <-
ggplot(data=atom.rates.clustered, aes(project, atom)) +
theme_classic() +
geom_point(colour="black", aes(size=1)) +
geom_point(colour="white", aes(size=0.8)) +
geom_point(aes(size = 0.81*rate, colour=domain)) +
scale_size_continuous(range = c(-.4,7)) +
scale_colour_manual(values = domain.colors) +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.4), axis.ticks.x=element_blank()) +
theme(axis.ticks.y=element_blank(), axis.title.y=element_blank()) +
theme(axis.line=element_blank()) +
theme(legend.position="none") +
scale_y_discrete(limits=project.atoms.order$rowName) +
scale_x_discrete(limits=project.atoms.order$colName) +
#, labels=paste(clustered.project.order, substring(proj.to.domain[clustered.project.order],1,3), sep=' - ')) +
labs(x="Project")
atom.rate.per.project.clustered
ggsave("img/atom_rate_per_project_clustered.pdf", atom.rate.per.project.clustered, width=(width<-128), height=width*0.91, units = "mm")
############################
# all projects combined
############################
library(dplyr)
# Total atom count per project, summed across the atom count columns.
# atom.names.dot (util.R) is assumed to list those columns -- confirm.
# NOTE(review): this table does not appear to be used again below.
all.atom.counts.by.project <- atom.counts[, .(project, all.atoms = Reduce(`+`, .SD)),.SDcols=atom.names.dot]
# Column sums across all projects: corpus-wide count per atom, plus the
# all.nodes / non.atoms totals.
all.atom.counts <- atom.counts[, -c('project','domain')][, lapply(.SD, sum)]
# Corpus-wide occurrence rate per atom: count / total AST nodes.
all.atom.rates.wide <- all.atom.counts[, -c('all.nodes', 'non.atoms')] / all.atom.counts$all.nodes
# Long form: one row per atom with display name and rate.
all.atom.rates <- data.table(data.frame(atom = unlist(atom.name.conversion[names(all.atom.rates.wide)]), rate = t(all.atom.rates.wide)))
# Horizontal bar chart of corpus-wide rates, each bar labelled with its
# rate.  The two named atoms (presumably the longest bars) get white
# label text so the label stays readable inside the bar.
atom.occurrence.rate <- ggplot(all.atom.rates, aes(x = reorder(atom, rate), y = rate)) +
theme_classic() +
geom_bar(stat="identity", fill=colors2[1]) +
geom_text(aes(y=0.0015, label=formatC(signif(rate,digits=2), digits=2, flag="#"),
color=atom %in% c('Omitted Curly Brace','Operator Precedence')), angle=0, hjust=0) +
theme(#axis.text.x=element_text(angle=90, hjust=1, vjust=.4), axis.text.y = element_blank(),
axis.text.x=element_blank(),
axis.ticks = element_blank(), axis.line = element_blank()) +
scale_y_continuous(limits = c(0.0,0.0073)) +
guides(color=FALSE) +
coord_flip() +
scale_color_manual(values=c('black', 'white')) +
labs(x="Atom", y="Occurrence Rate")
atom.occurrence.rate
ggsave("img/atom_occurrence_rate.pdf", atom.occurrence.rate, width=(width<-140), height=width*0.7, units = "mm")
# overall atom rate for paper
# Fraction of AST nodes that belong to some atom; its reciprocal (printed
# below) reads as "one atom per N nodes".
all.atom.ast.rate <- all.atom.counts[, (all.nodes - non.atoms) / all.nodes]
1/all.atom.ast.rate
nodes.per.omitted.curly <- 1/all.atom.counts[, omitted.curly.braces / all.nodes]
#################################
# all atoms by effect size
##################################
# Join corpus-wide occurrence rate with each atom's confusion effect size
# (atom.effect.sizes comes from outside this file, presumably util.R).
atom.effect <- data.table(merge(all.atom.rates, atom.effect.sizes[, .(atom = convert.atom.names(atom), effect.size)]))
confusingness.vs.prevalence.correlation <- with(atom.effect, cor(rate, effect.size)) # correlation: -0.45
# Manual per-label nudges to keep text labels from overlapping in the
# scatter plot; default offset is zero.
atom.effect$offset.x <- atom.effect$offset.y <- 0
atom.effect[atom=="Preprocessor in Statement", c("offset.x", "offset.y") := .(0, .15)]
atom.effect[atom=="Conditional Operator", c("offset.x", "offset.y") := .(-1, -1.5)]
atom.effect[atom=="Comma Operator", c("offset.x", "offset.y") := .(-.5, .5)]
atom.effect[atom=="Repurposed Variable", c("offset.x", "offset.y") := .(0, -.5)]
atom.effect[atom=="Type Conversion", c("offset.x", "offset.y") := .(-3.5, -.17)]
# Effect size vs occurrence rate (log y), with a linear fit and the
# correlation annotated.  Note the lm is fit on the log10-transformed y
# because scale transformations apply before stats in ggplot2.
confusingness.vs.prevalence <-
ggplot(atom.effect, aes(effect.size, rate)) +
theme_classic() +
geom_point(size=2.5, color=colors2dark[2]) +
geom_smooth(method="lm", se=FALSE, fullrange=TRUE, color=colors2dark[1], size=1) + #, aes(color="Exp Model"), formula= (y ~ x^2+1)) +
scale_x_continuous(limits = c(0.2, 0.75)) +
scale_y_log10(limits = c(5*10^-8, 9*10^-3)) +
geom_text(aes(label=atom, x=.009+effect.size+.003*offset.x, y=rate+0.0001*offset.y), hjust=0, vjust=.6, angle=-15, size=3) +
theme(axis.text.x=element_text(angle=90, hjust=1)) +
annotate("text", x=0.35, y=3*10^-6, label=paste0("r = ", round(confusingness.vs.prevalence.correlation, 2))) +
#ggtitle("Confusingness vs Prevalence", subtitle="Do less confusing patterns occur more often?") +
labs(x="Effect Size", y="Occurrence Rate (log)")
confusingness.vs.prevalence
ggsave("img/confusingness_vs_prevalence.pdf", confusingness.vs.prevalence, width=(width<-150), height=width*0.6, units = "mm")
################################################
# all projects by raw confusion of C question
# (not, the difference between C/NC)
################################################
## from snippet_study/results.R
# dput(atom.contingencies[, .(atom.name, correct.rate.C = round((TT + TF) / (TT + TF + FT + FF), digits=2))][order(atom.name)][,correct.rate.C])
# Hard-coded per-atom rate of correct answers on the confusing (C) snippet,
# pasted from the dput() output above.
# NOTE(review): these values are positional -- they assume atom.names is in
# the same alphabetical order as the dput output; verify before reuse.
correct.rate.C <- c(0.45, 0.48, 0.76, 0.78, 0.25, 0.3, 0.57, 0.62, 0.75, 0.54, 0.64, 0.3, 0.47, 0.52, 0.58)
atom.correct.C <- merge(all.atom.rates, cbind.data.frame(atom = atom.names, correct.rate.C))
# Correlation between corpus prevalence and error rate (1 - correct rate).
with(atom.correct.C, cor(rate, 1-correct.rate.C))
# Quick exploratory scatterplot; displayed only, not saved to disk.
ggplot(atom.correct.C, aes(rate, correct.rate.C)) + geom_point() +
geom_text(aes(label=atom), hjust=-0.1, angle=45, size=2) +
#geom_smooth(method="lm", aes(color="Exp Model"), formula= (y ~ x^2+1)) +
theme(axis.text.x=element_text(angle=90, hjust=1))
################################################
# atom count vs LOC in project
################################################
# LOC per project was computed outside R with the shell/ruby one-liner below:
# cat ~/atom-finder/file_sizes_sorted.txt | sed 's,/home/dgopstein/opt/src/atom-finder/\([^/]*\)/,\1 ,' | ruby -lane 'BEGIN{h=Hash.new{|x| 0}}; count, proj, _ = $_.split; h[proj] += count.to_i; END{ p h}'
proj.loc <- data.table(proj=c("clang", "freebsd", "gcc", "gecko-dev", "linux", "mongo", "webkit", "emacs", "git", "subversion", "vim", "mysql-server", "nginx", "httpd"),
loc=c(1969346, 20252205, 5450514, 11380215, 22626962, 3864455, 4954408, 480268, 253422, 707786, 451820, 2979215, 186760, 317717))
# Join the hand-entered LOC counts onto the per-project atom counts.
# NOTE(review): assumes atom.counts carries a column named atom.rate (not
# visible in this chunk) -- confirm against where atom.counts is built.
loc.rate <- merge(proj.loc, atom.counts, by.x="proj", by.y="project")
# Exploratory plot: does project size (log LOC) relate to its atom rate?
ggplot(loc.rate, aes(loc, atom.rate)) +
geom_point() +
scale_x_log10()
################################################
# average atoms per line, and lines per atom
################################################
# Corpus SLOC taken from the cloc report reproduced below:
# github.com/AlDanial/cloc v 1.80 T=801.49 s (640.5 files/s, 141127.1 lines/s)
# ----------------------------------------------------------------------------------------
# Language files blank comment code
# ----------------------------------------------------------------------------------------
# C 85648 5324180 5970700 29452420
# C++ 53421 2226119 2239534 12034944
# C/C++ Header 74057 2152838 4175043 11390728
# Sum of the "code" column: C + C++ + headers.
atom.finder.corpus.sloc <- 29452420 + 12034944 + 11390728
# Total atom occurrences across the corpus: sum every atom-count column,
# dropping the bookkeeping columns all.nodes and non.atoms.
total.n.atoms <- sum(all.atom.counts[, -c('all.nodes', 'non.atoms')])
total.n.atoms
# line rates for paper
atoms.per.line <- total.n.atoms/atom.finder.corpus.sloc
# Reciprocal: on average, one atom every N source lines.
lines.per.atom <- 1/atoms.per.line
################################################
# combined atom counts per project
################################################
# Per-project overall atom rate: sum of all atom columns divided by the
# project's total AST node count. (.SD here still contains all.nodes,
# hence the "- all.nodes" correction before dividing.)
all.atom.proj.rates <- atom.counts[, -c('non.atoms')][, .(rate = (base::sum(.SD) - all.nodes) / all.nodes), by=c('project', 'domain')]
# Horizontal bar chart of per-project atom rates, filled by project domain,
# with the numeric rate printed at the base of each bar.
all.atom.proj.rates.plot <- ggplot(all.atom.proj.rates, aes(x = reorder(project, rate), y = rate)) +
theme_classic() +
theme(plot.margin = margin(l=18, unit="mm")) +
geom_bar(stat="identity", aes(fill=domain)) +
scale_fill_manual(values=domain.colors) +
geom_text(aes(y=0.0005, label=sprintf("%0.3f", round(rate, digits=3))),
color='black', angle=0, hjust=0, size=2.5) +
theme(axis.text.x=element_blank(), axis.ticks = element_blank(), axis.line = element_blank(), axis.title.x = element_blank()) +
theme(axis.text.y=element_text(margin=margin(r=-7,"pt"), vjust=0.4)) +
# Legend placed inside the panel to save horizontal space.
theme(legend.position = c(0.87, 0.36), legend.key.size = unit(0.58,"line")) +
guides(color=FALSE) +
coord_flip() +
labs(x="Project", fill="Domain")
all.atom.proj.rates.plot
ggsave("img/all_atom_proj_rates.pdf", all.atom.proj.rates.plot, width=(width<-130), height=width*.3, units = "mm")
########################################
# Plot of Atom Effect Size (for slide deck)
########################################
atom.effect
# Horizontal bar chart of each atom's experimental effect size, largest at
# the top, with the value printed in bold white inside the bar. Displayed
# only (for slides); intentionally not written out with ggsave().
ggplot(atom.effect, aes(reorder(atom, effect.size), effect.size)) +
geom_bar(stat="identity", fill=colors2[1]) +
geom_text(aes(y=0.05, label=sprintf("%.02f", signif(effect.size, digits=2))), color="#FFFFFF", angle=0, hjust=0, fontface='bold') +
theme_classic() +
theme(axis.line=element_blank(), axis.ticks = element_blank()) +
theme(axis.text.y = element_text(hjust = 1, vjust=.4, size=16)) +
theme(axis.text.x = element_blank()) +
# Oversized fonts because this figure is meant for projection.
theme(axis.title = element_text(size=20)) +
theme(axis.title.y = element_text(margin = margin(t=0, r=20, b=0, l=0))) +
labs(x = 'Atom of Confusion', y = 'Effect Size (Confusingness)') +
coord_flip()
########################################
# Compare atom rates with regular node rates
########################################
# Rate of every AST node type in the corpus, from a pre-computed CSV.
all.node.counts <- data.table(read.csv('data/all-node-counts_2018-08-31_for-emse.csv'))
all.node.total <- all.node.counts[, sum(count)]
# Adds the rate column by reference (data.table `:=`).
all.node.counts[, rate := count / all.node.total]
print(all.node.counts, nrows=200)
# Hand-picked familiar node types as a baseline to compare atoms against.
selected.node.counts <- all.node.counts[node.type %in% c('<IfStatement>', ':not', ':multiply', ':divide', ':multiplyAssign', ':divideAssign', ':throw')]
# Bar chart of the selected node rates, mirroring the atom rate plot above.
node.occurrence.rate <- ggplot(selected.node.counts, aes(x = reorder(node.type, rate), y = rate)) +
theme_classic() +
geom_bar(stat="identity", fill=colors2[1]) +
# NOTE(review): the highlight condition below was copied from the atom plot;
# neither name occurs among the node types selected above, so it is always
# FALSE here and every label renders black.
geom_text(aes(y=0.0010, label=formatC(signif(rate,digits=2), digits=2, flag="#"),
color=node.type %in% c('Omitted Curly Brace','Operator Precedence')), angle=0, hjust=0) +
theme(#axis.text.x=element_text(angle=90, hjust=1, vjust=.4), axis.text.y = element_blank(),
axis.text.x=element_blank(),
axis.ticks = element_blank(), axis.line = element_blank()) +
scale_y_continuous(limits = c(0.0,0.013)) +
guides(color=FALSE) +
coord_flip() +
scale_color_manual(values=c('black', 'white')) +
labs(x="Node", y="Occurrence Rate")
node.occurrence.rate
ggsave("img/node_occurrence_rate.pdf", node.occurrence.rate, width=(width<-140), height=width*0.7, units = "mm")
# Combine atom and baseline-node occurrence rates into one table so both
# populations can be shown on a single chart; `type` distinguishes them.
atom.node.rates <- rbind(selected.node.counts[, .(name = node.type, rate, type="node")],
                         all.atom.rates[, .(name = atom, rate, type="atom")])
# Bar chart of every atom plus the selected plain AST nodes, sorted by rate,
# with atoms and nodes distinguished by fill color.
#
# BUG FIX: the original mapped computed color strings inside aes()
# (aes(fill = colors2[as.integer(as.factor(type))])), which makes ggplot
# treat those strings as levels of a discrete variable and color the bars
# with its default palette -- the intended colors2 colors were never used.
# Mapping fill = type and supplying the palette with scale_fill_manual()
# gives atoms colors2[1] and nodes colors2[2], matching the factor coding
# the original attempted (levels sort as "atom", "node").
atom.node.occurrence.rate <- ggplot(atom.node.rates, aes(x = reorder(name, rate), y = rate)) +
  theme_classic() +
  geom_bar(stat="identity", aes(fill=type)) +
  # Rate labels near the axis; long (dark) bars get white text, others black.
  geom_text(aes(y=0.0010, label=formatC(signif(rate,digits=2), digits=2, flag="#"),
                color=rate>0.001), angle=0, hjust=0) +
  theme(axis.text.x=element_blank(),
        axis.ticks = element_blank(), axis.line = element_blank()) +
  scale_y_continuous(limits = c(0.0,0.013)) +
  guides(color=FALSE, fill=FALSE) +  # both mappings are decorative only
  coord_flip() +
  scale_color_manual(values=c('black', 'white')) +
  scale_fill_manual(values=c(atom=colors2[1], node=colors2[2])) +
  labs(x="Node", y="Occurrence Rate")
atom.node.occurrence.rate
# Exploratory check of the factor coding the original fill hack relied on.
as.integer(factor(atom.node.rates$type))
|
# Auto-generated libFuzzer/valgrind reproduction case for MGDrivE::calcCos:
# a 2x9 matrix of extreme and subnormal doubles as lat/long input, radius 0.
# Do not hand-edit the values -- they reproduce a specific fuzzer finding.
testlist <- list(latLongs = structure(c(-1.99382434780448e+304, 1.39065416902259e-309, 8.91420948946625e+303, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(2L, 9L)), r = 0)
result <- do.call(MGDrivE::calcCos,testlist)
str(result) | /MGDrivE/inst/testfiles/calcCos/libFuzzer_calcCos/calcCos_valgrind_files/1612727893-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 240 | r | testlist <- list(latLongs = structure(c(-1.99382434780448e+304, 1.39065416902259e-309, 8.91420948946625e+303, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(2L, 9L)), r = 0)
result <- do.call(MGDrivE::calcCos,testlist)
str(result) |
#' Get the project title from the working directory
#'
#' Derives the project title (the name of the project's root directory) from
#' `path`, for use in the metadata.rmd. If the path points inside the
#' package's "minimum_metadata/minimum_metadata" subdirectory, that suffix is
#' stripped first so the project root -- not the metadata folder -- is named.
#'
#' @param path Directory to derive the title from; defaults to the current
#'   working directory, preserving the original zero-argument behavior.
#' @return The project title as a character vector of length one.
#' @export
get_project_title <- function(path = getwd()) {
  # Remove the metadata subdirectory (first occurrence, fixed string),
  # mirroring the original stringr::str_replace() call; sub() is a no-op
  # when the pattern is absent, so the grepl() pre-check is unnecessary.
  path <- sub("/minimum_metadata/minimum_metadata", "", path, fixed = TRUE)
  # basename() returns everything after the last path separator -- exactly
  # what the original str_locate_all()/str_sub() arithmetic computed, but
  # without erroring when the path contains no "/" at all.
  basename(path)
}
get_project_title()
#get_project_title()
| /R/get_project_title.R | no_license | DrMattG/LivingNorwayR | R | false | false | 536 | r | #'get project title from working directory
#' This function gets the project title (the name of the project's root
#' directory) from `path` for use in the metadata.rmd. A trailing
#' "minimum_metadata/minimum_metadata" subdirectory is stripped first so the
#' project root -- not the metadata folder -- is named.
#' @param path Directory to derive the title from; defaults to the current
#'   working directory, preserving the original zero-argument behavior.
#' @return the project title as a character vector of length one
#' @export
get_project_title <- function(path = getwd()) {
  # Remove the metadata subdirectory (first occurrence, fixed string),
  # mirroring the original stringr::str_replace() call; sub() is a no-op
  # when the pattern is absent, so the grepl() pre-check is unnecessary.
  path <- sub("/minimum_metadata/minimum_metadata", "", path, fixed = TRUE)
  # basename() returns everything after the last path separator -- exactly
  # what the original str_locate_all()/str_sub() arithmetic computed, but
  # without erroring when the path contains no "/" at all.
  basename(path)
}
get_project_title()
#get_project_title()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.