content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
library(htmltools)
test_that("mergeLists", {
a <- list(a = 1, b = "b", c = 3)
b <- list(a = 2, c = 4, d = "d")
expect_equal(mergeLists(a, b), list(a = 2, b = "b", c = 4, d = "d"))
a <- list(a = 1, b = 2)
b <- list()
expect_equal(mergeLists(a, b), list(a = 1, b = 2))
a <- list()
b <- list(a = 1, b = 2)
expect_equal(mergeLists(a, b), list(a = 1, b = 2))
a <- list(a = NULL, b = 2)
b <- list(a = 1, b = NULL)
expect_equal(mergeLists(a, b), list(a = 1, b = 2))
expect_equal(mergeLists(NULL, list(a = 1, b = 2)), list(a = 1, b = 2))
expect_equal(mergeLists(list(a = 1, b = 2), NULL), list(a = 1, b = 2))
a <- list(a = NULL, b = 2, 3)
b <- list(a = 1, b = NULL, 4)
expect_equal(mergeLists(a, b), list(a = 1, b = 2, 3, 4))
a <- list(a = NULL, b = 2)
b <- list(1, 2, 3)
expect_equal(mergeLists(a, b), list(a = NULL, b = 2, 1, 2, 3))
})
test_that("filterNulls", {
expect_equal(filterNulls(list(a = 1, b = NULL, c = NULL, d = 2)), list(a = 1, d = 2))
expect_equal(filterNulls(list(a = 1, b = "b")), list(a = 1, b = "b"))
expect_equal(filterNulls(list(a = 1, 2, b = NULL)), list(a = 1, 2))
expect_equal(filterNulls(list(1, NULL, 2)), list(1, 2))
})
test_that("asJSONList", {
expect_equal(as.character(toJSON(asJSONList("x"))), '["x"]')
expect_equal(as.character(toJSON(asJSONList(c(1, 2)))), '[1,2]')
expect_equal(as.character(toJSON(asJSONList(c()))), 'null')
expect_equal(asJSONList(NULL), NULL)
})
test_that("isNamedList", {
expect_true(isNamedList(list()))
expect_true(isNamedList(list(a = 1, b = 2)))
expect_false(isNamedList(list(1)))
expect_false(isNamedList(list(1, a = 2)))
expect_false(isNamedList(NULL))
expect_false(isNamedList("a"))
})
test_that("is.tag", {
expect_true(is.tag(tags$div()))
expect_false(is.tag(list()))
})
test_that("is.htmlwidget", {
expect_true(is.htmlwidget(reactable(data.frame(x = 1))))
expect_false(is.htmlwidget(div()))
})
test_that("is.htmlDependency", {
dep <- htmlDependency("dep", "0.1.0", "/path/to/dep")
expect_true(is.htmlDependency(dep))
expect_false(is.htmlDependency(div()))
})
test_that("isTagList", {
expect_true(isTagList(tagList()))
expect_true(isTagList(tagList("a")))
expect_true(isTagList(tagList(1, div())))
expect_true(isTagList(list(div(), span())))
expect_false(isTagList(div()))
expect_false(isTagList(list(div(), list())))
})
test_that("asReactTag", {
# Nodes should be strings
expect_equal(asReactTag("text"), "text")
expect_equal(asReactTag("\u2718"), "\u2718")
expect_equal(asReactTag(123), "123")
expect_equal(asReactTag(TRUE), "TRUE")
expect_equal(asReactTag(NA), "NA") # should be "NA" rather than NA_character_
expect_equal(asReactTag(NA_character_), "NA") # should be "NA" rather than NA_character_
expect_equal(asReactTag(factor("xy")), "xy")
expect_equal(asReactTag(as.Date("2019-01-03")), "2019-01-03")
expect_equal(asReactTag(list("text")), "text")
# NULLs should be left as-is
expect_equal(asReactTag(NULL), NULL)
# Tags should be extracted from nested tables
tag <- asReactTag(reactable(data.frame(x = 1)))
expect_true(is.tag(tag))
# Nested tables should be marked
expect_true(tag$attribs$nested)
# All other htmlwidgets should be converted to tags
tbl <- reactable(data.frame(x = 1))
class(tbl) <- c("my-widget", "htmlwidget")
tag <- asReactTag(tbl)
expect_equal(tag$name, "WidgetContainer")
expect_equal(tag$attribs, list(key = digest::digest(tbl)))
expect_equal(findDependencies(tag), findDependencies(tbl))
expect_equal(length(tag$children), 1)
expect_equal(tag$children[[1]]$name, "Fragment")
# Tag lists should be unnested and wrapped in fragments
expect_equal(asReactTag(tagList()), reactR::React$Fragment())
expect_equal(asReactTag(tagList(div("x"))), reactR::React$Fragment(div("x")))
expect_equal(asReactTag(tagList(div(), "x")), reactR::React$Fragment(div(), "x"))
# htmlwidgets in tag lists
tag <- asReactTag(tagList(reactable(data.frame(x = 1)), "y"))
expect_equal(length(tag$children), 2)
expect_true(is.tag(tag$children[[1]]))
expect_equal(tag$children[[2]], "y")
# Nested tags should be unnested
nestedTag <- div(
list(
div(),
div(list(div()))
)
)
expected <- div(
div(),
div(div())
)
expect_equal(asReactTag(nestedTag), expected)
nestedTag <- div(
tagList("a", div(
tagList("b", span("c", class = "c"))
))
)
expected <- div("a", div("b", span("c", className = "c")))
expect_equal(asReactTag(nestedTag), expected)
nestedTagList <- tagList(
div(class = "a"),
tagList(
div(),
tagList("x", span("y", class = "y"))
)
)
expected <- reactR::React$Fragment(
div(className = "a"),
div(),
"x",
span("y", className = "y")
)
expect_equal(asReactTag(nestedTagList), expected)
# Null elements should be pruned
expect_equal(asReactTag(div(1, NULL, 3)), div("1", "3"))
expect_equal(asReactTag(tagList(NULL, "a", tagList(NULL, "b", NULL), div(NULL, "c"))),
reactR::React$Fragment("a", "b", div("c")))
# Attributes should be converted
expect_equal(asReactTag(div(style = "color: red", class = "cls")),
div(style = list(color = "red"), className = "cls"))
# Attributes should be preserved
expect_equal(asReactTag(div(factor("xy"))), div("xy"))
expect_equal(asReactTag(div(div(as.Date("2019-01-03")))), div(div("2019-01-03")))
# Duplicate attributes should be included and collapsed (e.g., for likelihood of
# duplicate class attributes in HTML widgets with htmlwidgets >= 1.6.0)
expect_equal(asReactTag(div(class = "a", class = "b")), div(className = "a b"))
expect_equal(
asReactTag(span(class = "a", test = "t", style = list(color = "red"), class = "bb", style = list(color = "blue"))),
span(test = "t", style = list(list(color = "red"), list(color = "blue")), className = "a bb")
)
})
test_that("asReactTag preserves HTML dependencies", {
dep <- htmlDependency("dep", "0.1.0", "/path/to/dep")
dep2 <- htmlDependency("dep2", "0.5.0", "/path/to/dep2")
# Single tag
tag <- attachDependencies(div(div("x")), dep)
expect_equal(htmlDependencies(asReactTag(tag)), list(dep))
# Tag w/ nested deps
tag <- div(attachDependencies(div("x"), dep))
expect_equal(htmlDependencies(asReactTag(tag)$children[[1]]), list(dep))
# Multiple nested deps
tag <- div(attachDependencies(div("x"), dep2), attachDependencies(div("x"), dep))
expect_equal(findDependencies(asReactTag(tag)), list(dep2, dep))
# Tag list
tag <- attachDependencies(tagList(div("x")), dep)
expect_equal(htmlDependencies(asReactTag(tag)), list(dep))
# Tag list w/ nested tag deps
tag <- attachDependencies(tagList(div("x"), attachDependencies(div("y"), dep)), dep2)
expect_equal(findDependencies(asReactTag(tag)), list(dep, dep2))
# Tag list w/ nested tag list deps
tag <- attachDependencies(tagList(div("x"), attachDependencies(tagList("y"), dep)), dep2)
expect_equal(findDependencies(asReactTag(tag)), list(dep2, dep))
# Tag w/ nested tag list deps
tag <- div(attachDependencies(tagList(div("x")), dep), div("y"))
expect_equal(findDependencies(asReactTag(tag)), list(dep))
# HTML dependency objects
tag <- tagList("x", "y", dep)
expect_equal(asReactTag(tag), attachDependencies(reactR::React$Fragment("x", "y"), dep))
tag <- div("x", div(), dep, dep2, "z")
expect_equal(asReactTag(tag), attachDependencies(div("x", div(), "z"), list(dep, dep2)))
# Nested HTML dependency objects
tag <- tagList("x", div(dep), span("y"))
expect_equal(asReactTag(tag), reactR::React$Fragment("x", attachDependencies(div(), dep), span("y")))
tag <- div("x", tagList(dep), span("y"))
expect_equal(asReactTag(tag), attachDependencies(div("x", span("y")), dep))
# HTML dependencies in nested tables
tbl <- reactable(
data.frame(x = 1),
columns = list(x = colDef(cell = function() tagList(dep, dep2)))
)
tag <- asReactTag(tbl)
expect_equal(htmlDependencies(tag), list(dep, dep2))
})
test_that("asReactAttributes", {
attribs <- list(class = "cls", "for" = "id", tabindex = 1)
expected <- list(className = "cls", htmlFor = "id", tabIndex = 1)
expect_equal(asReactAttributes(attribs, "th"), expected)
attribs <- list(value = "x")
expect_equal(asReactAttributes(attribs, "input"), list(defaultValue = "x"))
expect_equal(asReactAttributes(attribs, "select"), list(defaultValue = "x"))
expect_equal(asReactAttributes(attribs, "textarea"), list(defaultValue = "x"))
expect_equal(asReactAttributes(attribs, "option"), list(value = "x"))
expect_equal(asReactAttributes(attribs, "button"), list(value = "x"))
attribs <- list(checked = NA)
expect_equal(asReactAttributes(attribs, "input"), list(defaultChecked = TRUE))
expect_equal(asReactAttributes(attribs, "div"), list(checked = NA))
attribs <- list(onchange = "onChange(this, event)", onclick = "console.log(this, event);")
expect_equal(
asReactAttributes(attribs, "select"),
list(
onChange = JS("function(_e){(function(event){onChange(this, event)}).apply(event.target,[_e])}"),
onClick = JS("function(_e){(function(event){console.log(this, event);}).apply(event.target,[_e])}")
)
)
attribs <- list(style = "border: none; color: red; text-align: left")
expected <- list(style = list(border = "none", color = "red", "text-align" = "left"))
expect_equal(asReactAttributes(attribs, "div"), expected)
attribs <- list(style = list(border = "none"))
expected <- list(style = list(border = "none"))
expect_equal(asReactAttributes(attribs, "div"), expected)
# Non-converted attributes
expect_equal(asReactAttributes(list("data-attr" = "t"), "div"), list("data-attr" = "t"))
expect_equal(asReactAttributes(list("aria-label" = "lab"), "div"), list("aria-label" = "lab"))
})
test_that("asReactStyle", {
expect_equal(asReactStyle("color: red"), list(color = "red"))
expect_equal(asReactStyle("color: red;"), list(color = "red"))
expect_equal(asReactStyle(" color: red; margin-bottom:55px ;"),
list(color = "red", "margin-bottom" = "55px"))
expect_equal(asReactStyle(" color: red ;; margin-bott"),
list(color = "red"))
expect_equal(asReactStyle("color"), list())
expect_equal(asReactStyle(list(height = 0)), list(height = 0))
})
test_that("trimws", {
expect_equal(trimws(" "), "")
expect_equal(trimws("xvz "), "xvz")
expect_equal(trimws("abd "), "abd")
expect_equal(trimws(" xvz "), "xvz")
})
test_that("callFunc", {
expect_equal(callFunc(function(x) x, 5), 5)
expect_equal(callFunc(function(x) x, 5, "a", "b"), 5)
expect_equal(callFunc(function(x, y) x + y, 5, 1), 6)
expect_equal(callFunc(function(x, y) x + y, 5, 1), 6)
expect_equal(callFunc(function(x) x), NULL)
expect_equal(callFunc(function(x, y) y, "x"), NULL)
})
| /tests/testthat/test-utils.R | permissive | glin/reactable | R | false | false | 10,854 | r | library(htmltools)
test_that("mergeLists", {
a <- list(a = 1, b = "b", c = 3)
b <- list(a = 2, c = 4, d = "d")
expect_equal(mergeLists(a, b), list(a = 2, b = "b", c = 4, d = "d"))
a <- list(a = 1, b = 2)
b <- list()
expect_equal(mergeLists(a, b), list(a = 1, b = 2))
a <- list()
b <- list(a = 1, b = 2)
expect_equal(mergeLists(a, b), list(a = 1, b = 2))
a <- list(a = NULL, b = 2)
b <- list(a = 1, b = NULL)
expect_equal(mergeLists(a, b), list(a = 1, b = 2))
expect_equal(mergeLists(NULL, list(a = 1, b = 2)), list(a = 1, b = 2))
expect_equal(mergeLists(list(a = 1, b = 2), NULL), list(a = 1, b = 2))
a <- list(a = NULL, b = 2, 3)
b <- list(a = 1, b = NULL, 4)
expect_equal(mergeLists(a, b), list(a = 1, b = 2, 3, 4))
a <- list(a = NULL, b = 2)
b <- list(1, 2, 3)
expect_equal(mergeLists(a, b), list(a = NULL, b = 2, 1, 2, 3))
})
test_that("filterNulls", {
expect_equal(filterNulls(list(a = 1, b = NULL, c = NULL, d = 2)), list(a = 1, d = 2))
expect_equal(filterNulls(list(a = 1, b = "b")), list(a = 1, b = "b"))
expect_equal(filterNulls(list(a = 1, 2, b = NULL)), list(a = 1, 2))
expect_equal(filterNulls(list(1, NULL, 2)), list(1, 2))
})
test_that("asJSONList", {
expect_equal(as.character(toJSON(asJSONList("x"))), '["x"]')
expect_equal(as.character(toJSON(asJSONList(c(1, 2)))), '[1,2]')
expect_equal(as.character(toJSON(asJSONList(c()))), 'null')
expect_equal(asJSONList(NULL), NULL)
})
test_that("isNamedList", {
expect_true(isNamedList(list()))
expect_true(isNamedList(list(a = 1, b = 2)))
expect_false(isNamedList(list(1)))
expect_false(isNamedList(list(1, a = 2)))
expect_false(isNamedList(NULL))
expect_false(isNamedList("a"))
})
test_that("is.tag", {
expect_true(is.tag(tags$div()))
expect_false(is.tag(list()))
})
test_that("is.htmlwidget", {
expect_true(is.htmlwidget(reactable(data.frame(x = 1))))
expect_false(is.htmlwidget(div()))
})
test_that("is.htmlDependency", {
dep <- htmlDependency("dep", "0.1.0", "/path/to/dep")
expect_true(is.htmlDependency(dep))
expect_false(is.htmlDependency(div()))
})
test_that("isTagList", {
expect_true(isTagList(tagList()))
expect_true(isTagList(tagList("a")))
expect_true(isTagList(tagList(1, div())))
expect_true(isTagList(list(div(), span())))
expect_false(isTagList(div()))
expect_false(isTagList(list(div(), list())))
})
test_that("asReactTag", {
# Nodes should be strings
expect_equal(asReactTag("text"), "text")
expect_equal(asReactTag("\u2718"), "\u2718")
expect_equal(asReactTag(123), "123")
expect_equal(asReactTag(TRUE), "TRUE")
expect_equal(asReactTag(NA), "NA") # should be "NA" rather than NA_character_
expect_equal(asReactTag(NA_character_), "NA") # should be "NA" rather than NA_character_
expect_equal(asReactTag(factor("xy")), "xy")
expect_equal(asReactTag(as.Date("2019-01-03")), "2019-01-03")
expect_equal(asReactTag(list("text")), "text")
# NULLs should be left as-is
expect_equal(asReactTag(NULL), NULL)
# Tags should be extracted from nested tables
tag <- asReactTag(reactable(data.frame(x = 1)))
expect_true(is.tag(tag))
# Nested tables should be marked
expect_true(tag$attribs$nested)
# All other htmlwidgets should be converted to tags
tbl <- reactable(data.frame(x = 1))
class(tbl) <- c("my-widget", "htmlwidget")
tag <- asReactTag(tbl)
expect_equal(tag$name, "WidgetContainer")
expect_equal(tag$attribs, list(key = digest::digest(tbl)))
expect_equal(findDependencies(tag), findDependencies(tbl))
expect_equal(length(tag$children), 1)
expect_equal(tag$children[[1]]$name, "Fragment")
# Tag lists should be unnested and wrapped in fragments
expect_equal(asReactTag(tagList()), reactR::React$Fragment())
expect_equal(asReactTag(tagList(div("x"))), reactR::React$Fragment(div("x")))
expect_equal(asReactTag(tagList(div(), "x")), reactR::React$Fragment(div(), "x"))
# htmlwidgets in tag lists
tag <- asReactTag(tagList(reactable(data.frame(x = 1)), "y"))
expect_equal(length(tag$children), 2)
expect_true(is.tag(tag$children[[1]]))
expect_equal(tag$children[[2]], "y")
# Nested tags should be unnested
nestedTag <- div(
list(
div(),
div(list(div()))
)
)
expected <- div(
div(),
div(div())
)
expect_equal(asReactTag(nestedTag), expected)
nestedTag <- div(
tagList("a", div(
tagList("b", span("c", class = "c"))
))
)
expected <- div("a", div("b", span("c", className = "c")))
expect_equal(asReactTag(nestedTag), expected)
nestedTagList <- tagList(
div(class = "a"),
tagList(
div(),
tagList("x", span("y", class = "y"))
)
)
expected <- reactR::React$Fragment(
div(className = "a"),
div(),
"x",
span("y", className = "y")
)
expect_equal(asReactTag(nestedTagList), expected)
# Null elements should be pruned
expect_equal(asReactTag(div(1, NULL, 3)), div("1", "3"))
expect_equal(asReactTag(tagList(NULL, "a", tagList(NULL, "b", NULL), div(NULL, "c"))),
reactR::React$Fragment("a", "b", div("c")))
# Attributes should be converted
expect_equal(asReactTag(div(style = "color: red", class = "cls")),
div(style = list(color = "red"), className = "cls"))
# Attributes should be preserved
expect_equal(asReactTag(div(factor("xy"))), div("xy"))
expect_equal(asReactTag(div(div(as.Date("2019-01-03")))), div(div("2019-01-03")))
# Duplicate attributes should be included and collapsed (e.g., for likelihood of
# duplicate class attributes in HTML widgets with htmlwidgets >= 1.6.0)
expect_equal(asReactTag(div(class = "a", class = "b")), div(className = "a b"))
expect_equal(
asReactTag(span(class = "a", test = "t", style = list(color = "red"), class = "bb", style = list(color = "blue"))),
span(test = "t", style = list(list(color = "red"), list(color = "blue")), className = "a bb")
)
})
test_that("asReactTag preserves HTML dependencies", {
dep <- htmlDependency("dep", "0.1.0", "/path/to/dep")
dep2 <- htmlDependency("dep2", "0.5.0", "/path/to/dep2")
# Single tag
tag <- attachDependencies(div(div("x")), dep)
expect_equal(htmlDependencies(asReactTag(tag)), list(dep))
# Tag w/ nested deps
tag <- div(attachDependencies(div("x"), dep))
expect_equal(htmlDependencies(asReactTag(tag)$children[[1]]), list(dep))
# Multiple nested deps
tag <- div(attachDependencies(div("x"), dep2), attachDependencies(div("x"), dep))
expect_equal(findDependencies(asReactTag(tag)), list(dep2, dep))
# Tag list
tag <- attachDependencies(tagList(div("x")), dep)
expect_equal(htmlDependencies(asReactTag(tag)), list(dep))
# Tag list w/ nested tag deps
tag <- attachDependencies(tagList(div("x"), attachDependencies(div("y"), dep)), dep2)
expect_equal(findDependencies(asReactTag(tag)), list(dep, dep2))
# Tag list w/ nested tag list deps
tag <- attachDependencies(tagList(div("x"), attachDependencies(tagList("y"), dep)), dep2)
expect_equal(findDependencies(asReactTag(tag)), list(dep2, dep))
# Tag w/ nested tag list deps
tag <- div(attachDependencies(tagList(div("x")), dep), div("y"))
expect_equal(findDependencies(asReactTag(tag)), list(dep))
# HTML dependency objects
tag <- tagList("x", "y", dep)
expect_equal(asReactTag(tag), attachDependencies(reactR::React$Fragment("x", "y"), dep))
tag <- div("x", div(), dep, dep2, "z")
expect_equal(asReactTag(tag), attachDependencies(div("x", div(), "z"), list(dep, dep2)))
# Nested HTML dependency objects
tag <- tagList("x", div(dep), span("y"))
expect_equal(asReactTag(tag), reactR::React$Fragment("x", attachDependencies(div(), dep), span("y")))
tag <- div("x", tagList(dep), span("y"))
expect_equal(asReactTag(tag), attachDependencies(div("x", span("y")), dep))
# HTML dependencies in nested tables
tbl <- reactable(
data.frame(x = 1),
columns = list(x = colDef(cell = function() tagList(dep, dep2)))
)
tag <- asReactTag(tbl)
expect_equal(htmlDependencies(tag), list(dep, dep2))
})
test_that("asReactAttributes", {
attribs <- list(class = "cls", "for" = "id", tabindex = 1)
expected <- list(className = "cls", htmlFor = "id", tabIndex = 1)
expect_equal(asReactAttributes(attribs, "th"), expected)
attribs <- list(value = "x")
expect_equal(asReactAttributes(attribs, "input"), list(defaultValue = "x"))
expect_equal(asReactAttributes(attribs, "select"), list(defaultValue = "x"))
expect_equal(asReactAttributes(attribs, "textarea"), list(defaultValue = "x"))
expect_equal(asReactAttributes(attribs, "option"), list(value = "x"))
expect_equal(asReactAttributes(attribs, "button"), list(value = "x"))
attribs <- list(checked = NA)
expect_equal(asReactAttributes(attribs, "input"), list(defaultChecked = TRUE))
expect_equal(asReactAttributes(attribs, "div"), list(checked = NA))
attribs <- list(onchange = "onChange(this, event)", onclick = "console.log(this, event);")
expect_equal(
asReactAttributes(attribs, "select"),
list(
onChange = JS("function(_e){(function(event){onChange(this, event)}).apply(event.target,[_e])}"),
onClick = JS("function(_e){(function(event){console.log(this, event);}).apply(event.target,[_e])}")
)
)
attribs <- list(style = "border: none; color: red; text-align: left")
expected <- list(style = list(border = "none", color = "red", "text-align" = "left"))
expect_equal(asReactAttributes(attribs, "div"), expected)
attribs <- list(style = list(border = "none"))
expected <- list(style = list(border = "none"))
expect_equal(asReactAttributes(attribs, "div"), expected)
# Non-converted attributes
expect_equal(asReactAttributes(list("data-attr" = "t"), "div"), list("data-attr" = "t"))
expect_equal(asReactAttributes(list("aria-label" = "lab"), "div"), list("aria-label" = "lab"))
})
test_that("asReactStyle", {
expect_equal(asReactStyle("color: red"), list(color = "red"))
expect_equal(asReactStyle("color: red;"), list(color = "red"))
expect_equal(asReactStyle(" color: red; margin-bottom:55px ;"),
list(color = "red", "margin-bottom" = "55px"))
expect_equal(asReactStyle(" color: red ;; margin-bott"),
list(color = "red"))
expect_equal(asReactStyle("color"), list())
expect_equal(asReactStyle(list(height = 0)), list(height = 0))
})
test_that("trimws", {
expect_equal(trimws(" "), "")
expect_equal(trimws("xvz "), "xvz")
expect_equal(trimws("abd "), "abd")
expect_equal(trimws(" xvz "), "xvz")
})
test_that("callFunc", {
expect_equal(callFunc(function(x) x, 5), 5)
expect_equal(callFunc(function(x) x, 5, "a", "b"), 5)
expect_equal(callFunc(function(x, y) x + y, 5, 1), 6)
expect_equal(callFunc(function(x, y) x + y, 5, 1), 6)
expect_equal(callFunc(function(x) x), NULL)
expect_equal(callFunc(function(x, y) y, "x"), NULL)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/valid_corr.R
\name{valid_corr}
\alias{valid_corr}
\title{Determine Correlation Bounds for Ordinal, Continuous, Poisson, and/or Negative Binomial Variables: Correlation Method 1}
\usage{
valid_corr(k_cat = 0, k_cont = 0, k_pois = 0, k_nb = 0,
method = c("Fleishman", "Polynomial"), means = NULL, vars = NULL,
skews = NULL, skurts = NULL, fifths = NULL, sixths = NULL,
Six = list(), marginal = list(), lam = NULL, size = NULL,
prob = NULL, mu = NULL, rho = NULL, n = 100000, seed = 1234)
}
\arguments{
\item{k_cat}{the number of ordinal (r >= 2 categories) variables (default = 0)}
\item{k_cont}{the number of continuous variables (default = 0)}
\item{k_pois}{the number of Poisson variables (default = 0)}
\item{k_nb}{the number of Negative Binomial variables (default = 0)}
\item{method}{the method used to generate the k_cont continuous variables. "Fleishman" uses a third-order polynomial transformation
and "Polynomial" uses Headrick's fifth-order transformation.}
\item{means}{a vector of means for the k_cont continuous variables (i.e. = rep(0, k_cont))}
\item{vars}{a vector of variances (i.e. = rep(1, k_cont))}
\item{skews}{a vector of skewness values (i.e. = rep(0, k_cont))}
\item{skurts}{a vector of standardized kurtoses (kurtosis - 3, so that normal variables have a value of 0; i.e. = rep(0, k_cont))}
\item{fifths}{a vector of standardized fifth cumulants (not necessary for \code{method} = "Fleishman"; i.e. = rep(0, k_cont))}
\item{sixths}{a vector of standardized sixth cumulants (not necessary for \code{method} = "Fleishman"; i.e. = rep(0, k_cont))}
\item{Six}{a list of vectors of correction values to add to the sixth cumulants if no valid pdf constants are found,
ex: \code{Six = list(seq(0.01, 2,by = 0.01), seq(1, 10,by = 0.5))}; if no correction is desired for variable Y_i, set the i-th list
component equal to NULL}
\item{marginal}{a list of length equal to \code{k_cat}; the i-th element is a vector of the cumulative
probabilities defining the marginal distribution of the i-th variable;
if the variable can take r values, the vector will contain r - 1 probabilities (the r-th is assumed to be 1; default = list())}
\item{lam}{a vector of lambda (> 0) constants for the Poisson variables (see \code{\link[stats]{Poisson}})}
\item{size}{a vector of size parameters for the Negative Binomial variables (see \code{\link[stats]{NegBinomial}})}
\item{prob}{a vector of success probability parameters}
\item{mu}{a vector of mean parameters (*Note: either \code{prob} or \code{mu} should be supplied for all Negative Binomial variables,
not a mixture; default = NULL)}
\item{rho}{the target correlation matrix (\emph{must be ordered ordinal, continuous, Poisson, Negative Binomial}; default = NULL)}
\item{n}{the sample size (i.e. the length of each simulated variable; default = 100000)}
\item{seed}{the seed value for random number generation (default = 1234)}
}
\value{
A list with components:
\code{L_rho} the lower correlation bound
\code{U_rho} the upper correlation bound
If continuous variables are desired, additional components are:
\code{constants} the calculated constants
\code{sixth_correction} a vector of the sixth cumulant correction values
\code{valid.pdf} a vector with i-th component equal to "TRUE" if variable Y_i has a valid power method pdf, else "FALSE"
If a target correlation matrix rho is provided, each pairwise correlation is checked to see if it is within the lower and upper
bounds. If the correlation is outside the bounds, the indices of the variable pair are given.
}
\description{
This function calculates the lower and upper correlation bounds for the given distributions and
checks if a given target correlation matrix \code{rho} is within the bounds. It should be used before simulation with
\code{\link[SimMultiCorrData]{rcorrvar}}. However, even if all pairwise correlations fall within the bounds, it is still possible
that the desired correlation matrix is not feasible. This is particularly true when ordinal variables (r >= 2 categories) are
generated or negative correlations are desired. Therefore, this function should be used as a general check to eliminate pairwise correlations that are obviously
not reproducible. It will help prevent errors when executing the simulation.
Note: Some pieces of the function code have been adapted from Demirtas, Hu, & Allozi's (2017) \code{\link[PoisBinOrdNor]{validation_specs}}.
This function (\code{\link[SimMultiCorrData]{valid_corr}}) extends the methods to:
1) non-normal continuous variables generated by Fleishman's third-order or Headrick's fifth-order polynomial transformation method, and
2) Negative Binomial variables (including all pairwise correlations involving them).
Please see the \bold{Comparison of Method 1 and Method 2} vignette for more information regarding method 1.
}
\section{Reasons for Function Errors}{
1) The most likely cause for function errors is that no solutions to \code{\link[SimMultiCorrData]{fleish}} or
\code{\link[SimMultiCorrData]{poly}} converged when using \code{\link[SimMultiCorrData]{find_constants}}. If this happens,
the simulation will stop. It may help to first use \code{\link[SimMultiCorrData]{find_constants}} for each continuous variable to
determine if a vector of sixth cumulant correction values is needed. If the standardized cumulants are obtained from \code{calc_theory},
the user may need to use rounded values as inputs (i.e.
\code{skews = round(skews, 8)}). Due to the nature of the integration involved in \code{calc_theory}, the results are
approximations. Greater accuracy can be achieved by increasing the number of subdivisions (\code{sub}) used in the integration
process (for example, to ensure that the skew is exactly 0 for symmetric distributions).
2) In addition, the kurtosis may be outside the region of possible values. There is an associated lower boundary for kurtosis associated
with a given skew (for Fleishman's method) or skew and fifth and sixth cumulants (for Headrick's method). Use
\code{\link[SimMultiCorrData]{calc_lower_skurt}} to determine the boundary for a given set of cumulants.
}
\section{The Generate, Sort, and Correlate (GSC, Demirtas & Hedeker, 2011, \doi{10.1198/tast.2011.10090}) Algorithm}{
The GSC algorithm is a flexible method for determining empirical correlation bounds when the theoretical bounds are unknown.
The steps are as follows:
1) Generate independent random samples from the desired distributions using a large number of observations (i.e. N = 100,000).
2) Lower Bound: Sort the two variables in opposite directions (i.e., one increasing and one decreasing) and find the sample correlation.
3) Upper Bound: Sort the two variables in the same direction and find the sample correlation.
Demirtas & Hedeker showed that the empirical bounds computed from the GSC method are similar to the theoretical bounds (when they are known).
}
\section{The Frechet-Hoeffding Correlation Bounds}{
Suppose two random variables \eqn{Y_{i}} and \eqn{Y_{j}} have cumulative distribution functions given by \eqn{F_{i}} and \eqn{F_{j}}.
Let U be a uniform(0,1) random variable, i.e. representing the distribution of the standard normal cdf. Then Hoeffding (1940) and
Frechet (1951) showed that bounds for the correlation between \eqn{Y_{i}} and \eqn{Y_{j}} are given by
\deqn{(corr(F_{i}^{-1}(U), F_{j}^{-1}(1-U)), corr(F_{i}^{-1}(U), F_{j}^{-1}(U)))}
The processes used to find the correlation bounds for each variable type are described below:
}
\section{Ordinal Variables}{
Binary pairs: The correlation bounds are determined as in Demirtas et al. (2012, \doi{10.1002/sim.5362}), who used the method of Emrich &
Piedmonte (1991, \doi{10.1080/00031305.1991.10475828}). The joint distribution is determined by "borrowing" the moments of a multivariate normal
distribution. For two binary variables \eqn{Y_{i}} and \eqn{Y_{j}}, with success probabilities \eqn{p_{i}} and \eqn{p_{j}}, the lower
correlation bound is given by
\deqn{max(-\sqrt{(p_{i}p_{j})/(q_{i}q_{j})},\ -\sqrt{(q_{i}q_{j})/(p_{i}p_{j})})}
and the upper bound by
\deqn{min(\sqrt{(p_{i}q_{j})/(q_{i}p_{j})},\ \sqrt{(q_{i}p_{j})/(p_{i}q_{j})})}
Here, \eqn{q_{i} = 1 - p_{i}} and \eqn{q_{j} = 1 - p_{j}}.
Binary-Ordinal or Ordinal-Ordinal pairs: Randomly generated variables with the given marginal distributions are used in the
GSC algorithm to find the correlation bounds.
}
\section{Continuous Variables}{
Continuous variables are randomly generated using constants from \code{\link[SimMultiCorrData]{find_constants}} and a vector of sixth
cumulant correction values (if provided.) The GSC algorithm is used to find the lower and upper bounds.
}
\section{Poisson Variables}{
Poisson variables with the given means (lam) are randomly generated using the inverse cdf method. The Frechet-Hoeffding bounds
are used for the correlation bounds.
}
\section{Negative Binomial Variables}{
Negative Binomial variables with the given sizes and success probabilities (prob) or means (mu) are randomly generated using the
inverse cdf method. The Frechet-Hoeffding bounds are used for the correlation bounds.
}
\section{Continuous - Ordinal Pairs}{
Randomly generated ordinal variables with the given marginal distributions and the previously generated continuous variables are used in the
GSC algorithm to find the correlation bounds.
}
\section{Ordinal - Poisson Pairs}{
Randomly generated ordinal variables with the given marginal distributions and randomly generated Poisson variables with the given
means (lam) are used in the GSC algorithm to find the correlation bounds.
}
\section{Ordinal - Negative Binomial Pairs}{
Randomly generated ordinal variables with the given marginal distributions and randomly generated Negative Binomial variables with
the given sizes and success probabilities (prob) or means (mu) are used in the GSC algorithm to find the correlation bounds.
}
\section{Continuous - Poisson Pairs}{
The previously generated continuous variables and randomly generated Poisson variables with the given
means (lam) are used in the GSC algorithm to find the correlation bounds.
}
\section{Continuous - Negative Binomial Pairs}{
The previously generated continuous variables and randomly generated Negative Binomial variables with
the given sizes and success probabilities (prob) or means (mu) are used in the GSC algorithm to find the correlation bounds.
}
\section{Poisson - Negative Binomial Pairs}{
Poisson variables with the given means (lam) and Negative Binomial variables with the given sizes and success probabilities (prob)
or means (mu) are randomly generated using the inverse cdf method. The Frechet-Hoeffding bounds
are used for the correlation bounds.
}
\examples{
valid_corr(n = 1000, k_cat = 1, k_cont = 1, method = "Polynomial",
means = 0, vars = 1, skews = 0, skurts = 0, fifths = 0, sixths = 0,
marginal = list(c(1/3, 2/3)), rho = matrix(c(1, 0.4, 0.4, 1), 2, 2))
\dontrun{
# Binary, Ordinal, Continuous, Poisson, and Negative Binomial Variables
options(scipen = 999)
seed <- 1234
n <- 10000
# Continuous Distributions: Normal, t (df = 10), Chisq (df = 4),
# Beta (a = 4, b = 2), Gamma (a = 4, b = 4)
Dist <- c("Gaussian", "t", "Chisq", "Beta", "Gamma")
# calculate standardized cumulants
# those for the normal and t distributions are rounded to ensure the
# correct values (i.e. skew = 0)
M1 <- round(calc_theory(Dist = "Gaussian", params = c(0, 1)), 8)
M2 <- round(calc_theory(Dist = "t", params = 10), 8)
M3 <- calc_theory(Dist = "Chisq", params = 4)
M4 <- calc_theory(Dist = "Beta", params = c(4, 2))
M5 <- calc_theory(Dist = "Gamma", params = c(4, 4))
M <- cbind(M1, M2, M3, M4, M5)
M <- round(M[-c(1:2),], digits = 6)
colnames(M) <- Dist
rownames(M) <- c("skew", "skurtosis", "fifth", "sixth")
means <- rep(0, length(Dist))
vars <- rep(1, length(Dist))
# Binary and Ordinal Distributions
marginal <- list(0.3, 0.4, c(0.1, 0.5), c(0.3, 0.6, 0.9),
c(0.2, 0.4, 0.7, 0.8))
support <- list()
# Poisson Distributions
lam <- c(1, 5, 10)
# Negative Binomial Distributions
size <- c(3, 6)
prob <- c(0.2, 0.8)
ncat <- length(marginal)
ncont <- ncol(M)
npois <- length(lam)
nnb <- length(size)
# Create correlation matrix from a uniform distribution (-0.8, 0.8)
set.seed(seed)
Rey <- diag(1, nrow = (ncat + ncont + npois + nnb))
for (i in 1:nrow(Rey)) {
for (j in 1:ncol(Rey)) {
if (i > j) Rey[i, j] <- runif(1, -0.8, 0.8)
Rey[j, i] <- Rey[i, j]
}
}
# Test for positive-definiteness
library(Matrix)
if(min(eigen(Rey, symmetric = TRUE)$values) < 0) {
Rey <- as.matrix(nearPD(Rey, corr = T, keepDiag = T)$mat)
}
# Make sure Rey is within upper and lower correlation limits
valid <- valid_corr(k_cat = ncat, k_cont = ncont, k_pois = npois,
k_nb = nnb, method = "Polynomial", means = means,
vars = vars, skews = M[1, ], skurts = M[2, ],
fifths = M[3, ], sixths = M[4, ], marginal = marginal,
lam = lam, size = size, prob = prob, rho = Rey,
seed = seed)
}
}
\references{
Please see \code{\link[SimMultiCorrData]{rcorrvar}} for additional references.
Demirtas H & Hedeker D (2011). A practical way for computing approximate lower and upper correlation bounds.
American Statistician, 65(2): 104-109. \doi{10.1198/tast.2011.10090}.
Demirtas H, Hedeker D, & Mermelstein RJ (2012). Simulation of massive public health data by power polynomials.
Statistics in Medicine, 31(27): 3337-3346. \doi{10.1002/sim.5362}.
Emrich LJ & Piedmonte MR (1991). A Method for Generating High-Dimensional Multivariate Binary Variables. The American Statistician, 45(4): 302-4.
\doi{10.1080/00031305.1991.10475828}.
Frechet M. Sur les tableaux de correlation dont les marges sont donnees. Ann. l'Univ. Lyon SectA. 1951;14:53-77.
Hoeffding W. Scale-invariant correlation theory. In: Fisher NI, Sen PK, editors. The collected works of Wassily Hoeffding.
New York: Springer-Verlag; 1994. p. 57-107.
Hakan Demirtas, Yiran Hu and Rawan Allozi (2017). PoisBinOrdNor: Data Generation with Poisson, Binary, Ordinal and Normal Components.
R package version 1.4. \url{https://CRAN.R-project.org/package=PoisBinOrdNor}
}
\seealso{
\code{\link[SimMultiCorrData]{find_constants}}, \code{\link[SimMultiCorrData]{rcorrvar}}
}
\keyword{Binomial,}
\keyword{Fleishman,}
\keyword{Headrick,}
\keyword{Negative}
\keyword{Poisson,}
\keyword{bounds,}
\keyword{continuous,}
\keyword{correlation,}
\keyword{method1}
\keyword{ordinal,}
| /man/valid_corr.Rd | no_license | cran/SimMultiCorrData | R | false | true | 15,042 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/valid_corr.R
\name{valid_corr}
\alias{valid_corr}
\title{Determine Correlation Bounds for Ordinal, Continuous, Poisson, and/or Negative Binomial Variables: Correlation Method 1}
\usage{
valid_corr(k_cat = 0, k_cont = 0, k_pois = 0, k_nb = 0,
method = c("Fleishman", "Polynomial"), means = NULL, vars = NULL,
skews = NULL, skurts = NULL, fifths = NULL, sixths = NULL,
Six = list(), marginal = list(), lam = NULL, size = NULL,
prob = NULL, mu = NULL, rho = NULL, n = 100000, seed = 1234)
}
\arguments{
\item{k_cat}{the number of ordinal (r >= 2 categories) variables (default = 0)}
\item{k_cont}{the number of continuous variables (default = 0)}
\item{k_pois}{the number of Poisson variables (default = 0)}
\item{k_nb}{the number of Negative Binomial variables (default = 0)}
\item{method}{the method used to generate the k_cont continuous variables. "Fleishman" uses a third-order polynomial transformation
and "Polynomial" uses Headrick's fifth-order transformation.}
\item{means}{a vector of means for the k_cont continuous variables (i.e. = rep(0, k_cont))}
\item{vars}{a vector of variances (i.e. = rep(1, k_cont))}
\item{skews}{a vector of skewness values (i.e. = rep(0, k_cont))}
\item{skurts}{a vector of standardized kurtoses (kurtosis - 3, so that normal variables have a value of 0; i.e. = rep(0, k_cont))}
\item{fifths}{a vector of standardized fifth cumulants (not necessary for \code{method} = "Fleishman"; i.e. = rep(0, k_cont))}
\item{sixths}{a vector of standardized sixth cumulants (not necessary for \code{method} = "Fleishman"; i.e. = rep(0, k_cont))}
\item{Six}{a list of vectors of correction values to add to the sixth cumulants if no valid pdf constants are found,
ex: \code{Six = list(seq(0.01, 2,by = 0.01), seq(1, 10,by = 0.5))}; if no correction is desired for variable Y_i, set the i-th list
component equal to NULL}
\item{marginal}{a list of length equal to \code{k_cat}; the i-th element is a vector of the cumulative
probabilities defining the marginal distribution of the i-th variable;
if the variable can take r values, the vector will contain r - 1 probabilities (the r-th is assumed to be 1; default = list())}
\item{lam}{a vector of lambda (> 0) constants for the Poisson variables (see \code{\link[stats]{Poisson}})}
\item{size}{a vector of size parameters for the Negative Binomial variables (see \code{\link[stats]{NegBinomial}})}
\item{prob}{a vector of success probability parameters}
\item{mu}{a vector of mean parameters (*Note: either \code{prob} or \code{mu} should be supplied for all Negative Binomial variables,
not a mixture; default = NULL)}
\item{rho}{the target correlation matrix (\emph{must be ordered ordinal, continuous, Poisson, Negative Binomial}; default = NULL)}
\item{n}{the sample size (i.e. the length of each simulated variable; default = 100000)}
\item{seed}{the seed value for random number generation (default = 1234)}
}
\value{
A list with components:
\code{L_rho} the lower correlation bound
\code{U_rho} the upper correlation bound
If continuous variables are desired, additional components are:
\code{constants} the calculated constants
\code{sixth_correction} a vector of the sixth cumulant correction values
\code{valid.pdf} a vector with i-th component equal to "TRUE" if variable Y_i has a valid power method pdf, else "FALSE"
If a target correlation matrix rho is provided, each pairwise correlation is checked to see if it is within the lower and upper
bounds. If the correlation is outside the bounds, the indices of the variable pair are given.
}
\description{
This function calculates the lower and upper correlation bounds for the given distributions and
checks if a given target correlation matrix \code{rho} is within the bounds. It should be used before simulation with
\code{\link[SimMultiCorrData]{rcorrvar}}. However, even if all pairwise correlations fall within the bounds, it is still possible
that the desired correlation matrix is not feasible. This is particularly true when ordinal variables (r >= 2 categories) are
generated or negative correlations are desired. Therefore, this function should be used as a general check to eliminate pairwise correlations that are obviously
not reproducible. It will help prevent errors when executing the simulation.
Note: Some pieces of the function code have been adapted from Demirtas, Hu, & Allozi's (2017) \code{\link[PoisBinOrdNor]{validation_specs}}.
This function (\code{\link[SimMultiCorrData]{valid_corr}}) extends the methods to:
1) non-normal continuous variables generated by Fleishman's third-order or Headrick's fifth-order polynomial transformation method, and
2) Negative Binomial variables (including all pairwise correlations involving them).
Please see the \bold{Comparison of Method 1 and Method 2} vignette for more information regarding method 1.
}
\section{Reasons for Function Errors}{
1) The most likely cause for function errors is that no solutions to \code{\link[SimMultiCorrData]{fleish}} or
\code{\link[SimMultiCorrData]{poly}} converged when using \code{\link[SimMultiCorrData]{find_constants}}. If this happens,
the simulation will stop. It may help to first use \code{\link[SimMultiCorrData]{find_constants}} for each continuous variable to
determine if a vector of sixth cumulant correction values is needed. If the standardized cumulants are obtained from \code{calc_theory},
the user may need to use rounded values as inputs (i.e.
\code{skews = round(skews, 8)}). Due to the nature of the integration involved in \code{calc_theory}, the results are
approximations. Greater accuracy can be achieved by increasing the number of subdivisions (\code{sub}) used in the integration
process. This may be necessary, for example, in order to ensure that skew is exactly 0 for symmetric distributions.
2) In addition, the kurtosis may be outside the region of possible values. There is an associated lower boundary for kurtosis associated
with a given skew (for Fleishman's method) or skew and fifth and sixth cumulants (for Headrick's method). Use
\code{\link[SimMultiCorrData]{calc_lower_skurt}} to determine the boundary for a given set of cumulants.
}
\section{The Generate, Sort, and Correlate (GSC, Demirtas & Hedeker, 2011, \doi{10.1198/tast.2011.10090}) Algorithm}{
The GSC algorithm is a flexible method for determining empirical correlation bounds when the theoretical bounds are unknown.
The steps are as follows:
1) Generate independent random samples from the desired distributions using a large number of observations (i.e. N = 100,000).
2) Lower Bound: Sort the two variables in opposite directions (i.e., one increasing and one decreasing) and find the sample correlation.
3) Upper Bound: Sort the two variables in the same direction and find the sample correlation.
Demirtas & Hedeker showed that the empirical bounds computed from the GSC method are similar to the theoretical bounds (when they are known).
}
\section{The Frechet-Hoeffding Correlation Bounds}{
Suppose two random variables \eqn{Y_{i}} and \eqn{Y_{j}} have cumulative distribution functions given by \eqn{F_{i}} and \eqn{F_{j}}.
Let U be a uniform(0,1) random variable, i.e. representing the distribution of the standard normal cdf. Then Hoeffding (1940) and
Frechet (1951) showed that bounds for the correlation between \eqn{Y_{i}} and \eqn{Y_{j}} are given by
\deqn{(corr(F_{i}^{-1}(U), F_{j}^{-1}(1-U)), corr(F_{i}^{-1}(U), F_{j}^{-1}(U)))}
The processes used to find the correlation bounds for each variable type are described below:
}
\section{Ordinal Variables}{
Binary pairs: The correlation bounds are determined as in Demirtas et al. (2012, \doi{10.1002/sim.5362}), who used the method of Emrich &
Piedmonte (1991, \doi{10.1080/00031305.1991.10475828}). The joint distribution is determined by "borrowing" the moments of a multivariate normal
distribution. For two binary variables \eqn{Y_{i}} and \eqn{Y_{j}}, with success probabilities \eqn{p_{i}} and \eqn{p_{j}}, the lower
correlation bound is given by
\deqn{max(-\sqrt{(p_{i}p_{j})/(q_{i}q_{j})},\ -\sqrt{(q_{i}q_{j})/(p_{i}p_{j})})}
and the upper bound by
\deqn{min(\sqrt{(p_{i}q_{j})/(q_{i}p_{j})},\ \sqrt{(q_{i}p_{j})/(p_{i}q_{j})})}
Here, \eqn{q_{i} = 1 - p_{i}} and \eqn{q_{j} = 1 - p_{j}}.
Binary-Ordinal or Ordinal-Ordinal pairs: Randomly generated variables with the given marginal distributions are used in the
GSC algorithm to find the correlation bounds.
}
\section{Continuous Variables}{
Continuous variables are randomly generated using constants from \code{\link[SimMultiCorrData]{find_constants}} and a vector of sixth
cumulant correction values (if provided.) The GSC algorithm is used to find the lower and upper bounds.
}
\section{Poisson Variables}{
Poisson variables with the given means (lam) are randomly generated using the inverse cdf method. The Frechet-Hoeffding bounds
are used for the correlation bounds.
}
\section{Negative Binomial Variables}{
Negative Binomial variables with the given sizes and success probabilities (prob) or means (mu) are randomly generated using the
inverse cdf method. The Frechet-Hoeffding bounds are used for the correlation bounds.
}
\section{Continuous - Ordinal Pairs}{
Randomly generated ordinal variables with the given marginal distributions and the previously generated continuous variables are used in the
GSC algorithm to find the correlation bounds.
}
\section{Ordinal - Poisson Pairs}{
Randomly generated ordinal variables with the given marginal distributions and randomly generated Poisson variables with the given
means (lam) are used in the GSC algorithm to find the correlation bounds.
}
\section{Ordinal - Negative Binomial Pairs}{
Randomly generated ordinal variables with the given marginal distributions and randomly generated Negative Binomial variables with
the given sizes and success probabilities (prob) or means (mu) are used in the GSC algorithm to find the correlation bounds.
}
\section{Continuous - Poisson Pairs}{
The previously generated continuous variables and randomly generated Poisson variables with the given
means (lam) are used in the GSC algorithm to find the correlation bounds.
}
\section{Continuous - Negative Binomial Pairs}{
The previously generated continuous variables and randomly generated Negative Binomial variables with
the given sizes and success probabilities (prob) or means (mu) are used in the GSC algorithm to find the correlation bounds.
}
\section{Poisson - Negative Binomial Pairs}{
Poisson variables with the given means (lam) and Negative Binomial variables with the given sizes and success probabilities (prob)
or means (mu) are randomly generated using the inverse cdf method. The Frechet-Hoeffding bounds
are used for the correlation bounds.
}
\examples{
valid_corr(n = 1000, k_cat = 1, k_cont = 1, method = "Polynomial",
means = 0, vars = 1, skews = 0, skurts = 0, fifths = 0, sixths = 0,
marginal = list(c(1/3, 2/3)), rho = matrix(c(1, 0.4, 0.4, 1), 2, 2))
\dontrun{
# Binary, Ordinal, Continuous, Poisson, and Negative Binomial Variables
options(scipen = 999)
seed <- 1234
n <- 10000
# Continuous Distributions: Normal, t (df = 10), Chisq (df = 4),
# Beta (a = 4, b = 2), Gamma (a = 4, b = 4)
Dist <- c("Gaussian", "t", "Chisq", "Beta", "Gamma")
# calculate standardized cumulants
# those for the normal and t distributions are rounded to ensure the
# correct values (i.e. skew = 0)
M1 <- round(calc_theory(Dist = "Gaussian", params = c(0, 1)), 8)
M2 <- round(calc_theory(Dist = "t", params = 10), 8)
M3 <- calc_theory(Dist = "Chisq", params = 4)
M4 <- calc_theory(Dist = "Beta", params = c(4, 2))
M5 <- calc_theory(Dist = "Gamma", params = c(4, 4))
M <- cbind(M1, M2, M3, M4, M5)
M <- round(M[-c(1:2),], digits = 6)
colnames(M) <- Dist
rownames(M) <- c("skew", "skurtosis", "fifth", "sixth")
means <- rep(0, length(Dist))
vars <- rep(1, length(Dist))
# Binary and Ordinal Distributions
marginal <- list(0.3, 0.4, c(0.1, 0.5), c(0.3, 0.6, 0.9),
c(0.2, 0.4, 0.7, 0.8))
support <- list()
# Poisson Distributions
lam <- c(1, 5, 10)
# Negative Binomial Distributions
size <- c(3, 6)
prob <- c(0.2, 0.8)
ncat <- length(marginal)
ncont <- ncol(M)
npois <- length(lam)
nnb <- length(size)
# Create correlation matrix from a uniform distribution (-0.8, 0.8)
set.seed(seed)
Rey <- diag(1, nrow = (ncat + ncont + npois + nnb))
for (i in 1:nrow(Rey)) {
for (j in 1:ncol(Rey)) {
if (i > j) Rey[i, j] <- runif(1, -0.8, 0.8)
Rey[j, i] <- Rey[i, j]
}
}
# Test for positive-definiteness
library(Matrix)
if(min(eigen(Rey, symmetric = TRUE)$values) < 0) {
Rey <- as.matrix(nearPD(Rey, corr = T, keepDiag = T)$mat)
}
# Make sure Rey is within upper and lower correlation limits
valid <- valid_corr(k_cat = ncat, k_cont = ncont, k_pois = npois,
k_nb = nnb, method = "Polynomial", means = means,
vars = vars, skews = M[1, ], skurts = M[2, ],
fifths = M[3, ], sixths = M[4, ], marginal = marginal,
lam = lam, size = size, prob = prob, rho = Rey,
seed = seed)
}
}
\references{
Please see \code{\link[SimMultiCorrData]{rcorrvar}} for additional references.
Demirtas H & Hedeker D (2011). A practical way for computing approximate lower and upper correlation bounds.
American Statistician, 65(2): 104-109. \doi{10.1198/tast.2011.10090}.
Demirtas H, Hedeker D, & Mermelstein RJ (2012). Simulation of massive public health data by power polynomials.
Statistics in Medicine, 31(27): 3337-3346. \doi{10.1002/sim.5362}.
Emrich LJ & Piedmonte MR (1991). A Method for Generating High-Dimensional Multivariate Binary Variables. The American Statistician, 45(4): 302-4.
\doi{10.1080/00031305.1991.10475828}.
Frechet M. Sur les tableaux de correlation dont les marges sont donnees. Ann. l'Univ. Lyon SectA. 1951;14:53-77.
Hoeffding W. Scale-invariant correlation theory. In: Fisher NI, Sen PK, editors. The collected works of Wassily Hoeffding.
New York: Springer-Verlag; 1994. p. 57-107.
Hakan Demirtas, Yiran Hu and Rawan Allozi (2017). PoisBinOrdNor: Data Generation with Poisson, Binary, Ordinal and Normal Components.
R package version 1.4. \url{https://CRAN.R-project.org/package=PoisBinOrdNor}
}
\seealso{
\code{\link[SimMultiCorrData]{find_constants}}, \code{\link[SimMultiCorrData]{rcorrvar}}
}
\keyword{Binomial,}
\keyword{Fleishman,}
\keyword{Headrick,}
\keyword{Negative}
\keyword{Poisson,}
\keyword{bounds,}
\keyword{continuous,}
\keyword{correlation,}
\keyword{method1}
\keyword{ordinal,}
|
## Render the Sensor Alert performance report and publish it to S3.
library(rmarkdown)
library(knitr)
library(aws.s3)

## Render the R markdown report to HTML.
render("/home/ubuntu/Git/Sensor_Alert/Sensor_Alert_Performance.Rmd")

## Load the R environment file; it provides the AWS credentials
## (AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY) used by aws.s3 below.
readRenviron("/home/ubuntu/Git/API.Renviron")

## readRenviron() already placed the credentials in the process environment,
## so the previous Sys.setenv("X" = Sys.getenv("X")) calls were no-ops; only
## the default region actually needs to be set here.
Sys.setenv("AWS_DEFAULT_REGION" = "us-east-1")

## Upload the rendered HTML report to the public S3 bucket.
put_object(file = "/home/ubuntu/Git/Sensor_Alert/Sensor_Alert_Performance.html",
           bucket = "sensoralert",
           object = "Sensor_Alert.html",
           acl = "public-read")
| /.ipynb_checkpoints/Base_script-checkpoint.R | no_license | adeel1997/Sensor_Alert | R | false | false | 681 | r | library(rmarkdown);library(knitr);library(aws.s3)
## Render the R markdown file
render('/home/ubuntu/Git/Sensor_Alert/Sensor_Alert_Performance.Rmd')
## Adding the R environment file. This file contains the API used for analysis
readRenviron("/home/ubuntu/Git/API.Renviron")
## Add your AWS account details
Sys.setenv("AWS_ACCESS_KEY_ID" = Sys.getenv('AWS_ACCESS_KEY_ID'),
"AWS_SECRET_ACCESS_KEY" = Sys.getenv('AWS_SECRET_ACCESS_KEY'),
"AWS_DEFAULT_REGION" = "us-east-1")
## Putting the resulting HTML into the S3 bucket
put_object(file="/home/ubuntu/Git/Sensor_Alert/Sensor_Alert_Performance.html",bucket="sensoralert",
object="Sensor_Alert.html",acl="public-read")
|
## Auto-extracted example for the `center` dataset from the crrSC package:
## attaches the package and loads the multicenter bone-marrow
## transplantation data into the workspace.
library(crrSC)

### Name: center
### Title: Multicenter Bone Marrow transplantation data
### Aliases: center
### Keywords: datasets crrSC

### ** Examples

data(center)
| /data/genthat_extracted_code/crrSC/examples/center.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 171 | r | library(crrSC)
### Name: center
### Title: Multicenter Bone Marrow transplantation data
### Aliases: center
### Keywords: datasets crrSC
### ** Examples
data(center)
|
## Fit mortality-by-neighbourhood mixed models on the Neighbourhood series.
library("lme4")
library("nlme")

## Load the neighbourhood-level data.
## NOTE(review): the path contains a space after "results/ " -- confirm this
## matches the actual file name on disk.
m_neigh <- read.csv("../results/ Neighbourhood.csv")
## Unique neighbourhood id: settlement name within municipality.
m_neigh$colonia <- paste(m_neigh$SETT_NAME, m_neigh$MUN_NAME, sep = "_")
## Polynomial time terms (quadratic and cubic trend in days).
m_neigh$days2 <- m_neigh$days^2
m_neigh$days3 <- m_neigh$days^3

## NOTE(review): the three `model <-` assignments below overwrite each other;
## only the final lme() fit is kept, but the first two are still evaluated
## (and may be slow or error out). Consider removing or renaming them.

## NOTE(review): without parentheses, this lmer formula is parsed as
## value ~ (Type + days + days2 | SETT_NAME), i.e. a random-effects-only
## specification with no fixed effects -- confirm this is intended.
model <- lmer(
  value ~ Type + days + days2 | SETT_NAME,
  data = m_neigh
)

## GLS fit with an AR(1) error structure within each neighbourhood.
model <- gls(
  value~1+Type+days+days2+days3+colonia,
  correlation=corAR1(form=~as.integer(as.character(days))|colonia),
  method = "REML",
  na.action = na.omit,
  data = m_neigh
)

## Final model: LME with nested random intercepts
## (municipality, then settlement).
model <- lme(
  value~1+Type+days+days2+days3+colonia,
  random = list(
    MUN_NAME=pdIdent(~1),
    SETT_NAME=pdIdent(~1)
    #colonia = pdIdent(~1)
  ),
  method = "REML",
  control = lmeControl(
    niterEM = 150,
    msMaxIter = 200,
    returnObject = TRUE
  ),
  na.action = na.omit,
  data = m_neigh
)

## Standard residual diagnostic plots for the last fitted model.
par(mfrow=c(2,2))
plot(model)
| /analysis/006_mortality_kriging_plots/bin/model.R | no_license | KarolBL/Atlas-CDMX | R | false | false | 879 | r | library("lme4")
library("nlme")
m_neigh <- read.csv("../results/ Neighbourhood.csv")
m_neigh$colonia <- paste(m_neigh$SETT_NAME, m_neigh$MUN_NAME, sep = "_")
m_neigh$days2 <- m_neigh$days^2
m_neigh$days3 <- m_neigh$days^3
model <- lmer(
value ~ Type + days + days2 | SETT_NAME,
data = m_neigh
)
model <- gls(
value~1+Type+days+days2+days3+colonia,
correlation=corAR1(form=~as.integer(as.character(days))|colonia),
method = "REML",
na.action = na.omit,
data = m_neigh
)
model <- lme(
value~1+Type+days+days2+days3+colonia,
random = list(
MUN_NAME=pdIdent(~1),
SETT_NAME=pdIdent(~1)
#colonia = pdIdent(~1)
),
method = "REML",
control = lmeControl(
niterEM = 150,
msMaxIter = 200,
returnObject = TRUE
),
na.action = na.omit,
data = m_neigh
)
par(mfrow=c(2,2))
plot(model)
|
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%% CODE FUNCTION %%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%% Function vector lag %%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Vector_target: a vector of size [T]
# - Nb_lag: number of lags
# - beginning: TRUE: lag at the beginning
# FALSE: lag at the end
# Output: new vector of size [T-nb_lag]
# Drop `Nb_lag` observations from a vector, either at the start
# (beginning = TRUE) or at the end (beginning = FALSE).
#
# Args:
#   Vector_target: vector of length T.
#   Nb_lag: number of observations to drop.
#   beginning: TRUE drops the first Nb_lag entries, FALSE the last ones.
# Returns: a vector of length T - Nb_lag.
# Note: any pre-existing NA values are also removed (the dropped positions
# are marked NA and then all NAs are filtered out, as in the original).
Vector_lag <- function(Vector_target, Nb_lag, beginning){
  n_obs <- length(Vector_target)
  # Positions to discard, depending on which side is lagged.
  if (beginning == TRUE) {
    drop_idx <- seq_len(Nb_lag)
  } else {
    drop_idx <- (n_obs - Nb_lag + 1):n_obs
  }
  Vector_target[drop_idx] <- NA
  as.vector(Vector_target[!is.na(Vector_target)])
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%% Function matrix lag %%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Matrix_target: a matrix of size [T,n]
# - Nb_lag: number of lags
# - beginning: TRUE: lag at the beginning
# FALSE: lag at the end
# Output: Matrix of size [T-nb_lag,n]
# Drop `Nb_lag` rows from a matrix, either at the top (beginning = TRUE)
# or at the bottom (beginning = FALSE).
#
# Args:
#   Matrix_target: matrix of size [T, n].
#   Nb_lag: number of rows to drop.
#   beginning: TRUE drops the first Nb_lag rows, FALSE the last ones.
# Returns: a double matrix of size [T - Nb_lag, n].
# Note: as in the original, the dropped rows are marked NA and the whole
# matrix is refilled column-major from the remaining values, so any
# pre-existing NA values are removed as well.
Matrix_lag <- function(Matrix_target, Nb_lag, beginning){
  n_col <- ncol(Matrix_target)
  n_keep <- nrow(Matrix_target) - Nb_lag
  # Mark the rows to discard.
  if (beginning == TRUE) {
    Matrix_target[seq_len(Nb_lag), ] <- NA
  } else {
    Matrix_target[n_keep + seq_len(Nb_lag), ] <- NA
  }
  # Remaining values, flattened column-major.
  kept_values <- Matrix_target[!is.na(Matrix_target)]
  # Refill into a zero-initialized (double) buffer, column by column,
  # exactly like the original per-column loop.
  out <- matrix(0, n_keep, n_col)
  out[] <- kept_values
  as.matrix(out)
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%% Fuction EWS: seuil NSR %%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Var_Proba: vector of calculated probabilities
# - Dicho_Y: Binary variable
# - cutoff_interval
# Output:
# threshold with NSR method
# Optimal EWS cutoff under the Noise-to-Signal Ratio (NSR) criterion.
#
# Args:
#   Var_Proba: vector of fitted crisis probabilities.
#   Dicho_Y: binary crisis indicator (same length as Var_Proba).
#   cutoff_interval: step of the threshold grid on [0, 1].
# Returns: the threshold minimizing (false alarms / correct signals).
# Thresholds that generate no correct signal are stored as NA and sorted
# last, as in the original implementation.
EWS_NSR_Criterion <- function(Var_Proba, Dicho_Y, cutoff_interval){
  n_obs <- length(Var_Proba)
  obs_idx <- seq_len(n_obs)
  crisis <- Dicho_Y[obs_idx]
  threshold_grid <- seq(0, 1, by = cutoff_interval)
  grid_results <- matrix(0, nrow = (floor(1 / cutoff_interval) + 1), ncol = 2)
  row_id <- 0
  for (threshold in threshold_grid) {
    row_id <- row_id + 1
    # Alarm is raised whenever the probability reaches the threshold.
    alarm <- Var_Proba[obs_idx] >= threshold
    n_good_signal <- sum(alarm & crisis == 1)
    n_false_alarm <- sum(alarm & crisis == 0)
    if (n_good_signal == 0) {
      # No correct signal: ratio undefined, push this row to the end.
      grid_results[row_id, ] <- c(NA, NA)
    } else {
      grid_results[row_id, ] <- c(threshold, n_false_alarm / n_good_signal)
    }
  }
  # Smallest noise-to-signal ratio wins (NAs sort last by default).
  grid_results[order(grid_results[, 2]), ][1, 1]
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%% Function EWS: AM Criteria %%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Var_Proba: vector of calculated probabilities
# - Dicho_Y: Binary variable
# - cutoff_interval
# Output:
# threshold with AM method
# Optimal EWS cutoff under the Accuracy Measure (AM) criterion.
#
# Args:
#   Var_Proba: vector of fitted crisis probabilities.
#   Dicho_Y: binary crisis indicator.
#   cutoff_interval: step of the threshold grid on [0, 1].
# Returns: the threshold maximizing |sensitivity + specificity - 1|
# (Youden-style criterion); ties keep the smallest threshold via the
# stable decreasing sort, as in the original implementation.
EWS_AM_Criterion <- function(Var_Proba, Dicho_Y, cutoff_interval){
  n_obs <- length(Dicho_Y)
  proba <- Var_Proba[seq_len(n_obs)]
  n_crisis <- sum(Dicho_Y)          # number of crisis periods (Y == 1)
  n_calm <- n_obs - n_crisis        # number of calm periods (Y == 0)
  threshold_grid <- seq(0, 1, cutoff_interval)
  grid_results <- matrix(0, nrow = (floor(1 / cutoff_interval) + 1), ncol = 2)
  row_id <- 0
  for (threshold in threshold_grid) {
    row_id <- row_id + 1
    # Share of crises correctly signalled / calm periods correctly cleared.
    sensitivity <- sum(proba >= threshold & Dicho_Y == 1) / n_crisis
    specificity <- sum(proba < threshold & Dicho_Y == 0) / n_calm
    grid_results[row_id, 1] <- threshold
    grid_results[row_id, 2] <- abs(sensitivity + specificity - 1)
  }
  # Largest score wins.
  grid_results[order(grid_results[, 2], decreasing = TRUE), ][1, 1]
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%% Function EWS: seuil CSA %%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Var_Proba: vector of calculated probabilities
# - Dicho_Y: Binary variable
# - cutoff_interval
# Output:
# threshold with CSA method
# Optimal EWS cutoff under the CSA criterion.
#
# Args:
#   Var_Proba: vector of fitted crisis probabilities.
#   Dicho_Y: binary crisis indicator.
#   cutoff_interval: step of the threshold grid on [0, 1].
# Returns: the threshold minimizing |sensitivity - specificity|
# (i.e. where the two rates cross); ties keep the smallest threshold
# via the stable sort, as in the original implementation.
EWS_CSA_Criterion <- function(Var_Proba, Dicho_Y , cutoff_interval){
  n_obs <- length(Dicho_Y)
  proba <- Var_Proba[seq_len(n_obs)]
  n_crisis <- sum(Dicho_Y)          # number of crisis periods (Y == 1)
  n_calm <- n_obs - n_crisis        # number of calm periods (Y == 0)
  threshold_grid <- seq(0, 1, cutoff_interval)
  grid_results <- matrix(0, nrow = (floor(1 / cutoff_interval) + 1), ncol = 2)
  row_id <- 0
  for (threshold in threshold_grid) {
    row_id <- row_id + 1
    # Share of crises correctly signalled / calm periods correctly cleared.
    sensitivity <- sum(proba >= threshold & Dicho_Y == 1) / n_crisis
    specificity <- sum(proba < threshold & Dicho_Y == 0) / n_calm
    grid_results[row_id, 1] <- threshold
    grid_results[row_id, 2] <- abs(sensitivity - specificity)
  }
  # Smallest absolute gap between the two rates wins.
  grid_results[order(grid_results[, 2]), ][1, 1]
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%% Vector error %%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Dicho_Y: Binary variable
# - Exp_X: Matrix of explanatory variables
# - Intercept: Boolean variable that is equal to TRUE (with intercept) or FALSE (without)
# - Nb_Id: number of individuals
# - Lag: number of lag for the logistic estimation
# - type_model:
# 1: static
# 2: dynamic with the lag binary variable
# 3: dynamiC with the lag index variable
# 4: dynamiC with both lag binary and lag index variable
# Output:
# Vector of estimation errors
# Estimation errors of a logit model: observed binary outcome minus the
# fitted probability, for each in-sample period.
#
# Args mirror Logistic_Estimation (defined elsewhere in this file):
#   Dicho_Y: binary variable; Exp_X: explanatory variables;
#   Intercept: include an intercept?; Nb_Id: number of individuals;
#   Lag: estimation lag; type_model: 1 static, 2 lagged binary,
#   3 lagged index, 4 both.
# Returns: vector of residuals, one per fitted probability.
Vector_Error <- function(Dicho_Y, Exp_X, Intercept, Nb_Id, Lag, type_model)
{
  # Fit the logit model and pull out the fitted probabilities.
  fitted_probs <- Logistic_Estimation(Dicho_Y, Exp_X, Intercept,
                                      Nb_Id, Lag, type_model)$prob
  # Residual = observed outcome minus fitted probability.
  Dicho_Y[seq_along(fitted_probs)] - fitted_probs
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%% Logistic Estimation Function - 4 models %%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Dicho_Y: Binary variable
# - Exp_X: Matrix of explanatory variables
# - Intercept: Boolean variable that is equal to TRUE (with intercept) or FALSE (without)
# - Nb_Id: number of individuals
# - Lag: number of lag for the logistic estimation
# - type_model:
# 1: static
# 2: dynamic with the lag binary variable
# 3: dynamiC with the lag index variable
# 4: dynamiC with both lag binary and lag index variable
# Output:
# - "coeff": Coefficients of the logit regression
# - "AIC" : AIC criteria value
# - "BIC" : BIC criteria value
# - "R2" : R2 value
# - "LogLik" : LogLikelihood value
# - "VarCov" : matrix VarCov
Logistic_Estimation <- function(Dicho_Y, Exp_X, Intercept, Nb_Id, Lag, type_model)
{
# --------------------------------------------------------------------------------
# ------------------------------- Data Processing --------------------------------
# --------------------------------------------------------------------------------
T_size <- length(Dicho_Y) # number of periods for all sample
Nb_periode <- T_size/Nb_Id # number of periods for each country
# Add the intercept in the matrix of explanatory variables if Intercept=TRUE
if (Intercept==TRUE){
Cste <- as.vector(rep(1,T_size))
Exp_X <- cbind(Cste, Exp_X)
}else{
}
# For the model 2 and the model 4, we add the binary crisis in the matrix of explanatory variables
if (type_model==2){
Exp_X <- cbind(Exp_X, Dicho_Y)
}else if (type_model==4){
Exp_X <- cbind(Exp_X, Dicho_Y)
}else{
}
# Creation of Var_X_Reg and Var_Y_Reg
# - Var_X_Reg: vector or matrix of explanatory variables for the logit regression
# - Var_Y_Reg: vector of the binary variable for the logit regression
if (Nb_Id == 1){ # If there is only one Id (univariate case)
if ( length(Exp_X) == length(Dicho_Y) ) {
# Initialization
Var_X_Reg <- as.vector( rep ( 0 , (Nb_periode- Lag) ) )
Var_Y_Reg <- as.vector( rep ( 0 , (Nb_periode- Lag) ) )
# data processing with lag
Var_X_Reg[ 1 : (Nb_periode - Lag) ] <- Vector_lag( (Exp_X[1:Nb_periode]) , Lag , FALSE )
Var_Y_Reg[ 1 : (Nb_periode - Lag) ] <- Vector_lag( (Dicho_Y[1:Nb_periode]) , Lag , TRUE )
} else {
# Initialization
Var_X_Reg <- matrix(data = 0, nrow = (Nb_periode- Lag), ncol = (ncol(Exp_X)) )
Var_Y_Reg <- as.vector(rep(0, (Nb_periode- Lag)))
# data processing with lag
Var_X_Reg[1:(Nb_periode- Lag),] <- Matrix_lag( (Exp_X[1:Nb_periode,]) , Lag , FALSE )
Var_Y_Reg[1:(Nb_periode- Lag)] <- Vector_lag( (Dicho_Y[1:Nb_periode]) , Lag , TRUE )
}
}else{ # balanced panel case
# Creation of the fixed effects matrix
Fixed_Effects <- matrix( data=0, ncol = Nb_Id-1 , nrow=T_size )
for ( compteur in 1:(Nb_Id-1) ) {
Fixed_Effects[((compteur-1)*Nb_periode+1):(compteur*Nb_periode),compteur] <- 1
}
# New matrix of explanatory variables
Exp_X <- cbind( Exp_X , Fixed_Effects )
# Initialization
Var_X_Reg <- matrix( data = 0 , nrow = (Nb_periode- Lag) * Nb_Id , ncol = ( ncol(Exp_X) ) )
Var_Y_Reg <- as.vector( rep ( 0 , Nb_Id * (Nb_periode- Lag) ) )
# data processing with lag
for (compteur_Id in 1:Nb_Id){
Var_X_Reg[ ( 1 + ( (compteur_Id-1) * (Nb_periode- Lag) ) ) : ( (compteur_Id) * (Nb_periode- Lag) ),] <- Matrix_lag( (Exp_X[ ( 1 + ( (compteur_Id-1) * Nb_periode ) ) :( (compteur_Id) * Nb_periode ),]) , Lag , FALSE )
Var_Y_Reg[ ( 1 + ( (compteur_Id-1) * (Nb_periode- Lag) ) ) : ( compteur_Id * (Nb_periode- Lag) ) ] <- Vector_lag( (Dicho_Y[( 1 + ( (compteur_Id-1) * Nb_periode ) ) :( (compteur_Id) * Nb_periode )]) , Lag , TRUE )
}
}
# --------------------------------------------------------------------------------
# ----------------------------- Logistic Estimation ------------------------------
# --------------------------------------------------------------------------------
# ----------------------------------- Model 1 -----------------------------------
if (type_model==1) {
if (length(Var_X_Reg)==length(Var_Y_Reg)){
# Initialization - simple OLS
coeff_initialize <- solve(t(Var_X_Reg)%*%(Var_X_Reg))%*%t(Var_X_Reg)%*%(Var_Y_Reg)
coeff_initialize <- coeff_initialize/0.25
}else{
# Initialization - simple OLS
coeff_initialize <- as.vector(rep(0,ncol(Var_X_Reg)))
coeff_initialize <- solve(t(Var_X_Reg)%*%(Var_X_Reg))%*%t(Var_X_Reg)%*%(Var_Y_Reg)
if (Intercept==TRUE) {
coeff_initialize[1] <- (coeff_initialize[1]-0.4)/0.25
for (compteur in 2:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}else {
for (compteur in 1:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}
}
F_mod1 <- function(par) {
# Sample Size
T_size_function <- length(Var_Y_Reg)
# Number of Coeff
last <- length(par)
# Vector of parameters except the lagged index
beta <- as.vector(rep(0,last))
beta <- par[1:(last)]
# =================================
# === Construction of the index ===
# =================================
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size_function))
for (compteur in 1:T_size_function) {
if (length(Var_X_Reg)==T_size_function) {
expf[compteur] <- exp(Var_X_Reg[compteur] %*% beta)/(1+exp(Var_X_Reg[compteur] %*% beta))
}else{
expf[compteur] <- exp(Var_X_Reg[compteur,] %*% beta)/(1+exp(Var_X_Reg[compteur,] %*% beta))
}
}
prob <- as.vector(rep(0,T_size_function))
for (compteur in 1:T_size_function) {
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Vector of individual log-likelihood
lik <- Var_Y_Reg*log(prob)+(1-Var_Y_Reg)*log(1-prob)
# log-likelihood maximization function
return(-sum(lik))
}
if (length(Var_X_Reg)==length(Var_Y_Reg)) {
results <- optim(coeff_initialize, F_mod1, gr = NULL,
lower = -1, upper = 1, method = "Brent", control = list(maxit = 50000, factr = TRUE, abstol=0.00001), hessian = FALSE)
}else{
results <- optim(coeff_initialize, F_mod1, gr = NULL, method = "Nelder-Mead", control = list(maxit = 50000, factr = TRUE, abstol=0.00001), hessian = FALSE)
}
# Estimated parameters
if (length(Var_X_Reg)==length(Var_Y_Reg)){
Beta <- results$par[1]
}else{
Beta <- as.vector(rep(0,ncol(Var_X_Reg)))
Beta <- results$par[1:ncol(Var_X_Reg)]
}
# Approximation Hessian Matrix
hessc <- hessian(func=F_mod1, x=Beta , "Richardson")
# Initialisation of the vector of index
T_size <- length(Var_Y_Reg)
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size))
# Initialization of the vector of density
pdf <- as.vector(rep(0,T_size))
for (compteur in 1:T_size) {
if (length(Var_X_Reg)==length(Var_Y_Reg)) {
expf[compteur] <- exp(Var_X_Reg[compteur] %*% Beta)/(1+exp(Var_X_Reg[compteur] %*% Beta))
pdf[compteur] <- exp(Var_X_Reg[compteur] %*% Beta)/((1+exp(Var_X_Reg[compteur] %*% Beta))^2)
}else{
expf[compteur] <- exp(Var_X_Reg[compteur,] %*% Beta)/(1+exp(Var_X_Reg[compteur,] %*% Beta))
pdf[compteur] <- exp(Var_X_Reg[compteur,] %*% Beta)/((1+exp(Var_X_Reg[compteur,] %*% Beta))^2)
}
}
# The vector of estimated probabilities
prob <- as.vector(rep(0,T_size))
for (compteur in 1:T_size) {
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Matrix of explanatory variables
if (length(Var_X_Reg)==length(Var_Y_Reg)) {
X <- as.vector(rep(0,T_size-1))
X <- Var_X_Reg[2:T_size]
}else{
X <- matrix(0,nrow=T_size-1,ncol=(ncol(Var_X_Reg)))
X <- Var_X_Reg[2:T_size,]
}
# Matrix of gradient
vect_gradient <- as.vector(rep(0,T_size-1))
vect_gradient <- (Var_Y_Reg[2:T_size]-prob[2:T_size])/(prob[2:T_size]*(1-prob[2:T_size]))*pdf[2:T_size]
if (length(Var_X_Reg)==length(Var_Y_Reg)) {
gradient <- as.vector(rep(0,(T_size-1)))
gradient <- vect_gradient
}else{
gradient <- matrix(0,nrow=(T_size-1),ncol=(ncol(Var_X_Reg)))
for (compteur in 1:(ncol(Var_X_Reg))) {
gradient[,compteur] <- vect_gradient
}
}
gradient <- gradient * X
# Matrix of covariance of gradient
I=t(gradient)%*%gradient/(T_size-1)
# Bandwith parameter
bdw <- floor(4*(T_size/100)^(2/9))
for (compteur in 1:bdw) {
u=abs(compteur/bdw)
if ((0.5<u)&&(u<=1)) {
w=2*(1-abs(u))^3
}else if((0<u)&&(u<=0.5)) {
w=1-6*abs(u)^2+6*abs(u)^3
}
# Matrix of estimated autovariance of gradient
if (length(Var_X_Reg)==length(Var_Y_Reg)){
Covariance <- t(gradient[(1+compteur):(T_size-1)]) %*% gradient[1:(T_size-1-compteur)]/(T_size-compteur)
}else{
Covariance <- t(gradient[(1+compteur):(T_size-1),]) %*% gradient[1:(T_size-1-compteur),]/(T_size-compteur)
}
# Matrix of asymptotic correction
I <- I + w * (Covariance + t(Covariance))
}
# ===============
# === Results ===
# ===============
# Estimated parameters
if (length(Var_X_Reg)==length(Var_Y_Reg)) {
result_param <- Beta
ind <- as.vector(rep(0,T_size))
ind <- Var_X_Reg * Beta
}else{
result_param <- as.vector(rep(0,ncol(Var_X_Reg)))
result_param <- Beta
ind <- as.vector(rep(0,T_size))
ind <- Var_X_Reg %*% Beta
}
# Asymptotic Matrix of Var-Cov
V <- solve(hessc)
# Asymptotic standard errors (non robust)
Std <- t(diag(sqrt(abs(V))))
# Robust var-cov
VCM <- V %*% I %*% V * T_size
# Robust standard errors
Rob_Std <- t(diag(sqrt(abs(VCM))))
# Log-Likelihood
loglikelihood <- -results$value
if (length(Var_X_Reg)==length(Var_Y_Reg)){
# AIC information criteria
AIC <- -2*loglikelihood + 1
# BIC information criteria
BIC <- -2*loglikelihood + log(T_size)
}else{
# AIC information criteria
AIC <- -2*loglikelihood + ncol(Var_X_Reg)
# BIC information criteria
BIC <- -2*loglikelihood + (ncol(Var_X_Reg))*log(T_size)
}
# R2
Lc <- sum(Var_Y_Reg*log(mean(Var_Y_Reg))+(1-Var_Y_Reg)*log(1-mean(Var_Y_Reg)))
R2 <- 1 - ((loglikelihood -ncol(Var_X_Reg))/Lc)
# initialize the coefficients matrix results
if (Intercept==TRUE){
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X-1))
nameVarX <- c(1:(nb_Var_X-1))
}else{
if (length(Var_X_Reg)==length(Var_Y_Reg)){
nb_Var_X <- 1
nameVarX <- 1
}else{
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X))
nameVarX <- c(1:(nb_Var_X))
}
}
name <- as.vector(rep(0,nb_Var_X))
Estimate <- as.vector(rep(0,nb_Var_X))
Std.Error <- as.vector(rep(0,nb_Var_X))
zvalue <- as.vector(rep(0,nb_Var_X))
Pr <- as.vector(rep(0,nb_Var_X))
if (Intercept==TRUE){
# DataFrame with coefficients and significativity with intercept
Coeff.results <- data.frame(
name = c("Intercept" ,nameVarX),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}else{
# DataFrame with coefficients and significativity
Coeff.results <- data.frame(
name = c(nameVarX),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}
results <- list(Estimation=Coeff.results,
AIC=AIC, BIC=BIC, R2=R2, index=ind[1:T_size], prob = prob, LogLik=loglikelihood, VarCov=VCM)
# ----------------------------------- Modele 2 -----------------------------------
} else if (type_model==2) {
# Initialization
coeff_initialize <- as.vector(rep(0,ncol(Var_X_Reg)))
coeff_initialize <- solve(t(Var_X_Reg)%*%(Var_X_Reg))%*%t(Var_X_Reg)%*%(Var_Y_Reg)
if (Intercept==TRUE) {
coeff_initialize[1] <- (coeff_initialize[1]-0.4)/0.25
for (compteur in 2:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}else{
for (compteur in 1:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}
F_mod2 <- function(par) {
# Sample Size
T_size_function <- length(Var_Y_Reg)
# Number of Coeff
last <- length(par)
# Vector of parameters except the lagged index
beta <- as.vector(rep(0,last))
beta <- par[1:(last)]
# =================================
# === Construction of the index ===
# =================================
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size_function))
for (compteur in 1:T_size_function) {
expf[compteur] <- exp(Var_X_Reg[compteur,] %*% beta)/(1+exp(Var_X_Reg[compteur,] %*% beta))
}
prob <- as.vector(rep(0,T_size_function))
for (compteur in 1:T_size_function) {
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Vector of individual log-likelihood
lik <- Var_Y_Reg*log(prob)+(1-Var_Y_Reg)*log(1-prob)
# log-likelihood maximization function
return(-sum(lik))
}
results <- optim(coeff_initialize, F_mod2, gr = NULL, method = "Nelder-Mead", control = list(maxit = 500000, factr = TRUE, abstol=0.00001), hessian = FALSE)
# Estimated parameters
Beta <- as.vector(rep(0,ncol(Var_X_Reg)))
Beta <- results$par[1:ncol(Var_X_Reg)]
# Approximation Hessian Matrix
hessc <- hessian(func=F_mod2, x=Beta , "Richardson")
# Initialization of the vector of index
T_size <- length(Var_Y_Reg)
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size))
# Initialization of the vector of density
pdf <- as.vector(rep(0,T_size))
for (compteur in 1:T_size) {
expf[compteur] <- exp(Var_X_Reg[compteur,] %*% Beta)/(1+exp(Var_X_Reg[compteur,] %*% Beta))
pdf[compteur] <- exp(Var_X_Reg[compteur,] %*% Beta)/((1+exp(Var_X_Reg[compteur,] %*% Beta))^2)
}
# The vector of estimated probabilities
prob <- as.vector(T_size)
for (compteur in 1:T_size) {
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Matrix of explanatory variables
X <- matrix(0,nrow=T_size-1,ncol=(ncol(Var_X_Reg)))
X <- Var_X_Reg[2:T_size,]
# Matrix of gradient
vect_gradient <- as.vector(rep(0,T_size-1))
vect_gradient <- (Var_Y_Reg[2:T_size]-prob[2:T_size])/(prob[2:T_size]*(1-prob[2:T_size]))*pdf[2:T_size]
gradient <- matrix(0,nrow=(T_size-1),ncol=(ncol(Var_X_Reg)))
for (compteur in 1:(ncol(Var_X_Reg))) {
gradient[,compteur] <- vect_gradient
}
gradient <- gradient * X
# Matrix of covariance of gradient
I=t(gradient)%*%gradient/(T_size-1)
# Bandwith parameter
bdw <- floor(4*(T_size/100)^(2/9))
for (compteur in 1:bdw) {
u=abs(compteur/bdw)
if ((0.5<u)&&(u<=1)){
w=2*(1-abs(u))^3
}else if((0<u)&&(u<=0.5)){
w=1-6*abs(u)^2+6*abs(u)^3
}
# Matrix of estimated autovariance of gradient
Covariance <- t(gradient[(1+compteur):(T_size-1),]) %*% gradient[1:(T_size-1-compteur),]/(T_size-compteur)
# Matrix of asymptotic correction
I <- I + w * (Covariance + t(Covariance))
}
# ===============
# === Results ===
# ===============
# Estimated parameters
result_param <- as.vector(rep(0,ncol(Var_X_Reg)))
result_param <- Beta
ind <- as.vector(rep(0,T_size))
ind <- Var_X_Reg %*% Beta
# Asymptotic Matrix of Var-Cov
V <- solve(hessc)
# Asymptotic standard errors (non robust)
Std <- t(diag(sqrt(abs(V))))
# Robust var-cov
VCM <- V %*% I %*% V * T_size
# Robust standard errors
Rob_Std <- t(diag(sqrt(abs(VCM))))
# Log-Likelihood
loglikelihood <- -results$value
# AIC information criteria
AIC <- -2*loglikelihood + ncol(Var_X_Reg)
# BIC information criteria
BIC <- -2*loglikelihood + (ncol(Var_X_Reg))*log(T_size)
# R2
Lc <- sum(Var_Y_Reg*log(mean(Var_Y_Reg))+(1-Var_Y_Reg)*log(1-mean(Var_Y_Reg)))
R2 <- 1 - ((loglikelihood -ncol(Var_X_Reg))/Lc)
# initialize the coefficients matrix results
if (Intercept==TRUE){
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X-2))
nameVarX <- c(1:(nb_Var_X-2))
}else{
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X-1))
nameVarX <- c(1:(nb_Var_X-1))
}
name <- as.vector(rep(0,nb_Var_X))
Estimate <- as.vector(rep(0,nb_Var_X))
Std.Error <- as.vector(rep(0,nb_Var_X))
zvalue <- as.vector(rep(0,nb_Var_X))
Pr <- as.vector(rep(0,nb_Var_X))
if (Intercept==TRUE){
# DataFrame with coefficients and significativity with intercept
if (Nb_Id > 1){
Coeff.results <- data.frame(
name = c("Intercept" , nameVarX[1:(nb_Var_X-2-Nb_Id+1)] ,"Binary_Lag", nameVarX[(nb_Var_X-2-Nb_Id+2):(nb_Var_X-2)]),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}else{
Coeff.results <- data.frame(
name = c("Intercept" , nameVarX ,"Binary_Lag"),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}
}else{
# DataFrame with coefficients and significativity
if (Nb_Id>1){
Coeff.results <- data.frame(
name = c(nameVarX[1:(nb_Var_X-1-Nb_Id+1)] ,"Binary_Lag", nameVarX[(nb_Var_X-1-Nb_Id+2):(nb_Var_X-1)]),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}else{
Coeff.results <- data.frame(
name = c(nameVarX ,"Binary_Lag"),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}
}
results <- list(Estimation=Coeff.results,
AIC=AIC, BIC=BIC, R2=R2, index=ind[1:T_size], prob = prob, LogLik=loglikelihood, VarCov=VCM)
# ----------------------------------- Modele 3 -----------------------------------
} else if (type_model==3){
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
# Initialization of coefficients
coeff_initialize <- as.vector( rep(0,2) )
coeff_initialize[1] <- solve( t(Var_X_Reg) %*% (Var_X_Reg) ) %*% t(Var_X_Reg) %*% (Var_Y_Reg)
# Initialization of the Index
Pi <- 0
coeff_initialize[2] <- Pi
}else{
# Initialization of coefficients
coeff_initialize <- as.vector( rep ( 0, ncol(Var_X_Reg) + 1 ) )
coeff_initialize[1:ncol(Var_X_Reg)] <- solve(t(Var_X_Reg)%*%(Var_X_Reg))%*%t(Var_X_Reg)%*%(Var_Y_Reg)
# Initialization of the Index
Pi <- 0
coeff_initialize[ncol(Var_X_Reg)+1] <- Pi
}
if (Intercept==TRUE) {
coeff_initialize[1] <- (coeff_initialize[1]-0.4)/0.25
for (compteur in 2:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}else{
if ( length(Var_X_Reg) == length(Var_X_Reg) ) {
coeff_initialize <- coeff_initialize/0.25
}else{
for (compteur in 1:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}
}
F_mod3 <- function(par) {
# Sample Size
T_size_function <- length(Var_Y_Reg)
# Number of Coeff
last <- length(par)
# Vector of parameters except the lagged index
beta <- as.vector(rep(0,last-1))
beta <- par[1:(last-1)]
# Parameter of the lagged index (here a logistic transformation)
alpha <- par[last]/(1+abs(par[last]))
# =================================
# === Construction of the index ===
# =================================
# Initialization of the vector of the index
ind <- as.vector(rep(0,T_size_function))
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size_function))
# Initial value for the index
mean_Var_X <- as.vector(rep(0,(last-1)))
for (compteur in 1:(last-1)) {
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
mean_Var_X[compteur] <- mean(Var_X_Reg)
p0 <- mean_Var_X * beta/(1-alpha)
ind[1] <- p0
expf[1] <- exp(ind[1])/(1+exp(ind[1]))
} else {
mean_Var_X[compteur] <- mean(Var_X_Reg[,compteur])
p0 <- mean_Var_X %*% beta/(1-alpha)
ind[1] <- p0
expf[1] <- exp(ind[1])/(1+exp(ind[1]))
}
}
for (compteur in 2:T_size_function) {
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
ind[compteur] <- alpha * ind[compteur-1] + Var_X_Reg[compteur] * beta
} else {
ind[compteur] <- alpha * ind[compteur-1] + Var_X_Reg[compteur,] %*% beta
}
expf[compteur] <- exp(ind[compteur])/(1+exp(ind[compteur]))
}
prob <- as.vector(rep(0,T_size_function))
for (compteur in 1:T_size_function) {
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Vector of individual log-likelihood
lik <- Var_Y_Reg*log(prob)+(1-Var_Y_Reg)*log(1-prob)
# We do not consider the initial condition on the index
estim_lik <- as.vector(rep(0,(T_size_function-1)))
estim_lik <- lik[2:T_size_function]
# log-likelihood maximization function
return(-sum(lik))
}
results <- optim(coeff_initialize, F_mod3, gr = NULL, method = "Nelder-Mead", control = list(maxit = 500000, factr = TRUE, abstol=0.00001), hessian = FALSE)
# Logistic transformation of alpha (inverse)
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
psi <- results$par[2]
# Estimated parameter alpha
alpha <- psi/(1+abs(psi))
# Intermediate element required by the analytical gradient
h <- 0.000001
# Analytical Gradient for alpha / psi
d2 <- (((psi+h)/(1+abs(psi+h)))-((psi-h)/(1+abs(psi-h))))/(2*h)
# Matrix of Taylor development of the logistic function
C <- diag(2)
C[2,2] <- d2
}else{
psi <- results$par[ncol(Var_X_Reg)+1]
# Estimated parameter alpha
alpha <- psi/(1+abs(psi))
# Intermediate element required by the analytical gradient
h <- 0.000001
# Analytical Gradient for alpha / psi
d2 <- (((psi+h)/(1+abs(psi+h)))-((psi-h)/(1+abs(psi-h))))/(2*h)
# Matrix of Taylor development of the logistic function
C <- diag(ncol(Var_X_Reg)+1)
C[(ncol(Var_X_Reg)+1),(ncol(Var_X_Reg)+1)] <- d2
}
# =======================
# === Correction term ===
# =======================
# Estimated parameters
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
Beta <- results$par[1]
}else{
Beta <- as.vector(rep(0,ncol(Var_X_Reg)))
Beta <- results$par[1:ncol(Var_X_Reg)]
}
# Initialization of the vector of index
T_size <- length(Var_Y_Reg)
ind <- as.vector(rep(0,T_size))
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size))
# Initialization of the vector of density
pdf <- as.vector(rep(0,T_size))
# Initial value for the index
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
last <- 2
mean_Var_X <- mean(Var_X_Reg)
p0 <- mean_Var_X * Beta/(1-alpha)
ind[1] <- p0
expf[1] <- exp(ind[1])/(1+exp(ind[1]))
pdf[1] <- exp(ind[1])/((1+exp(ind[1]))^2)
for (compteur in 2:T_size) {
ind[compteur] <- alpha * ind[compteur-1] + Var_X_Reg[compteur] * Beta
expf[compteur] <- exp(ind[compteur])/(1+exp(ind[compteur]))
pdf[compteur] <- exp(ind[compteur])/((1+exp(ind[compteur]))^2)
}
}else{
last <- ncol(Var_X_Reg)+1
mean_Var_X <- as.vector(rep(0,(last-1)))
for (compteur in 1:(last-1)) {
mean_Var_X[compteur] <- mean(Var_X_Reg[,compteur])
}
p0 <- mean_Var_X %*% Beta/(1-alpha)
ind[1] <- p0
expf[1] <- exp(ind[1])/(1+exp(ind[1]))
pdf[1] <- exp(ind[1])/((1+exp(ind[1]))^2)
for (compteur in 2:T_size) {
ind[compteur] <- alpha * ind[compteur-1] + Var_X_Reg[compteur,] %*% Beta
expf[compteur] <- exp(ind[compteur])/(1+exp(ind[compteur]))
pdf[compteur] <- exp(ind[compteur])/((1+exp(ind[compteur]))^2)
}
}
# ==============================
# === Robust standard Errors ===
# ==============================
# The vector of estimated probabilities
prob <- as.vector(T_size)
for (compteur in 1:T_size) {
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Initial value for the index
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
# Matrix of explanatory variables (the first of observation is equal to p0)
X <- matrix(0,nrow=(T_size-1),ncol=2)
X[,1] <- Var_X_Reg[2:T_size]
X[,2] <- ind[1:(T_size-1)]
# Matrix of gradient
vect_gradient <- as.vector(rep(0,(T_size-1)))
vect_gradient <- (Var_Y_Reg[2:T_size]-prob[2:T_size])/(prob[2:T_size]*(1-prob[2:T_size]))*pdf[2:T_size]
gradient <- matrix(0,nrow=(T_size-1),ncol=2)
for (compteur in 1:2){
gradient[,compteur] <- vect_gradient
}
gradient <- gradient * X
}else{
# Matrix of explanatory variables (the first of observation is equal to p0)
X <- matrix(0,nrow=(T_size-1),ncol=(ncol(Var_X_Reg)+1))
X[,1:(ncol(Var_X_Reg))] <- Var_X_Reg[2:T_size,]
X[,ncol(Var_X_Reg)+1] <- ind[1:(T_size-1)]
# Matrix of gradient
vect_gradient <- as.vector(rep(0,T_size-1))
vect_gradient <- (Var_Y_Reg[2:T_size]-prob[2:T_size])/(prob[2:T_size]*(1-prob[2:T_size]))*pdf[2:T_size]
gradient <- matrix(0,nrow=(T_size-1),ncol=(ncol(Var_X_Reg)+1))
for (compteur in 1:(ncol(Var_X_Reg)+1)) {
gradient[,compteur] <- vect_gradient
}
gradient <- gradient * X
}
# Matrix of covariance of gradient
I=t(gradient)%*%gradient/(T_size-1)
# Bandwith parameter
bdw <- floor(4*(T_size/100)^(2/9))
for (compteur in 1:bdw) {
u=abs(compteur/bdw)
if ((0.5<u)&&(u<=1)) {
w=2*(1-abs(u))^3
}else if((0<u)&&(u<=0.5)){
w=1-6*abs(u)^2+6*abs(u)^3
}
# Matrix of estimated autovariance of gradient
Covariance <- t(gradient[(1+compteur):(T_size-1),]) %*% gradient[1:(T_size-1-compteur),]/(T_size-compteur)
# Matrix of asymptotic correction
I <- I + w * (Covariance + t(Covariance))
}
# ===============
# === Results ===
# ===============
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
# Parameter alpha
alpha <- psi / (1+abs(psi))
# Estimated parameters
result_param <- as.vector(rep(0,2))
result_param <- c(Beta, alpha)
# Hessian matrix
hessc <- matrix(0,2,2)
hessc <- hessian(func=F_mod3, x=result_param , "Richardson")
# Non-robust variance-covariance matrix in the new space
V <- C %*% solve(hessc) %*% C
# Asymptotic Matrix of Var-Cov
V_nonrobust <- V
# Asymptotic standard errors (non robust)
Std <- t(diag(sqrt(abs(V_nonrobust))))
# Robust var-cov
VCM <- V %*% I %*% V * T_size
# Robust standard errors
Rob_Std <- t(diag(sqrt(abs(VCM))))
# Log-Likelihood
loglikelihood <- - results$value
# AIC information criteria
AIC <- -2*loglikelihood + 2
# BIC information criteria
BIC <- -2*loglikelihood + 2*log(T_size)
# R2
Lc <- sum(Var_Y_Reg*log(mean(Var_Y_Reg))+(1-Var_Y_Reg)*log(1-mean(Var_Y_Reg)))
R2 <- 1 - ((loglikelihood -1)/Lc)
}else{
# Parameter alpha
alpha <- psi / (1+abs(psi))
# Estimated parameters
result_param <- as.vector(rep(0,ncol(Var_X_Reg)+1))
result_param <- c(Beta, alpha)
# Hessian matrix
hessc <- matrix(0,(ncol(Var_X_Reg)+1),(ncol(Var_X_Reg)+1))
hessc <- hessian(func=F_mod3, x=result_param , "Richardson")
# Non-robust variance-covariance matrix in the new space
V <- C %*% solve(hessc) %*% C
# Asymptotic Matrix of Var-Cov
V_nonrobust <- V
# Asymptotic standard errors (non robust)
Std <- t(diag(sqrt(abs(V_nonrobust))))
# Robust var-cov
VCM <- V %*% I %*% V * T_size
# Robust standard errors
Rob_Std <- t(diag(sqrt(abs(VCM))))
# Log-Likelihood
loglikelihood <- - results$value
# AIC information criteria
AIC <- -2*loglikelihood + ncol(Var_X_Reg)+1
# BIC information criteria
BIC <- -2*loglikelihood + (ncol(Var_X_Reg)+1)*log(T_size)
# R2
Lc <- sum(Var_Y_Reg*log(mean(Var_Y_Reg))+(1-Var_Y_Reg)*log(1-mean(Var_Y_Reg)))
R2 <- 1 - ((loglikelihood -ncol(Var_X_Reg))/Lc)
}
# initialize the coefficients matrix results
if (Intercept==TRUE){
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X-1))
nameVarX <- c(1:(nb_Var_X-1))
}else{
if (length(Var_X_Reg)==length(Var_Y_Reg)){
nb_Var_X <- 1
nameVarX <- 1
}else{
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X))
nameVarX <- c(1:(nb_Var_X))
}
}
name <- as.vector(rep(0,nb_Var_X+1))
Estimate <- as.vector(rep(0,nb_Var_X+1))
Std.Error <- as.vector(rep(0,nb_Var_X+1))
zvalue <- as.vector(rep(0,nb_Var_X+1))
Pr <- as.vector(rep(0,nb_Var_X+1))
if (Intercept==TRUE){
# DataFrame with coefficients and significativity with intercept
if (Nb_Id>1){
Coeff.results <- data.frame(
name = c("Intercept" , nameVarX[1:(nb_Var_X-1-Nb_Id+1)] ,"Index_Lag", nameVarX[(nb_Var_X-1-Nb_Id+2):(nb_Var_X-1)]),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}else{
Coeff.results <- data.frame(
name = c("Intercept" , nameVarX ,"Index_Lag"),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}
}else{
# DataFrame with coefficients and significativity
if (Nb_Id>1){
Coeff.results <- data.frame(
name = c(nameVarX[1:(nb_Var_X-Nb_Id+1)] ,"Index_Lag", nameVarX[(nb_Var_X-Nb_Id+2):(nb_Var_X)]),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}else{
Coeff.results <- data.frame(
name = c(nameVarX ,"Index_Lag"),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}
}
results <- list(Estimation=Coeff.results,
AIC=AIC, BIC=BIC, R2=R2, index=ind[1:T_size], prob = prob, LogLik=loglikelihood, VarCov=VCM)
# ----------------------------------- Modele 4 -----------------------------------
} else if (type_model==4) {
# Initialization of coefficients
coeff_initialize <- as.vector(rep(0,ncol(Var_X_Reg)+1))
coeff_initialize <- solve(t(Var_X_Reg)%*%(Var_X_Reg))%*%t(Var_X_Reg)%*%(Var_Y_Reg)
# Initialisation of the index
Pi <- 0
coeff_initialize[ncol(Var_X_Reg)+1] <- Pi
if (Intercept==1) {
coeff_initialize[1] <- (coeff_initialize[1]-0.4)/0.25
for (compteur in 2:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}else{
for (compteur in 1:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}
F_mod4 <- function(par) {
# Sample Size
T_size_function <- length(Var_Y_Reg)
# Number of Coeff
last <- length(par)
# Vector of parameters except the lagged index
beta <- as.vector(rep(0,last-1))
beta <- par[1:(last-1)]
# Parameter of the lagged index (here a logistic transformation)
alpha <- par[last]/(1+abs(par[last]))
# =================================
# === Construction of the index ===
# =================================
# Initialization of the vector of the index
ind <- as.vector(rep(0,T_size_function))
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size_function))
# Initial value for the index
mean_Var_X <- as.vector(rep(0,(last-1)))
for (compteur in 1:(last-1)) {
mean_Var_X[compteur] <- mean(Var_X_Reg[,compteur])
}
p0 <- mean_Var_X %*% beta/(1-alpha)
ind[1] <- p0
expf[1] <- exp(ind[1])/(1+exp(ind[1]))
for (compteur in 2:T_size_function) {
ind[compteur] <- alpha * ind[compteur-1] + Var_X_Reg[compteur,] %*% beta
expf[compteur] <- exp(ind[compteur])/(1+exp(ind[compteur]))
}
prob <- as.vector(rep(0,T_size_function))
for (compteur in 1:T_size_function) {
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Vector of individual log-likelihood
lik <- Var_Y_Reg*log(prob)+(1-Var_Y_Reg)*log(1-prob)
# We do not consider the initial condition on the index
estim_lik <- as.vector(rep(0,(T_size_function-1)))
estim_lik <- lik[2:T_size_function]
# log-likelihood maximization function
return(-sum(lik))
}
results <- optim(coeff_initialize, F_mod4, gr = NULL, method = "Nelder-Mead", control = list(maxit = 500000, factr = TRUE, abstol=0.00001), hessian = FALSE)
# Logistic transformation of alpha (inverse)
psi <- results$par[ncol(Var_X_Reg)+1]
# Estimated parameter alpha
alpha <- psi/(1+abs(psi))
# Intermediate element required by the analytical gradient
h <- 0.000001
# Analytical Gradient for alpha / psi
d2 <- (((psi+h)/(1+abs(psi+h)))-((psi-h)/(1+abs(psi-h))))/(2*h)
# Matrix of Taylor development of the logistic function
C <- diag(ncol(Var_X_Reg)+1)
C[(ncol(Var_X_Reg)+1),(ncol(Var_X_Reg)+1)] <- d2
# =======================
# === Correction term ===
# =======================
# Estimated parameters
Beta <- as.vector(rep(0,ncol(Var_X_Reg)))
Beta <- results$par[1:ncol(Var_X_Reg)]
# Initialization of the vector of index
T_size <- length(Var_Y_Reg)
ind <- as.vector(rep(0,T_size))
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size))
# Initialisation of the vector of density
pdf <- as.vector(rep(0,T_size))
# Initial value for the index
last <- ncol(Var_X_Reg)+1
mean_Var_X <- as.vector(rep(0,(last-1)))
for (compteur in 1:(last-1)) {
mean_Var_X[compteur] <- mean(Var_X_Reg[,compteur])
}
p0 <- mean_Var_X %*% Beta/(1-alpha)
ind[1] <- p0
expf[1] <- exp(ind[1])/(1+exp(ind[1]))
pdf[1] <- exp(ind[1])/((1+exp(ind[1]))^2)
for (compteur in 2:T_size){
ind[compteur] <- alpha * ind[compteur-1] + Var_X_Reg[compteur,] %*% Beta
expf[compteur] <- exp(ind[compteur])/(1+exp(ind[compteur]))
pdf[compteur] <- exp(ind[compteur])/((1+exp(ind[compteur]))^2)
}
# ==============================
# === Robust standard Errors ===
# ==============================
# The vector of estimated probabilities
prob <- as.vector(rep(0,T_size))
for (compteur in 1:T_size){
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Matrix of explanatory variables (the first of observation is equal to p0)
X <- matrix(0,nrow=(T_size-1),ncol=(ncol(Var_X_Reg)+1))
X[,1:(ncol(Var_X_Reg))] <- Var_X_Reg[2:T_size,]
X[,ncol(Var_X_Reg)+1] <- ind[1:(T_size-1)]
# Matrix of gradient
vect_gradient <- as.vector(rep(0,T_size-1))
vect_gradient <- (Var_Y_Reg[2:T_size]-prob[2:T_size])/(prob[2:T_size]*(1-prob[2:T_size]))*pdf[2:T_size]
gradient <- matrix(0,nrow=(T_size-1),ncol=(ncol(Var_X_Reg)+1))
for (compteur in 1:(ncol(Var_X_Reg)+1)) {
gradient[,compteur] <- vect_gradient
}
gradient <- gradient * X
# Matrix of covariance of gradient
I=t(gradient)%*%gradient/(T_size-1)
# Bandwith parameter
bdw <- floor(4*(T_size/100)^(2/9))
for (compteur in 1:bdw) {
u=abs(compteur/bdw)
if ((0.5<u)&&(u<=1)) {
w=2*(1-abs(u))^3
}else if((0<u)&&(u<=0.5)){
w=1-6*abs(u)^2+6*abs(u)^3
}
# Matrix of estimated autovariance of gradient
Covariance <- t(gradient[(1+compteur):(T_size-1),]) %*% gradient[1:(T_size-1-compteur),]/(T_size-compteur)
# Matrix of asymptotic correction
I <- I + w * (Covariance + t(Covariance))
}
# ===============
# === Results ===
# ===============
# Parameter alpha
alpha <- psi / (1+abs(psi))
# Estimated parameters
result_param <- as.vector(rep(0,ncol(Var_X_Reg)+1))
result_param <- c(Beta, alpha)
# Hessian matrix
hessc <- matrix(0,(ncol(Var_X_Reg)+1),(ncol(Var_X_Reg)+1))
hessc <- hessian(func=F_mod4, x=result_param , "Richardson")
# Non-robust variance-covariance matrix in the new space
V <- C %*% solve(hessc) %*% C
# Asymptotic Matrix of Var-Cov
V_nonrobust <- V
# Asymptotic standard errors (non robust)
Std <- t(diag(sqrt(abs(V_nonrobust))))
# Robust var-cov
VCM <- V %*% I %*% V * T_size
# Robust standard errors
Rob_Std <- t(diag(sqrt(abs(VCM))))
# Log-Likelihood
loglikelihood <- -results$value
# AIC information criteria
AIC <- -2*loglikelihood + ncol(Var_X_Reg)+1
# BIC information criteria
BIC <- -2*loglikelihood + (ncol(Var_X_Reg)+1)*log(T_size)
# R2
Lc <- sum(Var_Y_Reg*log(mean(Var_Y_Reg))+(1-Var_Y_Reg)*log(1-mean(Var_Y_Reg)))
R2 <- 1 - ((loglikelihood -ncol(Var_X_Reg))/Lc)
# initialize the coefficients matrix results
if (Intercept==TRUE){
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X-2))
nameVarX <- c(1:(nb_Var_X-2))
}else{
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X-1))
nameVarX <- c(1:(nb_Var_X-1))
}
name <- as.vector(rep(0,nb_Var_X+1))
Estimate <- as.vector(rep(0,nb_Var_X+1))
Std.Error <- as.vector(rep(0,nb_Var_X+1))
zvalue <- as.vector(rep(0,nb_Var_X+1))
Pr <- as.vector(rep(0,nb_Var_X+1))
if (Intercept==TRUE){
# DataFrame with coefficients and significativity with intercept
if (Nb_Id>1){
Coeff.results <- data.frame(
name = c("Intercept" , nameVarX[1:(nb_Var_X-2-Nb_Id+1)],"Binary_Lag" ,"Index_Lag", nameVarX[(nb_Var_X-2-Nb_Id+2):(nb_Var_X-2)]),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}else{
Coeff.results <- data.frame(
name = c("Intercept" , nameVarX , "Binary_Lag", "Index_Lag"),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}
}else{
# DataFrame with coefficients and significativity
if (Nb_Id>1){
Coeff.results <- data.frame(
name = c(nameVarX[1:(nb_Var_X-1-Nb_Id+1)],"Binary_Lag" ,"Index_Lag", nameVarX[(nb_Var_X-1-Nb_Id+2):(nb_Var_X-1)]),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}else{
Coeff.results <- data.frame(
name = c(nameVarX , "Binary_Lag", "Index_Lag"),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}
}
results <- list(Estimation=Coeff.results,
AIC=AIC, BIC=BIC, R2=R2, index=ind[1:T_size], prob = prob, LogLik=loglikelihood, VarCov=VCM)
}
return(results)
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%% Function BlockBootstrapp %%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Dicho_Y: Binary variable
# - Exp_X: Matrix of explanatory variables
# - Intercept: Boolean variable that is equal to TRUE (with intercept) or FALSE (without)
# - n_simul: number of simulations for the block bootstrap
# Output:
# Matrix with bootstrap series
BlockBootstrapp <- function(Dicho_Y, Exp_X, Intercept, n_simul)
{
  # Number of observations and (fixed) optimal block length
  n_obs <- length(Dicho_Y)
  size_block <- floor(n_obs^(1/5))
  # Prepend a column of ones when an intercept is requested
  if (Intercept == TRUE) {
    if (n_obs == length(Exp_X)) {
      Exp_X <- matrix(data = c(rep(1, n_obs), Exp_X), ncol = 2, nrow = n_obs)
    } else {
      Exp_X <- matrix(data = c(rep(1, n_obs), Exp_X), ncol = (1 + ncol(Exp_X)), nrow = n_obs)
    }
  }
  # Columns per simulation: the binary variable plus the explanatory variables
  single_regressor <- (n_obs == length(Exp_X))
  if (single_regressor) {
    nvalue <- 2
  } else {
    nvalue <- ncol(Exp_X) + 1
  }
  # One group of nvalue columns per simulation
  matrix_results <- matrix(data = 0, ncol = (nvalue * n_simul), nrow = n_obs)
  for (compteur_simul in 1:n_simul)
  {
    # Random starting point of the sampled block
    block_position <- sample(1:(n_obs - size_block + 1), 1)
    block_rows <- block_position:(block_position + size_block - 1)
    # Extract the sampled block (binary variable first, then regressors)
    block_value <- matrix(data = 0, ncol = nvalue, nrow = size_block)
    block_value[, 1] <- Dicho_Y[block_rows]
    if (single_regressor) {
      block_value[, 2] <- Exp_X[block_rows]
    } else {
      block_value[, 2:nvalue] <- Exp_X[block_rows, ]
    }
    # Bootstrap series: the original sample shifted forward by size_block,
    # with the sampled block appended at the end
    col_first <- 1 + (compteur_simul - 1) * nvalue
    col_last <- compteur_simul * nvalue
    head_rows <- 1:(n_obs - size_block)
    matrix_results[head_rows, col_first] <- Dicho_Y[(size_block + 1):n_obs]
    if (single_regressor) {
      matrix_results[head_rows, col_last] <- Exp_X[(size_block + 1):n_obs]
    } else {
      matrix_results[head_rows, (col_first + 1):col_last] <- Exp_X[(size_block + 1):n_obs, ]
    }
    matrix_results[(n_obs - size_block + 1):n_obs, col_first:col_last] <- block_value
  }
  return(matrix_results)
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%% GIRF function %%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Dicho_Y: Binary variable
# - Exp_X: Matrix of explanatory variables
# - Lag: number of lag to calculate the GIRF
# - Int: Boolean variable that is equal to TRUE (with intercept) or FALSE (without)
# - t_mod:
# 1: static
# 2: dynamic with the lag binary variable
# 3: dynamic with the lag index variable
# 4: dynamic with both lag binary and lag index variable
# - horizon: horizon target for the GIRF analysis
# - shock_size: size of the shock
# - OC: threshold to determine the value of the dichotomous variable as a function of the index level
# Output:
# Matrix with:
# - column 1: horizon
# - column 2: index
# - column 3: index with shock
# - column 4: probability associated to the index
# - column 5: probability associated to the index with shock
# - column 6: binary variable associated to the index
# - column 7: binary variable associated to the index with shock
GIRF_Dicho <- function(Dicho_Y, Exp_X, Lag, Int, t_mod, horizon, shock_size, OC)
{
  # Logistic c.d.f.: maps an index value to a probability
  logistic_cdf <- function(z) exp(z) / (1 + exp(z))
  # Threshold rule: probability strictly above the cut-off OC signals a crisis (1)
  to_binary <- function(p) if (p > OC) 1 else 0
  # Column layout: horizon | index | index_shock | proba | proba_shock | binary | binary_shock
  matrix_results <- matrix(data = 0, nrow = (horizon + 1), ncol = 7)
  for (compteur_horizon in 0:horizon)
  {
    if (compteur_horizon == 0)
    {
      # Fit the model once at impact; all later horizons reuse these coefficients
      results_forecast <- Logistic_Estimation(Dicho_Y, Exp_X, Int, 1, Lag, t_mod)
      Coeff_estimated <- results_forecast$Estimation[, 2]
      # Prepend the intercept column when required (the equality test
      # distinguishes a single regressor vector from a regressor matrix)
      if (Int == TRUE && length(Dicho_Y) == length(Exp_X)) {
        Exp_X <- matrix(data = c(rep(1, length(Dicho_Y)), Exp_X), nrow = length(Dicho_Y), ncol = 2)
      } else if (Int == TRUE && length(Dicho_Y) != length(Exp_X)) {
        Exp_X <- matrix(data = c(rep(1, length(Dicho_Y)), Exp_X), nrow = length(Dicho_Y), ncol = (1 + ncol(Exp_X)))
      }
      # Last available explanatory observation (vector element or matrix row)
      if (length(Dicho_Y) == length(Exp_X)) {
        last_X <- Exp_X[(length(Dicho_Y) - 1)]
      } else {
        last_X <- Exp_X[(length(Dicho_Y) - 1), ]
      }
      # Last estimated index (used as a regressor by models 3 and 4)
      Last_Index <- results_forecast$index[length(results_forecast$index)]
      # Regressor vector for the recursion, depending on the model dynamics
      if (t_mod == 1) {
        Last_Exp_X <- c(last_X)
      } else if (t_mod == 2) {
        Last_Exp_X <- c(last_X, Dicho_Y[length(Dicho_Y)])
      } else if (t_mod == 3) {
        Last_Exp_X <- c(last_X, Last_Index)
      } else if (t_mod == 4) {
        Last_Exp_X <- c(last_X, Dicho_Y[length(Dicho_Y)], Last_Index)
      }
      # Baseline index and shocked index at impact
      index <- results_forecast$index[length(results_forecast$index)]
      proba <- logistic_cdf(index)
      index_shock <- index + shock_size
      proba_shock <- logistic_cdf(index_shock)
    } else {
      # horizon > 0: propagate the dynamics of the chosen specification
      if (t_mod == 1)
      {
        # Static model: the index stays at its last estimated value and the
        # shock does not propagate beyond impact
        index <- results_forecast$index[length(results_forecast$index)]
        proba <- logistic_cdf(index)
        index_shock <- results_forecast$index[length(results_forecast$index)]
        proba_shock <- logistic_cdf(index_shock)
      } else if (t_mod == 2)
      {
        # Dynamics through the lagged binary variable (last regressor slot)
        Last_Exp_X <- c(Last_Exp_X[1:(length(Last_Exp_X) - 1)], Var_Dicho)
        index <- Coeff_estimated %*% Last_Exp_X
        proba <- logistic_cdf(index)
        Last_Exp_X <- c(Last_Exp_X[1:(length(Last_Exp_X) - 1)], Var_Dicho_shock)
        index_shock <- Coeff_estimated %*% Last_Exp_X
        proba_shock <- logistic_cdf(index_shock)
      } else if (t_mod == 3)
      {
        # Dynamics through the lagged index (last regressor slot);
        # `index`/`index_shock` on the right-hand side are the previous
        # horizon's values, recomputed in this exact order
        Last_Exp_X <- c(Last_Exp_X[1:(length(Last_Exp_X) - 1)], index)
        index <- Coeff_estimated %*% Last_Exp_X
        proba <- logistic_cdf(index)
        Last_Exp_X <- c(Last_Exp_X[1:(length(Last_Exp_X) - 1)], index_shock)
        index_shock <- Coeff_estimated %*% Last_Exp_X
        proba_shock <- logistic_cdf(index_shock)
      } else if (t_mod == 4)
      {
        # Dynamics through both the lagged binary and the lagged index
        # (last two regressor slots)
        Last_Exp_X <- c(Last_Exp_X[1:(length(Last_Exp_X) - 2)], Var_Dicho, index)
        index <- Coeff_estimated %*% Last_Exp_X
        proba <- logistic_cdf(index)
        Last_Exp_X <- c(Last_Exp_X[1:(length(Last_Exp_X) - 2)], Var_Dicho_shock, index_shock)
        index_shock <- Coeff_estimated %*% Last_Exp_X
        proba_shock <- logistic_cdf(index_shock)
      }
    }
    # Binary signals implied by the cut-off OC (previously duplicated in
    # every branch above)
    Var_Dicho <- to_binary(proba)
    Var_Dicho_shock <- to_binary(proba_shock)
    # Store the row for this horizon
    matrix_results[compteur_horizon + 1, 1] <- compteur_horizon
    matrix_results[compteur_horizon + 1, 2] <- index
    matrix_results[compteur_horizon + 1, 3] <- index_shock
    matrix_results[compteur_horizon + 1, 4] <- proba
    matrix_results[compteur_horizon + 1, 5] <- proba_shock
    matrix_results[compteur_horizon + 1, 6] <- Var_Dicho
    matrix_results[compteur_horizon + 1, 7] <- Var_Dicho_shock
  }
  return(matrix_results)
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%% GIRF for confidence intervals %%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Dicho_Y: Binary variable
# - Exp_X: Matrix of explanatory variables
# - Int: Boolean variable that is equal to TRUE (with intercept) or FALSE (without)
# - Lag: number of lag for the logistic estimation
# - t_mod:
# 1: static
# 2: dynamic with the lag binary variable
# 3: dynamic with the lag index variable
# 4: dynamic with both lag binary and lag index variable
# - n_simul: number of simulations for the bootstrap
# - centile_shock: percentile of the shock from the estimation errors
# - horizon: horizon
# - OC: either a value or the name of the optimal cut-off / threshold ("NSR", "CSA", "AM")
#
# Output:
# Matrix where for each simulation there are 7 columns with:
# - column 1: horizon
# - column 2: index
# - column 3: index with shock
# - column 4: probability associated to the index
# - column 5: probability associated to the index with shock
# - column 6: binary variable associated to the index
# - column 7: binary variable associated to the index with shock
Simul_GIRF <- function(Dicho_Y, Exp_X, Int, Lag, t_mod, n_simul, centile_shock, horizon, OC)
{
  # One group of 7 columns (GIRF_Dicho output) per simulation
  matrix_results <- matrix(data = 0, ncol = (7 * n_simul), nrow = (horizon + 1))
  # Number of bootstrap columns per simulation:
  # explanatory variables (+ intercept) + 1 binary variable
  if (length(Dicho_Y) == length(Exp_X) && Int == FALSE)
  {
    nvalue <- 2 # 1 explanatory variable + 1 binary variable
  }else if (length(Dicho_Y) == length(Exp_X) && Int == TRUE){
    nvalue <- 3 # 1 explanatory variable + 1 binary variable + 1 Intercept
  }else if (length(Dicho_Y) != length(Exp_X) && Int == FALSE){
    nvalue <- ncol(Exp_X) + 1 # n explanatory variables + 1 binary variable
  }else{
    nvalue <- ncol(Exp_X) + 2 # n explanatory variables + 1 binary variable + 1 Intercept
  }
  # Block bootstrap resampling of the series
  matrice_bootstrap <- BlockBootstrapp(Dicho_Y, Exp_X, Int, n_simul)
  # Estimation of coefficients and errors for each simulation
  for (compteur_simul in 1:n_simul)
  {
    # Bootstrapped binary variable (first column of the simulation's group)
    Dicho_Y_bootstrap <- matrice_bootstrap[, 1 + (compteur_simul - 1) * nvalue]
    # Bootstrapped explanatory variables (remaining columns of the group)
    Exp_X_bootstrap <- matrice_bootstrap[, (2 + (compteur_simul - 1) * nvalue):(compteur_simul * nvalue)]
    if (OC %in% c("NSR", "CSA", "AM")) {
      # Re-estimate the model on the bootstrapped series to get probabilities
      Results_serie_bootstrap <- Logistic_Estimation(Dicho_Y_bootstrap, Exp_X_bootstrap, FALSE, 1, Lag, t_mod)
      vecteur_proba_bootstrap <- Results_serie_bootstrap$prob
      # Binary variable aligned with the estimated probabilities
      vecteur_binary_bootstrap <- Dicho_Y_bootstrap[(1 + Lag):length(Dicho_Y_bootstrap)]
      pas_optim <- 0.0001 # grid step for the optimal cut-off search
      # Optimal cut-off according to the requested criterion
      threshold_estimated <- switch(OC,
        NSR = EWS_NSR_Criterion(vecteur_proba_bootstrap, vecteur_binary_bootstrap, pas_optim),
        CSA = EWS_CSA_Criterion(vecteur_proba_bootstrap, vecteur_binary_bootstrap, pas_optim),
        AM  = EWS_AM_Criterion(vecteur_proba_bootstrap, vecteur_binary_bootstrap, pas_optim))
    } else {
      # OC supplied directly as a numeric cut-off value.
      # (The previous version left threshold_estimated unassigned here,
      # which made any numeric OC fail at the GIRF_Dicho call.)
      threshold_estimated <- OC
    }
    # Estimation errors of the bootstrapped model
    Residuals_bootstrap <- Vector_Error(Dicho_Y_bootstrap, Exp_X_bootstrap, FALSE, 1, Lag, t_mod)
    # Shock size taken from the requested percentile of the residuals
    size_shock_bootstrap <- quantile(Residuals_bootstrap, centile_shock)
    # Response function for this simulation
    matrix_results[, (1 + (7 * (compteur_simul - 1))):(7 * compteur_simul)] <- GIRF_Dicho(Dicho_Y_bootstrap, Exp_X_bootstrap, Lag, FALSE, t_mod, horizon, size_shock_bootstrap, threshold_estimated)
  }
  return(matrix_results)
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%% GIRF Index IC %%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - results_simulation_GIRF: matrix output of the Simulation_GIRF function
# - CI_bounds: size of the confidence intervals
# - n_simul: number of simulations
# Output:
# List with:
# - Simulation_CI: the index values that belong in the CI for each horizon
# - values_CI: Index values with lower bound, average index, and upper bound for each horizon
GIRF_Index_CI <- function(results_simul_GIRF, CI_bounds, n_simul, horizon_forecast){
  n_h <- horizon_forecast + 1
  # Shock-minus-baseline index response, one column per simulation
  # (column 3 of each 7-column block = index with shock, column 2 = index)
  storage_index <- matrix(data = 0, nrow = n_h, ncol = n_simul)
  for (sim in seq_len(n_simul)) {
    base_col <- 7 * (sim - 1)
    storage_index[, sim] <- results_simul_GIRF[, base_col + 3] - results_simul_GIRF[, base_col + 2]
  }
  # Sort the simulated responses within each horizon
  for (h in seq_len(n_h)) {
    storage_index[h, ] <- sort(storage_index[h, ])
  }
  # Simulation ranks delimiting the confidence interval
  simul_inf <- ceiling(((1 - CI_bounds) / 2) * n_simul) # lower-bound rank
  simul_sup <- n_simul - simul_inf + 1                  # upper-bound rank
  # Simulated responses kept inside the CI
  result_CI <- storage_index[, simul_inf:simul_sup]
  # Average response inside the CI for each horizon
  mean_result <- vapply(seq_len(n_h),
                        function(h) mean(storage_index[h, simul_inf:simul_sup]),
                        numeric(1))
  # Lower bound, average, and upper bound for each horizon
  result_Graph <- matrix(data = 0, nrow = n_h, ncol = 3)
  result_Graph[, 1] <- storage_index[, simul_inf]
  result_Graph[, 2] <- mean_result
  result_Graph[, 3] <- storage_index[, simul_sup]
  results <- list(Simulation_CI = result_CI, values_CI = result_Graph)
  return(results)
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%% GIRF Index IC %%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - results_simulation_GIRF: matrix output of the Simulation_GIRF function
# - CI_bounds: size of the confidence intervals
# - n_simul: number of simulations
# Output:
# List with:
# - horizon
# - Simulation_CI_proba_shock: the proba_shock values that belong in the CI for each horizon
# - Simulation_CI_proba: the proba values that belong in the CI for each horizon
# - CI_proba_shock= proba_shock values with lower bound, average index, and upper bound for each horizon,
# - CI_proba= proba values with lower bound, average index, and upper bound for each horizon,
GIRF_Proba_CI <- function( results_simul_GIRF, CI_bounds, n_simul, horizon_forecast){
  # Number of reported horizons (0 .. horizon_forecast)
  n_h <- horizon_forecast + 1
  # Probability recovery, one column per simulation
  # (column 4 of each 7-column block = proba, column 5 = proba with shock).
  # NOTE: the previous version passed `horizon_forecast` as a stray positional
  # argument to matrix(), which was silently interpreted as `byrow`.
  storage_proba_shock <- matrix(data = 0, nrow = n_h, ncol = n_simul)
  storage_proba <- matrix(data = 0, nrow = n_h, ncol = n_simul)
  for (counter_simul in 1:n_simul)
  {
    storage_proba_shock[, counter_simul] <- results_simul_GIRF[, (5 + (7 * (counter_simul - 1)))]
    storage_proba[, counter_simul] <- results_simul_GIRF[, (4 + (7 * (counter_simul - 1)))]
  }
  # Sort the simulated probabilities within each horizon
  for (counter_horizon in 1:n_h)
  {
    storage_proba_shock[counter_horizon, ] <- sort(storage_proba_shock[counter_horizon, ])
    storage_proba[counter_horizon, ] <- sort(storage_proba[counter_horizon, ])
  }
  # Simulation ranks delimiting the confidence interval
  simul_inf <- ceiling(((1 - CI_bounds) / 2) * n_simul) # lower-bound rank
  simul_sup <- n_simul - simul_inf + 1                  # upper-bound rank
  # Simulations kept inside the CI (drop = FALSE keeps the matrix shape even
  # when the CI reduces to a single simulation, which previously crashed the
  # row-wise mean below)
  result_CI_proba_shock <- storage_proba_shock[, simul_inf:simul_sup, drop = FALSE]
  result_CI_proba <- storage_proba[, simul_inf:simul_sup, drop = FALSE]
  # Average probability inside the CI for each horizon
  mean_result_proba_shock <- as.vector(rep(0, n_h))
  mean_result_proba <- as.vector(rep(0, n_h))
  for (counter_horizon in 1:n_h)
  {
    mean_result_proba_shock[counter_horizon] <- mean(result_CI_proba_shock[counter_horizon, ])
    mean_result_proba[counter_horizon] <- mean(result_CI_proba[counter_horizon, ])
  }
  # Lower bound, average, and upper bound for each horizon
  result_proba_shock <- matrix(data = 0, nrow = n_h, ncol = 3)
  result_proba_shock[, 1] <- storage_proba_shock[, simul_inf]
  result_proba_shock[, 2] <- mean_result_proba_shock
  result_proba_shock[, 3] <- storage_proba_shock[, simul_sup]
  result_proba <- matrix(data = 0, nrow = n_h, ncol = 3)
  result_proba[, 1] <- storage_proba[, simul_inf]
  result_proba[, 2] <- mean_result_proba
  result_proba[, 3] <- storage_proba[, simul_sup]
  # Horizon labels 0..horizon_forecast
  horizon_vect <- 0:horizon_forecast
  results <- list(horizon = horizon_vect,
                  Simulation_CI_proba_shock = result_CI_proba_shock,
                  Simulation_CI_proba = result_CI_proba,
                  CI_proba_shock = result_proba_shock,
                  CI_proba = result_proba)
  return(results)
}
| /R/EWS_functions.R | no_license | cran/EWS | R | false | false | 77,565 | r | # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%% CODE FUNCTION %%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%% Function vector lag %%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Vector_target: a vector of size [T]
# - Nb_lag: number of lags
# - beginning: TRUE: lag at the beginning
# FALSE: lag at the end
# Output: new vector of size [T-nb_lag]
Vector_lag <- function(Vector_target, Nb_lag, beginning){
  # Nothing to drop: return the input unchanged.
  # (The previous version ran `Vector_target[1:0] <- NA` for Nb_lag = 0,
  # wrongly clobbering the first element.)
  if (Nb_lag <= 0) {
    return(as.vector(Vector_target))
  }
  if (beginning == TRUE) {
    # Drop the first Nb_lag observations by direct indexing.
    # (The previous NA-mark-then-filter approach also silently removed any
    # pre-existing NA values from the data.)
    results <- as.vector(Vector_target[-seq_len(Nb_lag)])
    return(results)
  }else{
    # Drop the last Nb_lag observations
    size_vector <- length(Vector_target)
    results <- as.vector(Vector_target[seq_len(size_vector - Nb_lag)])
    return(results)
  }
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%% Function matrix lag %%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Matrix_target: a matrix of size [T,n]
# - Nb_lag: number of lags
# - beginning: TRUE: lag at the beginning
# FALSE: lag at the end
# Output: Matrix of size [T-nb_lag,n]
Matrix_lag <- function(Matrix_target, Nb_lag, beginning){
  Matrix_target <- as.matrix(Matrix_target)
  # Nothing to drop: return the input unchanged
  if (Nb_lag <= 0) {
    return(Matrix_target)
  }
  if (beginning == TRUE) {
    # Drop the first Nb_lag rows by direct indexing; drop = FALSE keeps the
    # matrix shape even when a single row remains.
    # (The previous NA-mark / flatten / rebuild approach also silently
    # removed any pre-existing NA values and scrambled columns when it did.)
    results <- Matrix_target[-seq_len(Nb_lag), , drop = FALSE]
    return(results)
  }else{
    # Drop the last Nb_lag rows
    nrow_matrix <- nrow(Matrix_target) - Nb_lag
    results <- Matrix_target[seq_len(nrow_matrix), , drop = FALSE]
    return(results)
  }
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%% Function EWS: NSR threshold %%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Var_Proba: vector of calculated probabilities
# - Dicho_Y: Binary variable
# - cutoff_interval
# Output:
# threshold with NSR method
EWS_NSR_Criterion <- function(Var_Proba, Dicho_Y, cutoff_interval){
  # Binary outcomes aligned with the probability series
  realized <- Dicho_Y[seq_along(Var_Proba)]
  # Candidate cut-offs on a regular grid over [0, 1]
  cutoffs <- seq(0, 1, by = cutoff_interval)
  # Noise-to-signal ratio for a single candidate cut-off:
  # false alarms / correct signals, NA when no signal matches a crisis
  nsr_for <- function(ct) {
    signal <- Var_Proba >= ct
    good_signals <- sum(signal & realized == 1)
    false_alarms <- sum(signal & realized == 0)
    if (good_signals == 0) {
      NA_real_
    } else {
      false_alarms / good_signals
    }
  }
  ratios <- vapply(cutoffs, nsr_for, numeric(1))
  # Keep the original convention: a cut-off with no correct signal is itself
  # reported as NA, and NAs sort last (order's default na.last = TRUE)
  candidates <- ifelse(is.na(ratios), NA_real_, cutoffs)
  # Smallest cut-off achieving the minimum ratio (stable ascending order)
  optim_cutoff_NSR <- candidates[order(ratios)][1]
  return(optim_cutoff_NSR)
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%% Function EWS: AM Criteria %%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Var_Proba: vector of calculated probabilities
# - Dicho_Y: Binary variable
# - cutoff_interval
# Output:
# threshold with AM method
EWS_AM_Criterion <- function(Var_Proba, Dicho_Y, cutoff_interval){
  nb_period <- length(Dicho_Y)
  # Probabilities aligned with the binary series (loop bound of the original
  # implementation was length(Dicho_Y))
  p <- Var_Proba[seq_len(nb_period)]
  n_crisis <- sum(Dicho_Y)          # number of 1s in Dicho_Y
  n_calm <- nb_period - n_crisis    # number of 0s in Dicho_Y
  # Candidate cut-offs on a regular grid over [0, 1]
  cutoffs <- seq(0, 1, by = cutoff_interval)
  # |Sensitivity + Specificity - 1| for a single candidate cut-off
  score_for <- function(ct) {
    sensitivity <- sum(p >= ct & Dicho_Y == 1) / n_crisis
    specificity <- sum(p < ct & Dicho_Y == 0) / n_calm
    abs(sensitivity + specificity - 1)
  }
  scores <- vapply(cutoffs, score_for, numeric(1))
  # Smallest cut-off achieving the maximum score (stable decreasing order)
  optim_cutoff_AM <- cutoffs[order(scores, decreasing = TRUE)][1]
  return(optim_cutoff_AM)
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%% Function EWS: seuil CSA %%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Var_Proba: vector of calculated probabilities
# - Dicho_Y: Binary variable
# - cutoff_interval
# Output:
# threshold with CSA method
EWS_CSA_Criterion <- function(Var_Proba, Dicho_Y , cutoff_interval){
  nb_period <- length(Dicho_Y)
  # Probabilities aligned with the binary series (loop bound of the original
  # implementation was length(Dicho_Y))
  p <- Var_Proba[seq_len(nb_period)]
  n_crisis <- sum(Dicho_Y)          # number of 1s in Dicho_Y
  n_calm <- nb_period - n_crisis    # number of 0s in Dicho_Y
  # Candidate cut-offs on a regular grid over [0, 1]
  cutoffs <- seq(0, 1, by = cutoff_interval)
  # |Sensitivity - Specificity| for a single candidate cut-off
  score_for <- function(ct) {
    sensitivity <- sum(p >= ct & Dicho_Y == 1) / n_crisis
    specificity <- sum(p < ct & Dicho_Y == 0) / n_calm
    abs(sensitivity - specificity)
  }
  scores <- vapply(cutoffs, score_for, numeric(1))
  # Smallest cut-off achieving the minimum score (stable ascending order)
  optim_cutoff_CSA <- cutoffs[order(scores)][1]
  return(optim_cutoff_CSA)
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%% Vector error %%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Dicho_Y: Binary variable
# - Exp_X: Matrix of explanatory variables
# - Intercept: Boolean variable that is equal to TRUE (with intercept) or FALSE (without)
# - Nb_Id: number of individuals
# - Lag: number of lag for the logistic estimation
# - type_model:
#    1: static
#    2: dynamic with the lagged binary variable
#    3: dynamic with the lagged index variable
#    4: dynamic with both the lagged binary and the lagged index variable
# Output:
# Vector of estimation errors
# Vector of estimation errors (observed binary value minus fitted
# probability) from a logistic estimation.
#
# Args:
#   Dicho_Y: binary (0/1) vector
#   Exp_X: matrix (or vector) of explanatory variables
#   Intercept: TRUE to include an intercept, FALSE otherwise
#   Nb_Id: number of individuals (1 = univariate, >1 = balanced panel)
#   Lag: number of lags for the logistic estimation
#   type_model: 1 static, 2 lagged binary, 3 lagged index, 4 both
# Returns: numeric vector of errors, one per fitted probability.
Vector_Error <- function(Dicho_Y, Exp_X, Intercept, Nb_Id, Lag, type_model)
{
  # Logit estimation (Logistic_Estimation is defined elsewhere in file)
  logistic_results <- Logistic_Estimation(Dicho_Y, Exp_X, Intercept, Nb_Id, Lag, type_model)
  # NOTE(review): errors are taken against the FIRST length(prob)
  # elements of Dicho_Y, while prob is fitted on the lagged sample —
  # confirm this alignment against Vector_lag's convention.
  # (The original pre-initialization of the result to zeros was dead
  # code and has been removed; the subtraction allocates the result.)
  Dicho_Y[seq_along(logistic_results$prob)] - logistic_results$prob
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%% Logistic Estimation Function - 4 models %%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Dicho_Y: Binary variable
# - Exp_X: Matrix of explanatory variables
# - Intercept: Boolean variable that is equal to TRUE (with intercept) or FALSE (without)
# - Nb_Id: number of individuals
# - Lag: number of lag for the logistic estimation
# - type_model:
#    1: static
#    2: dynamic with the lagged binary variable
#    3: dynamic with the lagged index variable
#    4: dynamic with both the lagged binary and the lagged index variable
# Output: a list with
#  - "Estimation": data.frame of coefficients with robust standard
#                  errors, z-values and p-values
#  - "AIC"   : AIC criterion value
#  - "BIC"   : BIC criterion value
#  - "R2"    : pseudo-R2 value
#  - "index" : fitted index values
#  - "prob"  : fitted probabilities
#  - "LogLik": log-likelihood value
#  - "VarCov": robust variance-covariance matrix
Logistic_Estimation <- function(Dicho_Y, Exp_X, Intercept, Nb_Id, Lag, type_model)
{
# --------------------------------------------------------------------------------
# ------------------------------- Data Processing --------------------------------
# --------------------------------------------------------------------------------
T_size <- length(Dicho_Y) # number of periods for all sample
Nb_periode <- T_size/Nb_Id # number of periods for each country
# Add the intercept in the matrix of explanatory variables if Intercept=TRUE
if (Intercept==TRUE){
Cste <- as.vector(rep(1,T_size))
Exp_X <- cbind(Cste, Exp_X)
}else{
}
# For the model 2 and the model 4, we add the binary crisis in the matrix of explanatory variables
if (type_model==2){
Exp_X <- cbind(Exp_X, Dicho_Y)
}else if (type_model==4){
Exp_X <- cbind(Exp_X, Dicho_Y)
}else{
}
# Creation of Var_X_Reg and Var_Y_Reg
# - Var_X_Reg: vector or matrix of explanatory variables for the logit regression
# - Var_Y_Reg: vector of the binary variable for the logit regression
if (Nb_Id == 1){ # If there is only one Id (univariate case)
if ( length(Exp_X) == length(Dicho_Y) ) {
# Initialization
Var_X_Reg <- as.vector( rep ( 0 , (Nb_periode- Lag) ) )
Var_Y_Reg <- as.vector( rep ( 0 , (Nb_periode- Lag) ) )
# data processing with lag
Var_X_Reg[ 1 : (Nb_periode - Lag) ] <- Vector_lag( (Exp_X[1:Nb_periode]) , Lag , FALSE )
Var_Y_Reg[ 1 : (Nb_periode - Lag) ] <- Vector_lag( (Dicho_Y[1:Nb_periode]) , Lag , TRUE )
} else {
# Initialization
Var_X_Reg <- matrix(data = 0, nrow = (Nb_periode- Lag), ncol = (ncol(Exp_X)) )
Var_Y_Reg <- as.vector(rep(0, (Nb_periode- Lag)))
# data processing with lag
Var_X_Reg[1:(Nb_periode- Lag),] <- Matrix_lag( (Exp_X[1:Nb_periode,]) , Lag , FALSE )
Var_Y_Reg[1:(Nb_periode- Lag)] <- Vector_lag( (Dicho_Y[1:Nb_periode]) , Lag , TRUE )
}
}else{ # balanced panel case
# Creation of the fixed effects matrix
Fixed_Effects <- matrix( data=0, ncol = Nb_Id-1 , nrow=T_size )
for ( compteur in 1:(Nb_Id-1) ) {
Fixed_Effects[((compteur-1)*Nb_periode+1):(compteur*Nb_periode),compteur] <- 1
}
# New matrix of explanatory variables
Exp_X <- cbind( Exp_X , Fixed_Effects )
# Initialization
Var_X_Reg <- matrix( data = 0 , nrow = (Nb_periode- Lag) * Nb_Id , ncol = ( ncol(Exp_X) ) )
Var_Y_Reg <- as.vector( rep ( 0 , Nb_Id * (Nb_periode- Lag) ) )
# data processing with lag
for (compteur_Id in 1:Nb_Id){
Var_X_Reg[ ( 1 + ( (compteur_Id-1) * (Nb_periode- Lag) ) ) : ( (compteur_Id) * (Nb_periode- Lag) ),] <- Matrix_lag( (Exp_X[ ( 1 + ( (compteur_Id-1) * Nb_periode ) ) :( (compteur_Id) * Nb_periode ),]) , Lag , FALSE )
Var_Y_Reg[ ( 1 + ( (compteur_Id-1) * (Nb_periode- Lag) ) ) : ( compteur_Id * (Nb_periode- Lag) ) ] <- Vector_lag( (Dicho_Y[( 1 + ( (compteur_Id-1) * Nb_periode ) ) :( (compteur_Id) * Nb_periode )]) , Lag , TRUE )
}
}
# --------------------------------------------------------------------------------
# ----------------------------- Logistic Estimation ------------------------------
# --------------------------------------------------------------------------------
# ----------------------------------- Model 1 -----------------------------------
if (type_model==1) {
if (length(Var_X_Reg)==length(Var_Y_Reg)){
# Initialization - simple OLS
coeff_initialize <- solve(t(Var_X_Reg)%*%(Var_X_Reg))%*%t(Var_X_Reg)%*%(Var_Y_Reg)
coeff_initialize <- coeff_initialize/0.25
}else{
# Initialization - simple OLS
coeff_initialize <- as.vector(rep(0,ncol(Var_X_Reg)))
coeff_initialize <- solve(t(Var_X_Reg)%*%(Var_X_Reg))%*%t(Var_X_Reg)%*%(Var_Y_Reg)
if (Intercept==TRUE) {
coeff_initialize[1] <- (coeff_initialize[1]-0.4)/0.25
for (compteur in 2:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}else {
for (compteur in 1:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}
}
F_mod1 <- function(par) {
# Sample Size
T_size_function <- length(Var_Y_Reg)
# Number of Coeff
last <- length(par)
# Vector of parameters except the lagged index
beta <- as.vector(rep(0,last))
beta <- par[1:(last)]
# =================================
# === Construction of the index ===
# =================================
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size_function))
for (compteur in 1:T_size_function) {
if (length(Var_X_Reg)==T_size_function) {
expf[compteur] <- exp(Var_X_Reg[compteur] %*% beta)/(1+exp(Var_X_Reg[compteur] %*% beta))
}else{
expf[compteur] <- exp(Var_X_Reg[compteur,] %*% beta)/(1+exp(Var_X_Reg[compteur,] %*% beta))
}
}
prob <- as.vector(rep(0,T_size_function))
for (compteur in 1:T_size_function) {
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Vector of individual log-likelihood
lik <- Var_Y_Reg*log(prob)+(1-Var_Y_Reg)*log(1-prob)
# log-likelihood maximization function
return(-sum(lik))
}
if (length(Var_X_Reg)==length(Var_Y_Reg)) {
results <- optim(coeff_initialize, F_mod1, gr = NULL,
lower = -1, upper = 1, method = "Brent", control = list(maxit = 50000, factr = TRUE, abstol=0.00001), hessian = FALSE)
}else{
results <- optim(coeff_initialize, F_mod1, gr = NULL, method = "Nelder-Mead", control = list(maxit = 50000, factr = TRUE, abstol=0.00001), hessian = FALSE)
}
# Estimated parameters
if (length(Var_X_Reg)==length(Var_Y_Reg)){
Beta <- results$par[1]
}else{
Beta <- as.vector(rep(0,ncol(Var_X_Reg)))
Beta <- results$par[1:ncol(Var_X_Reg)]
}
# Approximation Hessian Matrix
hessc <- hessian(func=F_mod1, x=Beta , "Richardson")
# Initialisation of the vector of index
T_size <- length(Var_Y_Reg)
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size))
# Initialization of the vector of density
pdf <- as.vector(rep(0,T_size))
for (compteur in 1:T_size) {
if (length(Var_X_Reg)==length(Var_Y_Reg)) {
expf[compteur] <- exp(Var_X_Reg[compteur] %*% Beta)/(1+exp(Var_X_Reg[compteur] %*% Beta))
pdf[compteur] <- exp(Var_X_Reg[compteur] %*% Beta)/((1+exp(Var_X_Reg[compteur] %*% Beta))^2)
}else{
expf[compteur] <- exp(Var_X_Reg[compteur,] %*% Beta)/(1+exp(Var_X_Reg[compteur,] %*% Beta))
pdf[compteur] <- exp(Var_X_Reg[compteur,] %*% Beta)/((1+exp(Var_X_Reg[compteur,] %*% Beta))^2)
}
}
# The vector of estimated probabilities
prob <- as.vector(rep(0,T_size))
for (compteur in 1:T_size) {
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Matrix of explanatory variables
if (length(Var_X_Reg)==length(Var_Y_Reg)) {
X <- as.vector(rep(0,T_size-1))
X <- Var_X_Reg[2:T_size]
}else{
X <- matrix(0,nrow=T_size-1,ncol=(ncol(Var_X_Reg)))
X <- Var_X_Reg[2:T_size,]
}
# Matrix of gradient
vect_gradient <- as.vector(rep(0,T_size-1))
vect_gradient <- (Var_Y_Reg[2:T_size]-prob[2:T_size])/(prob[2:T_size]*(1-prob[2:T_size]))*pdf[2:T_size]
if (length(Var_X_Reg)==length(Var_Y_Reg)) {
gradient <- as.vector(rep(0,(T_size-1)))
gradient <- vect_gradient
}else{
gradient <- matrix(0,nrow=(T_size-1),ncol=(ncol(Var_X_Reg)))
for (compteur in 1:(ncol(Var_X_Reg))) {
gradient[,compteur] <- vect_gradient
}
}
gradient <- gradient * X
# Matrix of covariance of gradient
I=t(gradient)%*%gradient/(T_size-1)
# Bandwith parameter
bdw <- floor(4*(T_size/100)^(2/9))
for (compteur in 1:bdw) {
u=abs(compteur/bdw)
if ((0.5<u)&&(u<=1)) {
w=2*(1-abs(u))^3
}else if((0<u)&&(u<=0.5)) {
w=1-6*abs(u)^2+6*abs(u)^3
}
# Matrix of estimated autovariance of gradient
if (length(Var_X_Reg)==length(Var_Y_Reg)){
Covariance <- t(gradient[(1+compteur):(T_size-1)]) %*% gradient[1:(T_size-1-compteur)]/(T_size-compteur)
}else{
Covariance <- t(gradient[(1+compteur):(T_size-1),]) %*% gradient[1:(T_size-1-compteur),]/(T_size-compteur)
}
# Matrix of asymptotic correction
I <- I + w * (Covariance + t(Covariance))
}
# ===============
# === Results ===
# ===============
# Estimated parameters
if (length(Var_X_Reg)==length(Var_Y_Reg)) {
result_param <- Beta
ind <- as.vector(rep(0,T_size))
ind <- Var_X_Reg * Beta
}else{
result_param <- as.vector(rep(0,ncol(Var_X_Reg)))
result_param <- Beta
ind <- as.vector(rep(0,T_size))
ind <- Var_X_Reg %*% Beta
}
# Asymptotic Matrix of Var-Cov
V <- solve(hessc)
# Asymptotic standard errors (non robust)
Std <- t(diag(sqrt(abs(V))))
# Robust var-cov
VCM <- V %*% I %*% V * T_size
# Robust standard errors
Rob_Std <- t(diag(sqrt(abs(VCM))))
# Log-Likelihood
loglikelihood <- -results$value
if (length(Var_X_Reg)==length(Var_Y_Reg)){
# AIC information criteria
AIC <- -2*loglikelihood + 1
# BIC information criteria
BIC <- -2*loglikelihood + log(T_size)
}else{
# AIC information criteria
AIC <- -2*loglikelihood + ncol(Var_X_Reg)
# BIC information criteria
BIC <- -2*loglikelihood + (ncol(Var_X_Reg))*log(T_size)
}
# R2
Lc <- sum(Var_Y_Reg*log(mean(Var_Y_Reg))+(1-Var_Y_Reg)*log(1-mean(Var_Y_Reg)))
R2 <- 1 - ((loglikelihood -ncol(Var_X_Reg))/Lc)
# initialize the coefficients matrix results
if (Intercept==TRUE){
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X-1))
nameVarX <- c(1:(nb_Var_X-1))
}else{
if (length(Var_X_Reg)==length(Var_Y_Reg)){
nb_Var_X <- 1
nameVarX <- 1
}else{
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X))
nameVarX <- c(1:(nb_Var_X))
}
}
name <- as.vector(rep(0,nb_Var_X))
Estimate <- as.vector(rep(0,nb_Var_X))
Std.Error <- as.vector(rep(0,nb_Var_X))
zvalue <- as.vector(rep(0,nb_Var_X))
Pr <- as.vector(rep(0,nb_Var_X))
if (Intercept==TRUE){
# DataFrame with coefficients and significativity with intercept
Coeff.results <- data.frame(
name = c("Intercept" ,nameVarX),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}else{
# DataFrame with coefficients and significativity
Coeff.results <- data.frame(
name = c(nameVarX),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}
results <- list(Estimation=Coeff.results,
AIC=AIC, BIC=BIC, R2=R2, index=ind[1:T_size], prob = prob, LogLik=loglikelihood, VarCov=VCM)
# ----------------------------------- Modele 2 -----------------------------------
} else if (type_model==2) {
# Initialization
coeff_initialize <- as.vector(rep(0,ncol(Var_X_Reg)))
coeff_initialize <- solve(t(Var_X_Reg)%*%(Var_X_Reg))%*%t(Var_X_Reg)%*%(Var_Y_Reg)
if (Intercept==TRUE) {
coeff_initialize[1] <- (coeff_initialize[1]-0.4)/0.25
for (compteur in 2:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}else{
for (compteur in 1:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}
F_mod2 <- function(par) {
# Sample Size
T_size_function <- length(Var_Y_Reg)
# Number of Coeff
last <- length(par)
# Vector of parameters except the lagged index
beta <- as.vector(rep(0,last))
beta <- par[1:(last)]
# =================================
# === Construction of the index ===
# =================================
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size_function))
for (compteur in 1:T_size_function) {
expf[compteur] <- exp(Var_X_Reg[compteur,] %*% beta)/(1+exp(Var_X_Reg[compteur,] %*% beta))
}
prob <- as.vector(rep(0,T_size_function))
for (compteur in 1:T_size_function) {
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Vector of individual log-likelihood
lik <- Var_Y_Reg*log(prob)+(1-Var_Y_Reg)*log(1-prob)
# log-likelihood maximization function
return(-sum(lik))
}
results <- optim(coeff_initialize, F_mod2, gr = NULL, method = "Nelder-Mead", control = list(maxit = 500000, factr = TRUE, abstol=0.00001), hessian = FALSE)
# Estimated parameters
Beta <- as.vector(rep(0,ncol(Var_X_Reg)))
Beta <- results$par[1:ncol(Var_X_Reg)]
# Approximation Hessian Matrix
hessc <- hessian(func=F_mod2, x=Beta , "Richardson")
# Initialization of the vector of index
T_size <- length(Var_Y_Reg)
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size))
# Initialization of the vector of density
pdf <- as.vector(rep(0,T_size))
for (compteur in 1:T_size) {
expf[compteur] <- exp(Var_X_Reg[compteur,] %*% Beta)/(1+exp(Var_X_Reg[compteur,] %*% Beta))
pdf[compteur] <- exp(Var_X_Reg[compteur,] %*% Beta)/((1+exp(Var_X_Reg[compteur,] %*% Beta))^2)
}
# The vector of estimated probabilities
prob <- as.vector(T_size)
for (compteur in 1:T_size) {
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Matrix of explicative variables
X <- matrix(0,nrow=T_size-1,ncol=(ncol(Var_X_Reg)))
X <- Var_X_Reg[2:T_size,]
# Matrix of gradient
vect_gradient <- as.vector(rep(0,T_size-1))
vect_gradient <- (Var_Y_Reg[2:T_size]-prob[2:T_size])/(prob[2:T_size]*(1-prob[2:T_size]))*pdf[2:T_size]
gradient <- matrix(0,nrow=(T_size-1),ncol=(ncol(Var_X_Reg)))
for (compteur in 1:(ncol(Var_X_Reg))) {
gradient[,compteur] <- vect_gradient
}
gradient <- gradient * X
# Matrix of covariance of gradient
I=t(gradient)%*%gradient/(T_size-1)
# Bandwith parameter
bdw <- floor(4*(T_size/100)^(2/9))
for (compteur in 1:bdw) {
u=abs(compteur/bdw)
if ((0.5<u)&&(u<=1)){
w=2*(1-abs(u))^3
}else if((0<u)&&(u<=0.5)){
w=1-6*abs(u)^2+6*abs(u)^3
}
# Matrix of estimated autovariance of gradient
Covariance <- t(gradient[(1+compteur):(T_size-1),]) %*% gradient[1:(T_size-1-compteur),]/(T_size-compteur)
# Matrix of asymptotic correction
I <- I + w * (Covariance + t(Covariance))
}
# ===============
# === Results ===
# ===============
# Estimated parameters
result_param <- as.vector(rep(0,ncol(Var_X_Reg)))
result_param <- Beta
ind <- as.vector(rep(0,T_size))
ind <- Var_X_Reg %*% Beta
# Asymptotic Matrix of Var-Cov
V <- solve(hessc)
# Asymptotic standard errors (non robust)
Std <- t(diag(sqrt(abs(V))))
# Robust var-cov
VCM <- V %*% I %*% V * T_size
# Robust standard errors
Rob_Std <- t(diag(sqrt(abs(VCM))))
# Log-Likelihood
loglikelihood <- -results$value
# AIC information criteria
AIC <- -2*loglikelihood + ncol(Var_X_Reg)
# BIC information criteria
BIC <- -2*loglikelihood + (ncol(Var_X_Reg))*log(T_size)
# R2
Lc <- sum(Var_Y_Reg*log(mean(Var_Y_Reg))+(1-Var_Y_Reg)*log(1-mean(Var_Y_Reg)))
R2 <- 1 - ((loglikelihood -ncol(Var_X_Reg))/Lc)
# initialize the coefficients matrix results
if (Intercept==TRUE){
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X-2))
nameVarX <- c(1:(nb_Var_X-2))
}else{
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X-1))
nameVarX <- c(1:(nb_Var_X-1))
}
name <- as.vector(rep(0,nb_Var_X))
Estimate <- as.vector(rep(0,nb_Var_X))
Std.Error <- as.vector(rep(0,nb_Var_X))
zvalue <- as.vector(rep(0,nb_Var_X))
Pr <- as.vector(rep(0,nb_Var_X))
if (Intercept==TRUE){
# DataFrame with coefficients and significativity with intercept
if (Nb_Id > 1){
Coeff.results <- data.frame(
name = c("Intercept" , nameVarX[1:(nb_Var_X-2-Nb_Id+1)] ,"Binary_Lag", nameVarX[(nb_Var_X-2-Nb_Id+2):(nb_Var_X-2)]),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}else{
Coeff.results <- data.frame(
name = c("Intercept" , nameVarX ,"Binary_Lag"),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}
}else{
# DataFrame with coefficients and significativity
if (Nb_Id>1){
Coeff.results <- data.frame(
name = c(nameVarX[1:(nb_Var_X-1-Nb_Id+1)] ,"Binary_Lag", nameVarX[(nb_Var_X-1-Nb_Id+2):(nb_Var_X-1)]),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}else{
Coeff.results <- data.frame(
name = c(nameVarX ,"Binary_Lag"),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}
}
results <- list(Estimation=Coeff.results,
AIC=AIC, BIC=BIC, R2=R2, index=ind[1:T_size], prob = prob, LogLik=loglikelihood, VarCov=VCM)
# ----------------------------------- Modele 3 -----------------------------------
} else if (type_model==3){
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
# Initialization of coefficients
coeff_initialize <- as.vector( rep(0,2) )
coeff_initialize[1] <- solve( t(Var_X_Reg) %*% (Var_X_Reg) ) %*% t(Var_X_Reg) %*% (Var_Y_Reg)
# Initialization of the Index
Pi <- 0
coeff_initialize[2] <- Pi
}else{
# Initialization of coefficients
coeff_initialize <- as.vector( rep ( 0, ncol(Var_X_Reg) + 1 ) )
coeff_initialize[1:ncol(Var_X_Reg)] <- solve(t(Var_X_Reg)%*%(Var_X_Reg))%*%t(Var_X_Reg)%*%(Var_Y_Reg)
# Initialization of the Index
Pi <- 0
coeff_initialize[ncol(Var_X_Reg)+1] <- Pi
}
if (Intercept==TRUE) {
coeff_initialize[1] <- (coeff_initialize[1]-0.4)/0.25
for (compteur in 2:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}else{
if ( length(Var_X_Reg) == length(Var_X_Reg) ) {
coeff_initialize <- coeff_initialize/0.25
}else{
for (compteur in 1:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}
}
F_mod3 <- function(par) {
# Sample Size
T_size_function <- length(Var_Y_Reg)
# Number of Coeff
last <- length(par)
# Vector of parameters except the lagged index
beta <- as.vector(rep(0,last-1))
beta <- par[1:(last-1)]
# Parameter of the lagged index (here a logistic transformation)
alpha <- par[last]/(1+abs(par[last]))
# =================================
# === Construction of the index ===
# =================================
# Initialization of the vector of the index
ind <- as.vector(rep(0,T_size_function))
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size_function))
# Initial value for the index
mean_Var_X <- as.vector(rep(0,(last-1)))
for (compteur in 1:(last-1)) {
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
mean_Var_X[compteur] <- mean(Var_X_Reg)
p0 <- mean_Var_X * beta/(1-alpha)
ind[1] <- p0
expf[1] <- exp(ind[1])/(1+exp(ind[1]))
} else {
mean_Var_X[compteur] <- mean(Var_X_Reg[,compteur])
p0 <- mean_Var_X %*% beta/(1-alpha)
ind[1] <- p0
expf[1] <- exp(ind[1])/(1+exp(ind[1]))
}
}
for (compteur in 2:T_size_function) {
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
ind[compteur] <- alpha * ind[compteur-1] + Var_X_Reg[compteur] * beta
} else {
ind[compteur] <- alpha * ind[compteur-1] + Var_X_Reg[compteur,] %*% beta
}
expf[compteur] <- exp(ind[compteur])/(1+exp(ind[compteur]))
}
prob <- as.vector(rep(0,T_size_function))
for (compteur in 1:T_size_function) {
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Vector of individual log-likelihood
lik <- Var_Y_Reg*log(prob)+(1-Var_Y_Reg)*log(1-prob)
# We do not consider the initial condition on the index
estim_lik <- as.vector(rep(0,(T_size_function-1)))
estim_lik <- lik[2:T_size_function]
# log-likelihood maximization function
return(-sum(lik))
}
results <- optim(coeff_initialize, F_mod3, gr = NULL, method = "Nelder-Mead", control = list(maxit = 500000, factr = TRUE, abstol=0.00001), hessian = FALSE)
# Logistic transformation of alpha (inverse)
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
psi <- results$par[2]
# Estimated parameter alpha
alpha <- psi/(1+abs(psi))
# Intermediate element required by tge analytical gradient
h <- 0.000001
# Analytical Gradient for alpha / psi
d2 <- (((psi+h)/(1+abs(psi+h)))-((psi-h)/(1+abs(psi-h))))/(2*h)
# Matrix of Taylor development of the logistic function
C <- diag(2)
C[2,2] <- d2
}else{
psi <- results$par[ncol(Var_X_Reg)+1]
# Estimated parameter alpha
alpha <- psi/(1+abs(psi))
# Intermediate element required by tge analytical gradient
h <- 0.000001
# Analytical Gradient for alpha / psi
d2 <- (((psi+h)/(1+abs(psi+h)))-((psi-h)/(1+abs(psi-h))))/(2*h)
# Matrix of Taylor development of the logistic function
C <- diag(ncol(Var_X_Reg)+1)
C[(ncol(Var_X_Reg)+1),(ncol(Var_X_Reg)+1)] <- d2
}
# =======================
# === Correction term ===
# =======================
# Estimated parameters
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
Beta <- results$par[1]
}else{
Beta <- as.vector(rep(0,ncol(Var_X_Reg)))
Beta <- results$par[1:ncol(Var_X_Reg)]
}
# Initialization of the vector of index
T_size <- length(Var_Y_Reg)
ind <- as.vector(rep(0,T_size))
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size))
# Initialization of the vector of density
pdf <- as.vector(rep(0,T_size))
# Initial value for the index
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
last <- 2
mean_Var_X <- mean(Var_X_Reg)
p0 <- mean_Var_X * Beta/(1-alpha)
ind[1] <- p0
expf[1] <- exp(ind[1])/(1+exp(ind[1]))
pdf[1] <- exp(ind[1])/((1+exp(ind[1]))^2)
for (compteur in 2:T_size) {
ind[compteur] <- alpha * ind[compteur-1] + Var_X_Reg[compteur] * Beta
expf[compteur] <- exp(ind[compteur])/(1+exp(ind[compteur]))
pdf[compteur] <- exp(ind[compteur])/((1+exp(ind[compteur]))^2)
}
}else{
last <- ncol(Var_X_Reg)+1
mean_Var_X <- as.vector(rep(0,(last-1)))
for (compteur in 1:(last-1)) {
mean_Var_X[compteur] <- mean(Var_X_Reg[,compteur])
}
p0 <- mean_Var_X %*% Beta/(1-alpha)
ind[1] <- p0
expf[1] <- exp(ind[1])/(1+exp(ind[1]))
pdf[1] <- exp(ind[1])/((1+exp(ind[1]))^2)
for (compteur in 2:T_size) {
ind[compteur] <- alpha * ind[compteur-1] + Var_X_Reg[compteur,] %*% Beta
expf[compteur] <- exp(ind[compteur])/(1+exp(ind[compteur]))
pdf[compteur] <- exp(ind[compteur])/((1+exp(ind[compteur]))^2)
}
}
# ==============================
# === Robust standard Errors ===
# ==============================
# The vector of estimated probabilities
prob <- as.vector(T_size)
for (compteur in 1:T_size) {
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Initial value for the index
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
# Matrix of explanatory variables (the first of observation is equal to p0)
X <- matrix(0,nrow=(T_size-1),ncol=2)
X[,1] <- Var_X_Reg[2:T_size]
X[,2] <- ind[1:(T_size-1)]
# Matrix of gradient
vect_gradient <- as.vector(rep(0,(T_size-1)))
vect_gradient <- (Var_Y_Reg[2:T_size]-prob[2:T_size])/(prob[2:T_size]*(1-prob[2:T_size]))*pdf[2:T_size]
gradient <- matrix(0,nrow=(T_size-1),ncol=2)
for (compteur in 1:2){
gradient[,compteur] <- vect_gradient
}
gradient <- gradient * X
}else{
# Matrix of explanatory variables (the first of observation is equal to p0)
X <- matrix(0,nrow=(T_size-1),ncol=(ncol(Var_X_Reg)+1))
X[,1:(ncol(Var_X_Reg))] <- Var_X_Reg[2:T_size,]
X[,ncol(Var_X_Reg)+1] <- ind[1:(T_size-1)]
# Matrix of gradient
vect_gradient <- as.vector(rep(0,T_size-1))
vect_gradient <- (Var_Y_Reg[2:T_size]-prob[2:T_size])/(prob[2:T_size]*(1-prob[2:T_size]))*pdf[2:T_size]
gradient <- matrix(0,nrow=(T_size-1),ncol=(ncol(Var_X_Reg)+1))
for (compteur in 1:(ncol(Var_X_Reg)+1)) {
gradient[,compteur] <- vect_gradient
}
gradient <- gradient * X
}
# Matrix of covariance of gradient
I=t(gradient)%*%gradient/(T_size-1)
# Bandwith parameter
bdw <- floor(4*(T_size/100)^(2/9))
for (compteur in 1:bdw) {
u=abs(compteur/bdw)
if ((0.5<u)&&(u<=1)) {
w=2*(1-abs(u))^3
}else if((0<u)&&(u<=0.5)){
w=1-6*abs(u)^2+6*abs(u)^3
}
# Matrix of estimated autovariance of gradient
Covariance <- t(gradient[(1+compteur):(T_size-1),]) %*% gradient[1:(T_size-1-compteur),]/(T_size-compteur)
# Matrix of asymptotic correction
I <- I + w * (Covariance + t(Covariance))
}
# ===============
# === Results ===
# ===============
if ( length(Var_X_Reg) == length(Var_Y_Reg) ) {
# Parameter alpha
alpha <- psi / (1+abs(psi))
# Estimated parameters
result_param <- as.vector(rep(0,2))
result_param <- c(Beta, alpha)
# Hessian matrix
hessc <- matrix(0,2,2)
hessc <- hessian(func=F_mod3, x=result_param , "Richardson")
# Non-robust variance-covariance matrix in the new space
V <- C %*% solve(hessc) %*% C
# Asymptotic Matrix of Var-Cov
V_nonrobust <- V
# Asymptotic standard errors (non robust)
Std <- t(diag(sqrt(abs(V_nonrobust))))
# Robust var-cov
VCM <- V %*% I %*% V * T_size
# Robust standard errors
Rob_Std <- t(diag(sqrt(abs(VCM))))
# Log-Likelihood
loglikelihood <- - results$value
# AIC information criteria
AIC <- -2*loglikelihood + 2
# BIC information criteria
BIC <- -2*loglikelihood + 2*log(T_size)
# R2
Lc <- sum(Var_Y_Reg*log(mean(Var_Y_Reg))+(1-Var_Y_Reg)*log(1-mean(Var_Y_Reg)))
R2 <- 1 - ((loglikelihood -1)/Lc)
}else{
# Parameter alpha
alpha <- psi / (1+abs(psi))
# Estimated parameters
result_param <- as.vector(rep(0,ncol(Var_X_Reg)+1))
result_param <- c(Beta, alpha)
# Hessian matrix
hessc <- matrix(0,(ncol(Var_X_Reg)+1),(ncol(Var_X_Reg)+1))
hessc <- hessian(func=F_mod3, x=result_param , "Richardson")
# Non-robust variance-covariance matrix in the new space
V <- C %*% solve(hessc) %*% C
# Asymptotic Matrix of Var-Cov
V_nonrobust <- V
# Asymptotic standard errors (non robust)
Std <- t(diag(sqrt(abs(V_nonrobust))))
# Robust var-cov
VCM <- V %*% I %*% V * T_size
# Robust standard errors
Rob_Std <- t(diag(sqrt(abs(VCM))))
# Log-Likelihood
loglikelihood <- - results$value
# AIC information criteria
AIC <- -2*loglikelihood + ncol(Var_X_Reg)+1
# BIC information criteria
BIC <- -2*loglikelihood + (ncol(Var_X_Reg)+1)*log(T_size)
# R2
Lc <- sum(Var_Y_Reg*log(mean(Var_Y_Reg))+(1-Var_Y_Reg)*log(1-mean(Var_Y_Reg)))
R2 <- 1 - ((loglikelihood -ncol(Var_X_Reg))/Lc)
}
# initialize the coefficients matrix results
if (Intercept==TRUE){
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X-1))
nameVarX <- c(1:(nb_Var_X-1))
}else{
if (length(Var_X_Reg)==length(Var_Y_Reg)){
nb_Var_X <- 1
nameVarX <- 1
}else{
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X))
nameVarX <- c(1:(nb_Var_X))
}
}
name <- as.vector(rep(0,nb_Var_X+1))
Estimate <- as.vector(rep(0,nb_Var_X+1))
Std.Error <- as.vector(rep(0,nb_Var_X+1))
zvalue <- as.vector(rep(0,nb_Var_X+1))
Pr <- as.vector(rep(0,nb_Var_X+1))
if (Intercept==TRUE){
# DataFrame with coefficients and significativity with intercept
if (Nb_Id>1){
Coeff.results <- data.frame(
name = c("Intercept" , nameVarX[1:(nb_Var_X-1-Nb_Id+1)] ,"Index_Lag", nameVarX[(nb_Var_X-1-Nb_Id+2):(nb_Var_X-1)]),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}else{
Coeff.results <- data.frame(
name = c("Intercept" , nameVarX ,"Index_Lag"),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}
}else{
# DataFrame with coefficients and significativity
if (Nb_Id>1){
Coeff.results <- data.frame(
name = c(nameVarX[1:(nb_Var_X-Nb_Id+1)] ,"Index_Lag", nameVarX[(nb_Var_X-Nb_Id+2):(nb_Var_X)]),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}else{
Coeff.results <- data.frame(
name = c(nameVarX ,"Index_Lag"),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}
}
results <- list(Estimation=Coeff.results,
AIC=AIC, BIC=BIC, R2=R2, index=ind[1:T_size], prob = prob, LogLik=loglikelihood, VarCov=VCM)
# ----------------------------------- Modele 4 -----------------------------------
} else if (type_model==4) {
# Initialization of coefficients
coeff_initialize <- as.vector(rep(0,ncol(Var_X_Reg)+1))
coeff_initialize <- solve(t(Var_X_Reg)%*%(Var_X_Reg))%*%t(Var_X_Reg)%*%(Var_Y_Reg)
# Initialisation of the index
Pi <- 0
coeff_initialize[ncol(Var_X_Reg)+1] <- Pi
if (Intercept==1) {
coeff_initialize[1] <- (coeff_initialize[1]-0.4)/0.25
for (compteur in 2:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}else{
for (compteur in 1:ncol(Var_X_Reg)) {
coeff_initialize[compteur] <- coeff_initialize[compteur]/0.25
}
}
F_mod4 <- function(par) {
# Sample Size
T_size_function <- length(Var_Y_Reg)
# Number of Coeff
last <- length(par)
# Vector of parameters except the lagged index
beta <- as.vector(rep(0,last-1))
beta <- par[1:(last-1)]
# Parameter of the lagged index (here a logistic transformation)
alpha <- par[last]/(1+abs(par[last]))
# =================================
# === Construction of the index ===
# =================================
# Initialization of the vector of the index
ind <- as.vector(rep(0,T_size_function))
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size_function))
# Initial value for the index
mean_Var_X <- as.vector(rep(0,(last-1)))
for (compteur in 1:(last-1)) {
mean_Var_X[compteur] <- mean(Var_X_Reg[,compteur])
}
p0 <- mean_Var_X %*% beta/(1-alpha)
ind[1] <- p0
expf[1] <- exp(ind[1])/(1+exp(ind[1]))
for (compteur in 2:T_size_function) {
ind[compteur] <- alpha * ind[compteur-1] + Var_X_Reg[compteur,] %*% beta
expf[compteur] <- exp(ind[compteur])/(1+exp(ind[compteur]))
}
prob <- as.vector(rep(0,T_size_function))
for (compteur in 1:T_size_function) {
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Vector of individual log-likelihood
lik <- Var_Y_Reg*log(prob)+(1-Var_Y_Reg)*log(1-prob)
# We do not consider the initial condition on the index
estim_lik <- as.vector(rep(0,(T_size_function-1)))
estim_lik <- lik[2:T_size_function]
# log-likelihood maximization function
return(-sum(lik))
}
results <- optim(coeff_initialize, F_mod4, gr = NULL, method = "Nelder-Mead", control = list(maxit = 500000, factr = TRUE, abstol=0.00001), hessian = FALSE)
# Logistic transformation of alpha (inverse)
psi <- results$par[ncol(Var_X_Reg)+1]
# Estimated parameter alpha
alpha <- psi/(1+abs(psi))
# Intermediate element required by tge analytical gradient
h <- 0.000001
# Analytical Gradient for alpha / psi
d2 <- (((psi+h)/(1+abs(psi+h)))-((psi-h)/(1+abs(psi-h))))/(2*h)
# Matrix of Taylor development of the logistic function
C <- diag(ncol(Var_X_Reg)+1)
C[(ncol(Var_X_Reg)+1),(ncol(Var_X_Reg)+1)] <- d2
# =======================
# === Correction term ===
# =======================
# Estimated parameters
Beta <- as.vector(rep(0,ncol(Var_X_Reg)))
Beta <- results$par[1:ncol(Var_X_Reg)]
# Initialization of the vector of index
T_size <- length(Var_Y_Reg)
ind <- as.vector(rep(0,T_size))
# Initialization of the vector of probability
expf <- as.vector(rep(0,T_size))
# Initialisation of the vector of density
pdf <- as.vector(rep(0,T_size))
# Initial value for the index
last <- ncol(Var_X_Reg)+1
mean_Var_X <- as.vector(rep(0,(last-1)))
for (compteur in 1:(last-1)) {
mean_Var_X[compteur] <- mean(Var_X_Reg[,compteur])
}
p0 <- mean_Var_X %*% Beta/(1-alpha)
ind[1] <- p0
expf[1] <- exp(ind[1])/(1+exp(ind[1]))
pdf[1] <- exp(ind[1])/((1+exp(ind[1]))^2)
for (compteur in 2:T_size){
ind[compteur] <- alpha * ind[compteur-1] + Var_X_Reg[compteur,] %*% Beta
expf[compteur] <- exp(ind[compteur])/(1+exp(ind[compteur]))
pdf[compteur] <- exp(ind[compteur])/((1+exp(ind[compteur]))^2)
}
# ==============================
# === Robust standard Errors ===
# ==============================
# The vector of estimated probabilities
prob <- as.vector(rep(0,T_size))
for (compteur in 1:T_size){
prob[compteur] <- min(max(expf[compteur],0.0000001),0.999999)
}
# Matrix of explanatory variables (the first observation is equal to p0)
X <- matrix(0,nrow=(T_size-1),ncol=(ncol(Var_X_Reg)+1))
X[,1:(ncol(Var_X_Reg))] <- Var_X_Reg[2:T_size,]
X[,ncol(Var_X_Reg)+1] <- ind[1:(T_size-1)]
# Matrix of gradient
vect_gradient <- as.vector(rep(0,T_size-1))
vect_gradient <- (Var_Y_Reg[2:T_size]-prob[2:T_size])/(prob[2:T_size]*(1-prob[2:T_size]))*pdf[2:T_size]
gradient <- matrix(0,nrow=(T_size-1),ncol=(ncol(Var_X_Reg)+1))
for (compteur in 1:(ncol(Var_X_Reg)+1)) {
gradient[,compteur] <- vect_gradient
}
gradient <- gradient * X
# Matrix of covariance of gradient
I=t(gradient)%*%gradient/(T_size-1)
# Bandwidth parameter
bdw <- floor(4*(T_size/100)^(2/9))
for (compteur in 1:bdw) {
u=abs(compteur/bdw)
if ((0.5<u)&&(u<=1)) {
w=2*(1-abs(u))^3
}else if((0<u)&&(u<=0.5)){
w=1-6*abs(u)^2+6*abs(u)^3
}
# Matrix of estimated autovariance of gradient
Covariance <- t(gradient[(1+compteur):(T_size-1),]) %*% gradient[1:(T_size-1-compteur),]/(T_size-compteur)
# Matrix of asymptotic correction
I <- I + w * (Covariance + t(Covariance))
}
# ===============
# === Results ===
# ===============
# Parameter alpha
alpha <- psi / (1+abs(psi))
# Estimated parameters
result_param <- as.vector(rep(0,ncol(Var_X_Reg)+1))
result_param <- c(Beta, alpha)
# Hessian matrix
hessc <- matrix(0,(ncol(Var_X_Reg)+1),(ncol(Var_X_Reg)+1))
hessc <- hessian(func=F_mod4, x=result_param , "Richardson")
# Non-robust variance-covariance matrix in the new space
V <- C %*% solve(hessc) %*% C
# Asymptotic Matrix of Var-Cov
V_nonrobust <- V
# Asymptotic standard errors (non robust)
Std <- t(diag(sqrt(abs(V_nonrobust))))
# Robust var-cov
VCM <- V %*% I %*% V * T_size
# Robust standard errors
Rob_Std <- t(diag(sqrt(abs(VCM))))
# Log-Likelihood
loglikelihood <- -results$value
# AIC information criteria
AIC <- -2*loglikelihood + ncol(Var_X_Reg)+1
# BIC information criteria
BIC <- -2*loglikelihood + (ncol(Var_X_Reg)+1)*log(T_size)
# R2
Lc <- sum(Var_Y_Reg*log(mean(Var_Y_Reg))+(1-Var_Y_Reg)*log(1-mean(Var_Y_Reg)))
R2 <- 1 - ((loglikelihood -ncol(Var_X_Reg))/Lc)
# initialize the coefficients matrix results
if (Intercept==TRUE){
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X-2))
nameVarX <- c(1:(nb_Var_X-2))
}else{
nb_Var_X <- ncol(Var_X_Reg)
nameVarX <- as.vector(rep(0,nb_Var_X-1))
nameVarX <- c(1:(nb_Var_X-1))
}
name <- as.vector(rep(0,nb_Var_X+1))
Estimate <- as.vector(rep(0,nb_Var_X+1))
Std.Error <- as.vector(rep(0,nb_Var_X+1))
zvalue <- as.vector(rep(0,nb_Var_X+1))
Pr <- as.vector(rep(0,nb_Var_X+1))
if (Intercept==TRUE){
# DataFrame with coefficients and significativity with intercept
if (Nb_Id>1){
Coeff.results <- data.frame(
name = c("Intercept" , nameVarX[1:(nb_Var_X-2-Nb_Id+1)],"Binary_Lag" ,"Index_Lag", nameVarX[(nb_Var_X-2-Nb_Id+2):(nb_Var_X-2)]),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}else{
Coeff.results <- data.frame(
name = c("Intercept" , nameVarX , "Binary_Lag", "Index_Lag"),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}
}else{
# DataFrame with coefficients and significativity
if (Nb_Id>1){
Coeff.results <- data.frame(
name = c(nameVarX[1:(nb_Var_X-1-Nb_Id+1)],"Binary_Lag" ,"Index_Lag", nameVarX[(nb_Var_X-1-Nb_Id+2):(nb_Var_X-1)]),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}else{
Coeff.results <- data.frame(
name = c(nameVarX , "Binary_Lag", "Index_Lag"),
Estimate = result_param,
Std.Error = t(Rob_Std),
zvalue = t(result_param/Rob_Std),
Pr = (1-pnorm(q=abs(t(result_param/Rob_Std))))*2,
stringsAsFactors = FALSE
)
}
}
results <- list(Estimation=Coeff.results,
AIC=AIC, BIC=BIC, R2=R2, index=ind[1:T_size], prob = prob, LogLik=loglikelihood, VarCov=VCM)
}
return(results)
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%% Function BlockBootstrapp %%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Dicho_Y: Binary variable
# - Exp_X: Matrix of explanatory variables
# - Intercept: Boolean variable that is equal to TRUE (with intercept) or FALSE (without)
# - n_simul: number of simulations for the block bootstrap
# Output:
# Matrix with bootstrap series
BlockBootstrapp <- function(Dicho_Y, Exp_X, Intercept, n_simul)
{
# Moving-block bootstrap of the binary series and its explanatory variables.
# Returns a matrix holding n_simul resampled datasets side by side: for
# simulation s, column 1+(s-1)*nvalue is the binary variable and the next
# nvalue-1 columns are the explanatory variables.
# optimal block size (rule-of-thumb T^(1/5))
size_block <- floor(length(Dicho_Y)^(1/5))
# Add an intercept column when requested. length(Dicho_Y)==length(Exp_X)
# signals that Exp_X is a single explanatory variable supplied as a vector.
if (length(Dicho_Y)==length(Exp_X) && Intercept==TRUE){
Exp_X <- matrix(data=c(rep(1,length(Dicho_Y)), Exp_X), ncol=2, nrow=length(Dicho_Y))
}else if (length(Dicho_Y)!=length(Exp_X) && Intercept==TRUE){
Exp_X <- matrix(data=c(rep(1,length(Dicho_Y)), Exp_X), ncol=(1+ncol(Exp_X)), nrow=length(Dicho_Y))
}
# number of columns for simulation matrix
if (length(Dicho_Y)==length(Exp_X))
{
nvalue <- 2 # 1 explanatory variable + 1 binary variable
}else
{
nvalue <- ncol(Exp_X)+1 # n explanatory variables + 1 binary variable
}
# Initialization of matrix results
matrix_results <- matrix(data=0, ncol= (nvalue*n_simul), nrow=length(Dicho_Y))
for (compteur_simul in 1:n_simul)
{
# block position: random start of the resampled block
block_position <- sample(1:(length(Dicho_Y)-size_block+1),1)
# block recovery
block_value <- matrix(data=0, ncol= nvalue, nrow= size_block )# block size initialization
if (length(Dicho_Y)==length(Exp_X))
{
block_value[,1] <- Dicho_Y[block_position:(block_position+size_block-1)]
block_value[,2] <- Exp_X[block_position:(block_position+size_block-1)]
} else {
block_value[,1] <- Dicho_Y[block_position:(block_position+size_block-1)]
block_value[,2:nvalue] <- Exp_X[block_position:(block_position+size_block-1),]
}
# Recovery of results
# NOTE(review): each simulated series is the original series shifted by
# size_block observations with the random block appended at the end —
# confirm this is the intended block-bootstrap scheme
if (length(Dicho_Y)==length(Exp_X))
{
matrix_results[1:(length(Dicho_Y)-size_block),(1+(compteur_simul-1)*nvalue)] <- Dicho_Y[(size_block+1):(length(Dicho_Y))]
matrix_results[1:(length(Dicho_Y)-size_block),(compteur_simul*nvalue)] <- Exp_X[(size_block+1):(length(Dicho_Y))]
matrix_results[(length(Dicho_Y)-size_block+1):length(Dicho_Y),(1+(compteur_simul-1)*nvalue):(compteur_simul*nvalue)] <- block_value
} else {
matrix_results[1:(length(Dicho_Y)-size_block),(1+(compteur_simul-1)*nvalue)] <- Dicho_Y[(size_block+1):(length(Dicho_Y))]
matrix_results[1:(length(Dicho_Y)-size_block),(2+(compteur_simul-1)*nvalue):(compteur_simul*nvalue)] <- Exp_X[(size_block+1):(length(Dicho_Y)),]
matrix_results[(length(Dicho_Y)-size_block+1):length(Dicho_Y),(1+(compteur_simul-1)*nvalue):(compteur_simul*nvalue)] <- block_value
}
}
return(matrix_results)
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%% GIRF function %%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Dicho_Y: Binary variable
# - Exp_X: Matrix of explanatory variables
# - Lag: number of lag to calculate the GIRF
# - Int: Boolean variable that is equal to TRUE (with intercept) or FALSE (without)
# - t_mod:
# 1: static
# 2: dynamic with the lag binary variable
# 3: dynamic with the lag index variable
# 4: dynamic with both lag binary and lag index variable
# - horizon: horizon target for the GIRF analysis
# - shock_size: size of the shock
# - OC: threshold to determine the value of the dichotomous variable as a function of the index level
# Output:
# Matrix with:
# - column 1: horizon
# - column 2: index
# - column 3: index with shock
# - column 4: probability associated to the index
# - column 5: probability associated to the index with shock
# - column 6: binary variable associated to the index
# - column 7: binary variable associated to the index with shock
GIRF_Dicho <- function(Dicho_Y, Exp_X, Lag, Int, t_mod, horizon, shock_size, OC)
{
# Generalized impulse response function for the dichotomous EWS model.
# At horizon 0 the model is estimated and shock_size is added to the last
# index; for later horizons the (shocked) dynamics are propagated according
# to t_mod. Returns a (horizon+1) x 7 matrix with columns:
# horizon, index, shocked index, proba, shocked proba, binary, shocked binary.
# Logistic transformation of an index into a probability
logistic <- function(z) exp(z)/(1+exp(z))
# Binary crisis signal: 1 when the probability exceeds the cut-off OC
signal <- function(proba) if (proba > OC) 1 else 0
# Initialization of the results matrix
matrix_results <- matrix(data=0,nrow=(horizon+1), ncol=7)
for (compteur_horizon in 0:horizon)
{
if (compteur_horizon==0)
{
# --- Horizon 0: estimate the model and recover the last index ---
results_forecast <- Logistic_Estimation(Dicho_Y, Exp_X, Int, 1, Lag, t_mod)
# estimated coefficients
Coeff_estimated <- results_forecast$Estimation[,2]
# Data processing: add the intercept column when required
if (Int==TRUE && length(Dicho_Y)==length(Exp_X)){
Exp_X <- matrix(data=c(rep(1,length(Dicho_Y)),Exp_X), nrow=length(Dicho_Y) , ncol=2 )
} else if (Int==TRUE && length(Dicho_Y)!=length(Exp_X)){
Exp_X <- matrix(data=c(rep(1,length(Dicho_Y)),Exp_X), nrow=length(Dicho_Y) , ncol=(1+ncol(Exp_X)) )
}
# last row of explanatory variables (vector case vs matrix case)
if (length(Dicho_Y)==length(Exp_X)){
last_X <- Exp_X[(length(Dicho_Y)-1)]
} else {
last_X <- Exp_X[(length(Dicho_Y)-1),]
}
# last estimated index
Last_Index <- results_forecast$index[length(results_forecast$index)]
# last regressor vector, ordered like the estimated coefficients
if (t_mod==1){
# model 1: Exp_X
Last_Exp_X <- c(last_X)
} else if (t_mod==2) {
# model 2: Exp_X + lagged binary
Last_Exp_X <- c(last_X, Dicho_Y[length(Dicho_Y)])
} else if (t_mod==3) {
# model 3: Exp_X + lagged index
Last_Exp_X <- c(last_X, Last_Index)
} else if (t_mod==4) {
# model 4: Exp_X + lagged binary + lagged index
Last_Exp_X <- c(last_X, Dicho_Y[length(Dicho_Y)], Last_Index)
}
# Index at time 0 and its shocked counterpart
index <- Last_Index
index_shock <- index + shock_size
} else {
# --- Horizon > 0: propagate the (shocked) dynamics ---
if (t_mod==1)
{
# static model: the index stays at its last estimated value and the
# shock does not propagate beyond horizon 0
index <- results_forecast$index[length(results_forecast$index)]
index_shock <- results_forecast$index[length(results_forecast$index)]
} else if (t_mod==2)
{
# replace the lagged binary variable with the previous-period signal
Last_Exp_X <- c(Last_Exp_X[1:(length(Last_Exp_X)-1)],Var_Dicho)
index <- Coeff_estimated %*% Last_Exp_X
Last_Exp_X <- c(Last_Exp_X[1:(length(Last_Exp_X)-1)],Var_Dicho_shock)
index_shock <- Coeff_estimated %*% Last_Exp_X
} else if (t_mod==3)
{
# replace the lagged index with the previous-period (shocked) index
Last_Exp_X <- c(Last_Exp_X[1:(length(Last_Exp_X)-1)],index)
index <- Coeff_estimated %*% Last_Exp_X
Last_Exp_X <- c(Last_Exp_X[1:(length(Last_Exp_X)-1)],index_shock)
index_shock <- Coeff_estimated %*% Last_Exp_X
} else if (t_mod==4)
{
# replace both the lagged binary variable and the lagged index
Last_Exp_X <- c(Last_Exp_X[1:(length(Last_Exp_X)-2)],Var_Dicho, index)
index <- Coeff_estimated %*% Last_Exp_X
Last_Exp_X <- c(Last_Exp_X[1:(length(Last_Exp_X)-2)], Var_Dicho_shock, index_shock)
index_shock <- Coeff_estimated %*% Last_Exp_X
}
}
# probabilities and binary signals implied by the (shocked) index
proba <- logistic(index)
proba_shock <- logistic(index_shock)
Var_Dicho <- signal(proba)
Var_Dicho_shock <- signal(proba_shock)
# store the results for this horizon
matrix_results[compteur_horizon + 1,1] <- compteur_horizon
matrix_results[compteur_horizon + 1,2] <- index
matrix_results[compteur_horizon + 1,3] <- index_shock
matrix_results[compteur_horizon + 1,4] <- proba
matrix_results[compteur_horizon + 1,5] <- proba_shock
matrix_results[compteur_horizon + 1,6] <- Var_Dicho
matrix_results[compteur_horizon + 1,7] <- Var_Dicho_shock
}
return(matrix_results)
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%% GIRF pour Intervale de confiance %%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - Dicho_Y: Binary variable
# - Exp_X: Matrix of explanatory variables
# - Int: Boolean variable that is equal to TRUE (with intercept) or FALSE (without)
# - Lag: number of lag for the logistic estimation
# - t_mod:
# 1: static
# 2: dynamic with the lag binary variable
# 3: dynamic with the lag index variable
# 4: dynamic with both lag binary and lag index variable
# - n_simul: number of simulations for the bootstrap
# - centile_shock: percentile of the shock from the estimation errors
# - horizon: horizon
# - OC: either a value or the name of the optimal cut-off / threshold ("NSR", "CSA", "AM")
#
# Output:
# Matrix where for each simulation there are 7 columns with:
# - column 1: horizon
# - column 2: index
# - column 3: index with shock
# - column 4: probability associated to the index
# - column 5: probability associated to the index with shock
# - column 6: binary variable associated to the index
# - column 7: binary variable associated to the index with shock
Simul_GIRF <- function(Dicho_Y, Exp_X, Int, Lag, t_mod, n_simul, centile_shock, horizon, OC)
{
# Bootstrap simulation of the GIRF: resamples the data with BlockBootstrapp,
# re-estimates the optimal cut-off for each draw (or uses a numeric OC
# directly) and stores each simulation's GIRF_Dicho output in 7 columns.
# Fixes a bug in the original: a numeric OC (documented as allowed) fell
# into an empty else-branch and left the cut-off undefined.
# Initialization of the results matrix: 7 columns per simulation
matrix_results <- matrix(data=0,ncol=(7*n_simul), nrow=(horizon+1))
# number of columns per simulation in the bootstrap matrix
if (length(Dicho_Y)==length(Exp_X) && Int==FALSE)
{
nvalue <- 2 # 1 explanatory variable + 1 binary variable
}else if (length(Dicho_Y)==length(Exp_X) && Int==TRUE){
nvalue <- 3 # 1 explanatory variable + 1 binary variable + 1 Intercept
}else if (length(Dicho_Y)!=length(Exp_X) && Int==FALSE){
nvalue <- ncol(Exp_X)+1 # n explanatory variables + 1 binary variable
}else{
nvalue <- ncol(Exp_X)+2 # n explanatory variables + 1 binary variable + 1 Intercept
}
# Block bootstrap resampling
matrice_bootstrap <- BlockBootstrapp(Dicho_Y, Exp_X, Int, n_simul)
# Estimation of coefficients and errors for each simulation
for (compteur_simul in 1:n_simul)
{
# Binary variable of this simulation
Dicho_Y_bootstrap <- matrice_bootstrap[,1+(compteur_simul-1)*nvalue]
# Explanatory variables of this simulation
Exp_X_bootstrap <- matrice_bootstrap[,(2+(compteur_simul-1)*nvalue):(compteur_simul*nvalue)]
if (is.character(OC) && OC %in% c("NSR","CSA","AM")) {
# Re-estimate the model on the bootstrapped series
Results_serie_bootstrap <- Logistic_Estimation(Dicho_Y_bootstrap, Exp_X_bootstrap, FALSE, 1, Lag, t_mod)
# Estimated probabilities and matching binary observations
vecteur_proba_bootstrap <- Results_serie_bootstrap$prob
vecteur_binary_bootstrap <- Dicho_Y_bootstrap[(1+Lag):length(Dicho_Y_bootstrap)]
pas_optim <- 0.0001 # grid step for the optimal cut-off search
# Pick the requested optimal cut-off criterion
Criterion <- switch(OC,
NSR = EWS_NSR_Criterion,
CSA = EWS_CSA_Criterion,
AM = EWS_AM_Criterion)
threshold_estimated <- Criterion(vecteur_proba_bootstrap, vecteur_binary_bootstrap, pas_optim)
} else {
# OC was supplied directly as a numeric cut-off value
threshold_estimated <- OC
}
# Estimation errors of the bootstrapped model
Residuals_bootstrap <- Vector_Error(Dicho_Y_bootstrap, Exp_X_bootstrap, FALSE, 1, Lag, t_mod)
# Shock size taken from the requested percentile of the errors
size_shock_bootstrap <- quantile(Residuals_bootstrap, centile_shock)
# Response function for this simulation
matrix_results[,(1+(7*(compteur_simul-1))):(7*compteur_simul)] <- GIRF_Dicho(Dicho_Y_bootstrap, Exp_X_bootstrap, Lag, FALSE, t_mod, horizon, size_shock_bootstrap, threshold_estimated)
}
return(matrix_results)
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%% GIRF Index IC %%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - results_simulation_GIRF: matrix output of the Simulation_GIRF function
# - CI_bounds: size of the confidence intervals
# - n_simul: number of simulations
# Output:
# List with:
# - Simulation_CI: the index values that belong in the CI for each horizon
# - values_CI: Index values with lower bound, average index, and upper bound for each horizon
GIRF_Index_CI <- function(results_simul_GIRF, CI_bounds, n_simul, horizon_forecast){
# Confidence interval for the simulated index response.
# results_simul_GIRF: output of Simul_GIRF (7 columns per simulation;
# column 2 = index, column 3 = shocked index).
# Returns a list with Simulation_CI (responses kept inside the CI) and
# values_CI (lower bound, mean, upper bound per horizon).
# Shock response of the index: (shocked index) - (index), one column per simulation
storage_index <- matrix(data=0, nrow=(horizon_forecast+1), ncol=n_simul)
for (counter_simul in 1:n_simul)
{
storage_index[,counter_simul] <- results_simul_GIRF[,(3+(7*(counter_simul-1)))] - results_simul_GIRF[,(2+(7*(counter_simul-1)))]
}
# Sort simulations at each horizon so CI bounds can be read off by column
for (counter_forecast in 1:(horizon_forecast+1))
{
storage_index[counter_forecast,] <- sort(storage_index[counter_forecast,])
}
# Simulation indices of the lower / upper CI bounds
simul_inf <- ceiling( ((1-CI_bounds)/2) * n_simul ) # lower-bound simulation
simul_sup <- n_simul - simul_inf + 1 # upper-bound simulation
# drop = FALSE keeps a matrix even when the CI spans a single simulation
result_CI <- storage_index[,simul_inf:simul_sup, drop = FALSE]
# Mean response at each horizon over the simulations kept in the CI
# (numeric() fixes the original "as.vector(horizon_forecast+1)" misallocation)
mean_result <- numeric(horizon_forecast+1)
for (compteur in 1:(horizon_forecast+1))
{
mean_result[compteur] <- mean(storage_index[compteur,simul_inf:simul_sup])
}
# Matrix with lower bound, average, and upper bound
result_Graph <- matrix(data=0,nrow=(horizon_forecast+1), ncol=3)
result_Graph[,1] <- storage_index[,simul_inf]
result_Graph[,2] <- mean_result
result_Graph[,3] <- storage_index[,simul_sup]
results <- list(Simulation_CI=result_CI, values_CI=result_Graph)
return(results)
}
# ================================================================
# ================================================================
# ================================================================
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%% GIRF Index IC %%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Input:
# - results_simulation_GIRF: matrix output of the Simulation_GIRF function
# - CI_bounds: size of the confidence intervals
# - n_simul: number of simulations
# Output:
# List with:
# - horizon
# - Simulation_CI_proba_shock: the proba_shock values that belong in the CI for each horizon
# - Simulation_CI_proba: the proba values that belong in the CI for each horizon
# - CI_proba_shock= proba_shock values with lower bound, average index, and upper bound for each horizon,
# - CI_proba= proba values with lower bound, average index, and upper bound for each horizon,
GIRF_Proba_CI <- function( results_simul_GIRF, CI_bounds, n_simul, horizon_forecast){
# Confidence intervals for the simulated probabilities with and without shock.
# results_simul_GIRF: output of Simul_GIRF (7 columns per simulation;
# column 4 = proba, column 5 = shocked proba).
# Fixes a bug in the original: a stray positional `horizon_forecast`
# argument in matrix() was silently consumed by `byrow`.
# Proba recovery: one column per simulation
storage_proba_shock <- matrix(data=0, nrow=(horizon_forecast+1), ncol=n_simul)
storage_proba <- matrix(data=0, nrow=(horizon_forecast+1), ncol=n_simul)
for (counter_simul in 1:n_simul)
{
storage_proba_shock[,counter_simul] <- results_simul_GIRF[,(5+(7*(counter_simul-1)))]
storage_proba[,counter_simul] <- results_simul_GIRF[,(4+(7*(counter_simul-1)))]
}
# Sort simulations at each horizon so CI bounds can be read off by column
for (counter_horizon in 1:(horizon_forecast+1))
{
storage_proba_shock[counter_horizon,] <- sort(storage_proba_shock[counter_horizon,])
storage_proba[counter_horizon,] <- sort(storage_proba[counter_horizon,])
}
# Simulation indices of the lower / upper CI bounds
simul_inf <- ceiling( ((1-CI_bounds)/2) * n_simul ) # lower-bound simulation
simul_sup <- n_simul - simul_inf + 1 # upper-bound simulation
# drop = FALSE keeps matrices even when the CI spans a single simulation
result_CI_proba_shock <- storage_proba_shock[,simul_inf:simul_sup, drop = FALSE]
result_CI_proba <- storage_proba[,simul_inf:simul_sup, drop = FALSE]
# Average probability at each horizon over the simulations kept in the CI
# (numeric() fixes the original "as.vector(horizon_forecast+1)" misallocation)
mean_result_proba_shock <- numeric(horizon_forecast+1)
mean_result_proba <- numeric(horizon_forecast+1)
for (counter_horizon in 1:(horizon_forecast+1))
{
mean_result_proba_shock[counter_horizon] <- mean(result_CI_proba_shock[counter_horizon,])
mean_result_proba[counter_horizon] <- mean(result_CI_proba[counter_horizon,])
}
# Matrices with lower bound, average, and upper bound (shock / no shock)
result_proba_shock <- matrix(data=0,nrow=(horizon_forecast+1), ncol=3)
result_proba_shock[,1] <- storage_proba_shock[,simul_inf]
result_proba_shock[,2] <- mean_result_proba_shock
result_proba_shock[,3] <- storage_proba_shock[,simul_sup]
result_proba <- matrix(data=0,nrow=(horizon_forecast+1), ncol=3)
result_proba[,1] <- storage_proba[,simul_inf]
result_proba[,2] <- mean_result_proba
result_proba[,3] <- storage_proba[,simul_sup]
# Horizons covered by the response function
horizon_vect <- 0:horizon_forecast
results <- list(horizon = horizon_vect, Simulation_CI_proba_shock=result_CI_proba_shock, Simulation_CI_proba=result_CI_proba,
CI_proba_shock=result_proba_shock, CI_proba=result_proba)
return(results)
}
|
# plot1.R — histogram of Global Active Power for 2007-02-01 / 2007-02-02
# Missing values in the raw file are coded as "?"
electric_power <- read.table("household_power_consumption.txt", sep = ";",
                             header = TRUE, na.strings = "?",
                             stringsAsFactors = FALSE)
# Parse the Date column (dd/mm/yyyy)
electric_power$Date <- strptime(electric_power$Date, format = "%d/%m/%Y")
# Keep only the two days of interest
day_first <- strptime("2007-02-01", format = "%Y-%m-%d")
day_last <- strptime("2007-02-02", format = "%Y-%m-%d")
electric_power <- subset(electric_power, Date >= day_first & Date <= day_last)
# Draw the red histogram into plot1.png (480 x 480 px)
png(file = "plot1.png", width = 480, height = 480)
hist(electric_power$Global_active_power,
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency",
     col = "red", main = "Global Active Power")
dev.off()
| /plot1.R | no_license | yuliangwang/ExData_Plotting1 | R | false | false | 680 | r | #Script to make plot1.png
#Read data. Missing values encoded as ?
electric_power<-read.table("household_power_consumption.txt",sep=";",header=T,na.strings = "?",stringsAsFactors = F)
#Convert date to R Date class
electric_power$Date<-strptime(electric_power$Date,format="%d/%m/%Y")
#Subset to 2007-02-01 and 2007-02-02
electric_power<-subset(electric_power,Date>=strptime("2007-02-01",format="%Y-%m-%d") & Date<=strptime("2007-02-02",format="%Y-%m-%d"))
#Set up plotting
png(file="plot1.png",width=480,height=480)
#Make histogram
with(electric_power,hist(Global_active_power,xlab="Global Active Power (kilowatts)",ylab="Frequency",col="red",main="Global Active Power"))
dev.off()
|
#!/usr/bin/env Rscript
# Setup section: compares two HOMER PC1 bedGraph tracks (A/B compartments).
# Loads packages, configures the shared ggplot2 theme and parses the two
# bedGraph file paths from the command line.
## Library R packages ----
library(pacman)
# Load (installing if needed) all required packages quietly via pacman::p_load
suppressWarnings(suppressPackageStartupMessages(
p_load(
crayon,
data.table,
dplyr,
ggplot2,
ggpubr,
ggsci,
stringr,
tximport,
rtracklayer,
ggrepel,
viridis
)
))
# Project-wide ggplot2 theme
source("/data1/linhua/QIANLAB/R_Functions/theme_linhua.R")
theme_set(theme_linhua(base_size = 18,base_family = "",legend = "right"))
theme_update(#panel.grid.minor.x = element_blank(),
#panel.grid.minor.y = element_blank(),
#panel.grid.major.x = element_blank(),
#panel.grid.major.y = element_blank(),
#axis.text.x = element_blank(),
#axis.title.x = element_blank(),
panel.spacing.x = unit(0.5, "lines"),
panel.spacing.y = unit(0.5, "lines"),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"),
aspect.ratio = 1
)
## ----
#setwd("/data1/linhua/QIANLAB/PROJECT/hic/HOMER")
# Command-line arguments: Args[1] and Args[2] are the two bedGraph files
Args<-commandArgs(trailingOnly = T)
if (length(Args)==0) {
stop("At least 1 argument must be supplied: Rscriipt script.R TEST_TE.csv", call.=FALSE)
}
#for test:
#Args<-NULL
#Args[1]<-c("/data1/linhua/QIANLAB/PROJECT/hic/HOMER/Control_PC1_R20KW60K_ByArmsH3K4.PC1.bedGraph")
#Args[2]<-c("/data1/linhua/QIANLAB/PROJECT/hic/HOMER/Heat_PC1_R20KW60K_ByArmsH3K4.PC1.bedGraph")
#BEDG1<-as.data.table(import(Args[1]))[,"coordinate":=paste(seqnames,":",start,end,sep = "")]
#BEDG2<-as.data.table(import(Args[2]))[,"coordinate":=paste(seqnames,":",start,end,sep = "")]
ImportHomerBedGraph <- function(FileInput) {
  # Read a HOMER PC1 bedGraph file into a data.table.
  # The first line is the bedGraph track header, so it is skipped.
  # Returns columns seqnames/start/end/score plus a "chr:start-end"
  # coordinate key used to join two tracks.
  bedgraph <- fread(FileInput, skip = 1)
  setnames(bedgraph, c("seqnames", "start", "end", "score"))
  bedgraph[, coordinate := paste0(seqnames, ":", start, "-", end)]
  # the trailing [] makes the returned data.table print when called at top level
  bedgraph[]
}
# Import the two PC1 tracks and key them by bin coordinate
BEDG1<-ImportHomerBedGraph(FileInput = Args[1])
BEDG2<-ImportHomerBedGraph(FileInput = Args[2])
# Output-file prefix built from the two input file names
FileNamePrefix<-str_c(str_remove_all(basename(Args[1]),".bedGraph|_Res20KWin60K_SplitByCentromereH3K4me3"),"-VS-",str_remove_all(basename(Args[2]),".bedGraph|_Res20KWin60K_SplitByCentromereH3K4me3"),"_PC1",sep="")
# NOTE(review): the computed prefix above is immediately overwritten with a
# fixed name — looks like a debugging leftover; confirm which prefix is intended
FileNamePrefix <- "CheckABSwitch"
#R10KW10K<-as.data.table(import("Control_PC1_R10KW10K.PC1.bedGraph"))[,"coordinate":=paste(seqnames,":",start,end,sep = "")]
#R10KW20K<-as.data.table(import("Control_PC1_R10KW20K.PC1.bedGraph"))[,"coordinate":=paste(seqnames,":",start,end,sep = "")]
# Join the two tracks on bin coordinate: score.x = track 1, score.y = track 2
inner_join(BEDG1,BEDG2,by="coordinate") %>% as.data.table() -> MM
# Genome-wide Pearson correlation between the two PC1 tracks
RES<-cor.test(MM$score.x,MM$score.y)
write(paste("Cor: ",RES$estimate),file = paste(FileNamePrefix,"_Cor.txt",sep=""))
# Per-chromosome Pearson correlations
Chr1_Cor<-paste("Chr1_Cor:",cor.test(MM[seqnames.x=="Chr1"]$score.x,MM[seqnames.x=="Chr1"]$score.y)$estimate)
Chr2_Cor<-paste("Chr2_Cor:",cor.test(MM[seqnames.x=="Chr2"]$score.x,MM[seqnames.x=="Chr2"]$score.y)$estimate)
Chr3_Cor<-paste("Chr3_Cor:",cor.test(MM[seqnames.x=="Chr3"]$score.x,MM[seqnames.x=="Chr3"]$score.y)$estimate)
Chr4_Cor<-paste("Chr4_Cor:",cor.test(MM[seqnames.x=="Chr4"]$score.x,MM[seqnames.x=="Chr4"]$score.y)$estimate)
Chr5_Cor<-paste("Chr5_Cor:",cor.test(MM[seqnames.x=="Chr5"]$score.x,MM[seqnames.x=="Chr5"]$score.y)$estimate)
c(Chr1_Cor,
Chr2_Cor,
Chr3_Cor,
Chr4_Cor,
Chr5_Cor) %>% write(file = paste(FileNamePrefix,"_CorByChr.txt",sep=""))
## ----
# Classify each bin by compartment change (PC1 sign: A > 0, B < 0)
AB<-nrow(MM[score.x>0 & score.y<0]) ## A -> B AB
BA<-nrow(MM[score.x<0 & score.y>0]) ## B -> A BA
AA<-nrow(MM[score.x>=0 & score.y>=0]) ## A -> A AA
BB<-nrow(MM[score.x<=0 & score.y<=0]) ## B -> B BB
# NOTE(review): bins with a zero score are counted in both AA and BB, so the
# four categories can sum to more than nrow(MM) — confirm intent
data.table(N = c(AB,
BA,
AA,
BB),
group = c("A -> B",
"B -> A",
"A -> A",
"B -> B")) -> df
# Percentage labels for the pie chart
df %>% mutate(lab= paste(round(N/sum(N)*100,1),"%",sep = "")) -> df
df$group <- factor(c("A -> B",
"B -> A",
"A -> A",
"B -> B"),
levels = c("A -> B",
"B -> A",
"A -> A",
"B -> B"))
# Pie chart of compartment changes
pdf(paste(FileNamePrefix,"AB_PieChart.pdf",sep = ""))
ggpie(df, "N", label = "lab",lab.font = "white",
fill = "group", color = "white",palette = "aaas")
dev.off()
dt<-as.data.table(df)
## ----
# Axis labels derived from the input file names
XName<-str_c(str_remove_all(basename(Args[1]),".bedGraph|_PC1_R20KW60K_ByArmsH3K4.PC1")," PC1",sep = "")
YName<-str_c(str_remove_all(basename(Args[2]),".bedGraph|_PC1_R20KW60K_ByArmsH3K4.PC1")," PC1",sep = "")
# Corner annotations with the counts of switched bins
annotations <- data.frame(
xpos = c(-Inf,Inf),
ypos = c( Inf,-Inf),
annotateText = c(
paste("B -> A : ", dt[group == "B -> A"]$N, " (", dt[group == "B -> A"]$lab, ")", sep = ""),
paste("A -> B : ", dt[group == "A -> B"]$N, " (", dt[group == "A -> B"]$lab, ")", sep = "")
),
hjustvar = c(-0.5,1) ,
vjustvar = c(1,-0.5))
# Scatter plot of the two PC1 tracks with switched bins highlighted
pdf(file = paste(FileNamePrefix,"_Scatterplot.pdf",sep = ""),width = 8,height = 6)
P <-ggplot(MM, aes(x = score.x, y = score.y)) +
scale_x_continuous(name = XName) +
scale_y_continuous(name = YName)
# base layer: all bins in grey, plus the corner count annotations
L1 <- P +
geom_point(color="lightgrey",shape=21,size=1)+
geom_text(data=annotations,aes(x=xpos,y=ypos,hjust=hjustvar,vjust=vjustvar,label=annotateText,color=annotateText),color=c("red","blue"))
# highlighted layers: B -> A bins in red, A -> B bins in blue
L2 <- geom_point(data = MM[score.x < 0 & score.y > 0 ],aes(x = score.x, y = score.y),color = "red",alpha = 1,size = 2,shape=21)
L3 <- geom_point(data = MM[score.x > 0 & score.y < 0 ],aes(x = score.x, y = score.y),color = "blue",alpha = 1,size = 2,shape=21)
L1 + L2 + L3 +
stat_cor(method = "pearson") +
geom_vline(xintercept = 0,col="black",size=0.6,linetype="solid")+
geom_hline(yintercept = 0,col="black",size=0.6,linetype="solid")+
geom_abline(intercept = 0,col="black",size=0.6,linetype="solid",slope = 1)
dev.off()
## ----
#GRanges(BEDG1[,c(1:5)]) %>% export.bed(con = "All_PCA_UsedBinsPer20K.bed")
# Export A -> B bins as BED (chromosome names converted to "chr" style)
MM[score.x>0 & score.y<0][,c(1:5)] -> MM_AB
colnames(MM_AB)<-str_remove_all(string = colnames(MM_AB),pattern = ".x")
MM_AB$seqnames<-str_replace_all(MM_AB$seqnames,pattern = "Chr",replacement = "chr")
GRanges(MM_AB) %>% export.bed(con = paste(FileNamePrefix,"_A-To-B.bed",sep = ""))
# Export B -> A bins as BED
# NOTE(review): unlike MM_AB above, MM_BA keeps all joined columns (no
# [,c(1:5)] subset) — confirm whether the extra columns are intended
MM[score.x<0 & score.y>0]-> MM_BA
colnames(MM_BA)<-str_remove_all(string = colnames(MM_BA),pattern = ".x")
MM_BA$seqnames<-str_replace_all(MM_BA$seqnames,pattern = "Chr",replacement = "chr")
GRanges(MM_BA) %>% export.bed(con = paste(FileNamePrefix,"_B-To-A.bed",sep = ""))
| /Hi-C_Processing/ComparePC1BedGraph_WithABCompartmentChangeInfo.R | no_license | srikanthkris/Ath_Heat_Hi-C | R | false | false | 6,383 | r | #!/usr/bin/env Rscript
## Library R packages ----
library(pacman)
suppressWarnings(suppressPackageStartupMessages(
p_load(
crayon,
data.table,
dplyr,
ggplot2,
ggpubr,
ggsci,
stringr,
tximport,
rtracklayer,
ggrepel,
viridis
)
))
source("/data1/linhua/QIANLAB/R_Functions/theme_linhua.R")
theme_set(theme_linhua(base_size = 18,base_family = "",legend = "right"))
theme_update(#panel.grid.minor.x = element_blank(),
#panel.grid.minor.y = element_blank(),
#panel.grid.major.x = element_blank(),
#panel.grid.major.y = element_blank(),
#axis.text.x = element_blank(),
#axis.title.x = element_blank(),
panel.spacing.x = unit(0.5, "lines"),
panel.spacing.y = unit(0.5, "lines"),
strip.background = element_blank(),
panel.border = element_rect(colour = "black"),
aspect.ratio = 1
)
## ----
#setwd("/data1/linhua/QIANLAB/PROJECT/hic/HOMER")
Args<-commandArgs(trailingOnly = T)
if (length(Args)==0) {
stop("At least 1 argument must be supplied: Rscriipt script.R TEST_TE.csv", call.=FALSE)
}
#for test:
#Args<-NULL
#Args[1]<-c("/data1/linhua/QIANLAB/PROJECT/hic/HOMER/Control_PC1_R20KW60K_ByArmsH3K4.PC1.bedGraph")
#Args[2]<-c("/data1/linhua/QIANLAB/PROJECT/hic/HOMER/Heat_PC1_R20KW60K_ByArmsH3K4.PC1.bedGraph")
#BEDG1<-as.data.table(import(Args[1]))[,"coordinate":=paste(seqnames,":",start,end,sep = "")]
#BEDG2<-as.data.table(import(Args[2]))[,"coordinate":=paste(seqnames,":",start,end,sep = "")]
## Read a HOMER PC1 bedGraph file into a data.table with columns
## seqnames/start/end/score plus a "coordinate" column of the form
## "chr:start-end", which is later used as the join key between samples.
## skip = 1 drops the first line (presumably a track header -- confirm).
ImportHomerBedGraph <- function(FileInput) {
data1<-fread(FileInput,skip = 1)
colnames(data1)<-c("seqnames","start","end","score")
# := adds the column by reference; the trailing [] makes the result visible
return(data1[,"coordinate":=paste(seqnames,":",start,"-",end,sep = "")][])
}
BEDG1<-ImportHomerBedGraph(FileInput = Args[1])
BEDG2<-ImportHomerBedGraph(FileInput = Args[2])
FileNamePrefix<-str_c(str_remove_all(basename(Args[1]),".bedGraph|_Res20KWin60K_SplitByCentromereH3K4me3"),"-VS-",str_remove_all(basename(Args[2]),".bedGraph|_Res20KWin60K_SplitByCentromereH3K4me3"),"_PC1",sep="")
FileNamePrefix <- "CheckABSwitch"
#R10KW10K<-as.data.table(import("Control_PC1_R10KW10K.PC1.bedGraph"))[,"coordinate":=paste(seqnames,":",start,end,sep = "")]
#R10KW20K<-as.data.table(import("Control_PC1_R10KW20K.PC1.bedGraph"))[,"coordinate":=paste(seqnames,":",start,end,sep = "")]
inner_join(BEDG1,BEDG2,by="coordinate") %>% as.data.table() -> MM
RES<-cor.test(MM$score.x,MM$score.y)
write(paste("Cor: ",RES$estimate),file = paste(FileNamePrefix,"_Cor.txt",sep=""))
Chr1_Cor<-paste("Chr1_Cor:",cor.test(MM[seqnames.x=="Chr1"]$score.x,MM[seqnames.x=="Chr1"]$score.y)$estimate)
Chr2_Cor<-paste("Chr2_Cor:",cor.test(MM[seqnames.x=="Chr2"]$score.x,MM[seqnames.x=="Chr2"]$score.y)$estimate)
Chr3_Cor<-paste("Chr3_Cor:",cor.test(MM[seqnames.x=="Chr3"]$score.x,MM[seqnames.x=="Chr3"]$score.y)$estimate)
Chr4_Cor<-paste("Chr4_Cor:",cor.test(MM[seqnames.x=="Chr4"]$score.x,MM[seqnames.x=="Chr4"]$score.y)$estimate)
Chr5_Cor<-paste("Chr5_Cor:",cor.test(MM[seqnames.x=="Chr5"]$score.x,MM[seqnames.x=="Chr5"]$score.y)$estimate)
c(Chr1_Cor,
Chr2_Cor,
Chr3_Cor,
Chr4_Cor,
Chr5_Cor) %>% write(file = paste(FileNamePrefix,"_CorByChr.txt",sep=""))
## ----
AB<-nrow(MM[score.x>0 & score.y<0]) ## A -> B AB
BA<-nrow(MM[score.x<0 & score.y>0]) ## B -> A BA
AA<-nrow(MM[score.x>=0 & score.y>=0]) ## A -> A AA
BB<-nrow(MM[score.x<=0 & score.y<=0]) ## B -> B BB
data.table(N = c(AB,
BA,
AA,
BB),
group = c("A -> B",
"B -> A",
"A -> A",
"B -> B")) -> df
df %>% mutate(lab= paste(round(N/sum(N)*100,1),"%",sep = "")) -> df
df$group <- factor(c("A -> B",
"B -> A",
"A -> A",
"B -> B"),
levels = c("A -> B",
"B -> A",
"A -> A",
"B -> B"))
pdf(paste(FileNamePrefix,"AB_PieChart.pdf",sep = ""))
ggpie(df, "N", label = "lab",lab.font = "white",
fill = "group", color = "white",palette = "aaas")
dev.off()
dt<-as.data.table(df)
## ----
XName<-str_c(str_remove_all(basename(Args[1]),".bedGraph|_PC1_R20KW60K_ByArmsH3K4.PC1")," PC1",sep = "")
YName<-str_c(str_remove_all(basename(Args[2]),".bedGraph|_PC1_R20KW60K_ByArmsH3K4.PC1")," PC1",sep = "")
annotations <- data.frame(
xpos = c(-Inf,Inf),
ypos = c( Inf,-Inf),
annotateText = c(
paste("B -> A : ", dt[group == "B -> A"]$N, " (", dt[group == "B -> A"]$lab, ")", sep = ""),
paste("A -> B : ", dt[group == "A -> B"]$N, " (", dt[group == "A -> B"]$lab, ")", sep = "")
),
hjustvar = c(-0.5,1) ,
vjustvar = c(1,-0.5))
pdf(file = paste(FileNamePrefix,"_Scatterplot.pdf",sep = ""),width = 8,height = 6)
P <-ggplot(MM, aes(x = score.x, y = score.y)) +
scale_x_continuous(name = XName) +
scale_y_continuous(name = YName)
L1 <- P +
geom_point(color="lightgrey",shape=21,size=1)+
geom_text(data=annotations,aes(x=xpos,y=ypos,hjust=hjustvar,vjust=vjustvar,label=annotateText,color=annotateText),color=c("red","blue"))
L2 <- geom_point(data = MM[score.x < 0 & score.y > 0 ],aes(x = score.x, y = score.y),color = "red",alpha = 1,size = 2,shape=21)
L3 <- geom_point(data = MM[score.x > 0 & score.y < 0 ],aes(x = score.x, y = score.y),color = "blue",alpha = 1,size = 2,shape=21)
L1 + L2 + L3 +
stat_cor(method = "pearson") +
geom_vline(xintercept = 0,col="black",size=0.6,linetype="solid")+
geom_hline(yintercept = 0,col="black",size=0.6,linetype="solid")+
geom_abline(intercept = 0,col="black",size=0.6,linetype="solid",slope = 1)
dev.off()
## ----
#GRanges(BEDG1[,c(1:5)]) %>% export.bed(con = "All_PCA_UsedBinsPer20K.bed")
MM[score.x>0 & score.y<0][,c(1:5)] -> MM_AB
colnames(MM_AB)<-str_remove_all(string = colnames(MM_AB),pattern = ".x")
MM_AB$seqnames<-str_replace_all(MM_AB$seqnames,pattern = "Chr",replacement = "chr")
GRanges(MM_AB) %>% export.bed(con = paste(FileNamePrefix,"_A-To-B.bed",sep = ""))
MM[score.x<0 & score.y>0]-> MM_BA
colnames(MM_BA)<-str_remove_all(string = colnames(MM_BA),pattern = ".x")
MM_BA$seqnames<-str_replace_all(MM_BA$seqnames,pattern = "Chr",replacement = "chr")
GRanges(MM_BA) %>% export.bed(con = paste(FileNamePrefix,"_B-To-A.bed",sep = ""))
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix will create a matrix object and defines the getter/setter
## methods for the matrix and its inverse
## Create a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessor functions closing over the matrix `x`
## and its cached inverse:
##   set(y)      - replace the stored matrix and invalidate the cache
##   get()       - return the stored matrix
##   setinv(inv) - store a computed inverse in the cache
##   getinv()    - return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix, so any old inverse is stale
    },
    get = function() x,
    setinv = function(inv) cached_inverse <<- inv,
    getinv = function() cached_inverse
  )
}
## cacheSolve function will compute the inverse of the matrix
## if not computed already
## if its already computed, it will return the inverse in the cache
## Return the inverse of a cache-matrix object `x` (as built by
## makeCacheMatrix). The inverse is computed with solve() on the first
## call and served from the object's cache on every later call.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and memoize it on the object.
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | subhashbylaiah/ProgrammingAssignment2 | R | false | false | 1,248 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix will create a matrix object and defines the getter/setter
## methods for the matrix and its inverse
## Constructor for a cacheable-matrix object: returns a list of accessor
## closures (set/get/setinv/getinv) sharing the matrix `x` and its
## cached inverse via the enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
xinv = NULL # cached inverse, initialized as NULL (not yet computed)
# set() resets the object's state: stores the new matrix and
# invalidates the cached inverse (back to NULL)
set <- function(y) {
x <<- y
xinv <<- NULL
}
get <- function() { x } # return the stored matrix
setinv <- function(inv) { xinv <<- inv } # store a computed inverse in the cache
getinv <- function() { xinv } # return the cached inverse (NULL if absent)
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## cacheSolve function will compute the inverse of the matrix
## if not computed already
## if its already computed, it will return the inverse in the cache
## Compute (or retrieve from cache) the inverse of a cacheable matrix
## created by makeCacheMatrix. Extra arguments in `...` go to solve().
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
xinv <- x$getinv()
# cache hit: reuse the previously computed inverse
if(!is.null(xinv)) {
message("getting cached data")
return(xinv)
}
# cache miss: invert the stored matrix and memoize the result
data <- x$get()
xinv <- solve(data, ...)
x$setinv(xinv)
xinv
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-tidy-eval.R
\name{tidyeval}
\alias{tidyeval}
\alias{expr}
\alias{enquo}
\alias{enquos}
\alias{sym}
\alias{syms}
\alias{.data}
\alias{:=}
\alias{as_name}
\alias{as_label}
\title{Tidy eval helpers}
\description{
\itemize{
\item \code{\link[rlang]{sym}()} creates a symbol from a string and
\code{\link[rlang:sym]{syms}()} creates a list of symbols from a
character vector.
\item \code{\link[rlang:nse-defuse]{enquo}()} and
\code{\link[rlang:nse-defuse]{enquos}()} delay the execution of one or
several function arguments. \code{enquo()} returns a single quoted
expression, which is like a blueprint for the delayed computation.
\code{enquos()} returns a list of such quoted expressions.
\item \code{\link[rlang:nse-defuse]{expr}()} quotes a new expression \emph{locally}. It
is mostly useful to build new expressions around arguments
captured with \code{\link[=enquo]{enquo()}} or \code{\link[=enquos]{enquos()}}:
\code{expr(mean(!!enquo(arg), na.rm = TRUE))}.
\item \code{\link[rlang]{as_name}()} transforms a quoted variable name
into a string. Supplying something else than a quoted variable
name is an error.
That's unlike \code{\link[rlang]{as_label}()} which also returns
a single string but supports any kind of R object as input,
including quoted function calls and vectors. Its purpose is to
summarize that object into a single label. That label is often
suitable as a default name.
If you don't know what a quoted expression contains (for instance
expressions captured with \code{enquo()} could be a variable
name, a call to a function, or an unquoted constant), then use
\code{as_label()}. If you know you have quoted a simple variable
name, or would like to enforce this, use \code{as_name()}.
}
To learn more about tidy eval and how to use these tools, visit
\url{https://tidyeval.tidyverse.org} and the
\href{https://adv-r.hadley.nz/metaprogramming.html}{Metaprogramming
section} of \href{https://adv-r.hadley.nz}{Advanced R}.
}
\keyword{internal}
| /man/tidyeval.Rd | permissive | shamindras/maars | R | false | true | 2,047 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-tidy-eval.R
\name{tidyeval}
\alias{tidyeval}
\alias{expr}
\alias{enquo}
\alias{enquos}
\alias{sym}
\alias{syms}
\alias{.data}
\alias{:=}
\alias{as_name}
\alias{as_label}
\title{Tidy eval helpers}
\description{
\itemize{
\item \code{\link[rlang]{sym}()} creates a symbol from a string and
\code{\link[rlang:sym]{syms}()} creates a list of symbols from a
character vector.
\item \code{\link[rlang:nse-defuse]{enquo}()} and
\code{\link[rlang:nse-defuse]{enquos}()} delay the execution of one or
several function arguments. \code{enquo()} returns a single quoted
expression, which is like a blueprint for the delayed computation.
\code{enquos()} returns a list of such quoted expressions.
\item \code{\link[rlang:nse-defuse]{expr}()} quotes a new expression \emph{locally}. It
is mostly useful to build new expressions around arguments
captured with \code{\link[=enquo]{enquo()}} or \code{\link[=enquos]{enquos()}}:
\code{expr(mean(!!enquo(arg), na.rm = TRUE))}.
\item \code{\link[rlang]{as_name}()} transforms a quoted variable name
into a string. Supplying something else than a quoted variable
name is an error.
That's unlike \code{\link[rlang]{as_label}()} which also returns
a single string but supports any kind of R object as input,
including quoted function calls and vectors. Its purpose is to
summarize that object into a single label. That label is often
suitable as a default name.
If you don't know what a quoted expression contains (for instance
expressions captured with \code{enquo()} could be a variable
name, a call to a function, or an unquoted constant), then use
\code{as_label()}. If you know you have quoted a simple variable
name, or would like to enforce this, use \code{as_name()}.
}
To learn more about tidy eval and how to use these tools, visit
\url{https://tidyeval.tidyverse.org} and the
\href{https://adv-r.hadley.nz/metaprogramming.html}{Metaprogramming
section} of \href{https://adv-r.hadley.nz}{Advanced R}.
}
\keyword{internal}
|
# Copyright 2021 Bedford Freeman & Worth Pub Grp LLC DBA Macmillan Learning.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
base_url <- "https://storage.googleapis.com/torchtransformers-models/"
# maybe later store as a tibble with more info, but named vector is ok for now.
weights_url_map <- c(
"bert_tiny_uncased" = paste0(
base_url,
"bert-tiny/v1/weights.pt"
),
"bert_mini_uncased" = paste0(
base_url,
"bert-mini/v1/weights.pt"
),
"bert_small_uncased" = paste0(
base_url,
"bert-small/v1/weights.pt"
),
"bert_medium_uncased" = paste0(
base_url,
"bert-medium/v1/weights.pt"
),
"bert_base_uncased" = paste0(
base_url,
"bert-base-uncased/v1/weights.pt"
),
"bert_base_cased" = paste0(
base_url,
"bert-base-cased/v1/weights.pt"
),
"bert_large_uncased" = paste0(
base_url,
"bert-large-uncased/v1/weights.pt"
)
)
# There are some hard-to-avoid differences between the variable names in bert
# models constructed using this package and the standard variable names used in
# the huggingface saved weights. Here are some renaming rules that will (almost
# always?) be applied. We modify the *loaded* weights to match the *package*
# weights.
# Also! different models within huggingface have slightly different conventions!
# The tiny, etc. BERT models use "weight" & "bias" rather than "gamma" & "beta".
variable_names_replacement_rules <- c(
".gamma" = ".weight",
".beta" = ".bias",
"LayerNorm" = "layer_norm",
"attention.output.dense" = "attention.self.out_proj",
"bert." = ""
)
# May as well store the configuration info for known BERT models here...
# "intermediate size" is always 4x the embedding size for these models.
bert_configs <- tibble::tribble(
~model_name, ~embedding_size, ~n_layer, ~n_head, ~max_tokens, ~vocab_size,
"bert_tiny_uncased", 128L, 2L, 2L, 512L, 30522L,
"bert_mini_uncased", 256L, 4L, 4L, 512L, 30522L,
"bert_small_uncased", 512L, 4L, 8L, 512L, 30522L,
"bert_medium_uncased", 512L, 8L, 8L, 512L, 30522L,
"bert_base_uncased", 768L, 12L, 12L, 512L, 30522L,
"bert_base_cased", 768L, 12L, 12L, 512L, 28996L,
"bert_large_uncased", 1024L, 24L, 16L, 512L, 30522L
)
usethis::use_data(
weights_url_map,
variable_names_replacement_rules,
bert_configs,
internal = TRUE,
overwrite = TRUE
)
rm(
base_url,
weights_url_map,
variable_names_replacement_rules,
bert_configs
)
| /data-raw/sysdata.R | permissive | ablack3/torchtransformers | R | false | false | 2,918 | r | # Copyright 2021 Bedford Freeman & Worth Pub Grp LLC DBA Macmillan Learning.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
base_url <- "https://storage.googleapis.com/torchtransformers-models/"
# maybe later store as a tibble with more info, but named vector is ok for now.
weights_url_map <- c(
"bert_tiny_uncased" = paste0(
base_url,
"bert-tiny/v1/weights.pt"
),
"bert_mini_uncased" = paste0(
base_url,
"bert-mini/v1/weights.pt"
),
"bert_small_uncased" = paste0(
base_url,
"bert-small/v1/weights.pt"
),
"bert_medium_uncased" = paste0(
base_url,
"bert-medium/v1/weights.pt"
),
"bert_base_uncased" = paste0(
base_url,
"bert-base-uncased/v1/weights.pt"
),
"bert_base_cased" = paste0(
base_url,
"bert-base-cased/v1/weights.pt"
),
"bert_large_uncased" = paste0(
base_url,
"bert-large-uncased/v1/weights.pt"
)
)
# There are some hard-to-avoid differences between the variable names in bert
# models constructed using this package and the standard variable names used in
# the huggingface saved weights. Here are some renaming rules that will (almost
# always?) be applied. We modify the *loaded* weights to match the *package*
# weights.
# Also! different models within huggingface have slightly different conventions!
# The tiny, etc. BERT models use "weight" & "bias" rather than "gamma" & "beta".
variable_names_replacement_rules <- c(
".gamma" = ".weight",
".beta" = ".bias",
"LayerNorm" = "layer_norm",
"attention.output.dense" = "attention.self.out_proj",
"bert." = ""
)
# May as well store the configuration info for known BERT models here...
# "intermediate size" is always 4x the embedding size for these models.
bert_configs <- tibble::tribble(
~model_name, ~embedding_size, ~n_layer, ~n_head, ~max_tokens, ~vocab_size,
"bert_tiny_uncased", 128L, 2L, 2L, 512L, 30522L,
"bert_mini_uncased", 256L, 4L, 4L, 512L, 30522L,
"bert_small_uncased", 512L, 4L, 8L, 512L, 30522L,
"bert_medium_uncased", 512L, 8L, 8L, 512L, 30522L,
"bert_base_uncased", 768L, 12L, 12L, 512L, 30522L,
"bert_base_cased", 768L, 12L, 12L, 512L, 28996L,
"bert_large_uncased", 1024L, 24L, 16L, 512L, 30522L
)
usethis::use_data(
weights_url_map,
variable_names_replacement_rules,
bert_configs,
internal = TRUE,
overwrite = TRUE
)
rm(
base_url,
weights_url_map,
variable_names_replacement_rules,
bert_configs
)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/newGitbook.R
\name{newGitbook}
\alias{newGitbook}
\title{Initializes a new Gitbook.}
\usage{
newGitbook(dir, example_sections = TRUE)
}
\arguments{
\item{dir}{location of the built gitbook.}
\item{example_sections}{If TRUE, sample chapters are created.}
}
\description{
This will initialize a new Gitbook in the given directory. When done, it will
also change the working directory.
}
| /man/newGitbook.Rd | no_license | sboysel/Rgitbook | R | false | false | 472 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/newGitbook.R
\name{newGitbook}
\alias{newGitbook}
\title{Initializes a new Gitbook.}
\usage{
newGitbook(dir, example_sections = TRUE)
}
\arguments{
\item{dir}{location of the built gitbook.}
\item{example_sections}{If TRUE, sample chapters are created.}
}
\description{
This will initialize a new Gitbook in the given directory. When done, it will
also change the working directory.
}
|
library(tidytext)
# reorder_within and scale_x_reordered work.
# (Note that you need to set scales = "free_x" in the facet)
iris_gathered <- pivot_longer(iris, cols = 1:4,names_to = "metric", values_to = "value")
# Before
ggplot(iris_gathered, aes(reorder(Species, value), value)) +
geom_bar(stat = 'identity') +
facet_wrap(~ metric)
# After
ggplot(iris_gathered, aes(reorder_within(Species, value, metric), value, fill = Species)) +
geom_bar(stat = 'identity', show.legend = F) +
scale_x_reordered() +
facet_wrap(~ metric, scales = "free") | /My R functions/Arranging facet bar in ggplot.R | no_license | gbganalyst/Data-science-with-R | R | false | false | 558 | r | library(tidytext)
# reorder_within and scale_x_reordered work.
# (Note that you need to set scales = "free_x" in the facet)
iris_gathered <- pivot_longer(iris, cols = 1:4,names_to = "metric", values_to = "value")
# Before
ggplot(iris_gathered, aes(reorder(Species, value), value)) +
geom_bar(stat = 'identity') +
facet_wrap(~ metric)
# After
ggplot(iris_gathered, aes(reorder_within(Species, value, metric), value, fill = Species)) +
geom_bar(stat = 'identity', show.legend = F) +
scale_x_reordered() +
facet_wrap(~ metric, scales = "free") |
# scooby do data set 20210715
library(tidyverse)
library(lubridate)
scoobydoo <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-07-13/scoobydoo.csv')
# 603 observations...
colnames(scoobydoo)
unique(scoobydoo$series_name)
# 29 different series
# my idea how does the gender of culprits change over time...
scoobydoo %>%
# pull out columns that we need
# good practice to keep the index
select(index, date_aired, culprit_gender) %>%
# looking at the data, sometimes culprit_gender is NULL
filter(culprit_gender != "NULL") -> data
# so this gives a plot...
# shows the pattern of shows...
# where we have multiple culprits per show we need to separate the data...
max(scoobydoo$culprit_amount)
# 11 is the maximum...
# this is hard coded which feels wrong but works...
data %>%
separate(culprit_gender, into=c("cul1",
"cul2",
"cul3",
"cul4",
"cul5",
"cul6",
"cul7",
"cul8",
"cul9",
"cul10",
"cul11"),sep = ",") -> gender_wide
# gives a warning but that's OK...
# move this from wide to long
gender_wide %>%
pivot_longer(starts_with("cul"),
values_drop_na = TRUE) %>%
mutate(Gender = str_trim(value)) %>%
mutate(year = year(date_aired)) %>%
mutate(decade = floor(year/10)*10) %>%
mutate(decade = as_factor(decade)) %>%
group_by(decade, Gender) %>%
summarise(count = n()) -> data2
# make a plot...
p <- ggplot(data2, aes(x = decade,
y = count,
fill = Gender))
# add titles and some colours...
p + geom_bar(stat="identity", position="fill") +
scale_fill_brewer(palette = 16, direction=-1) +
# and add a title
labs(x = "",
y = "Gender of culprits (proportion)",
title = "Scooby Doo: culprits through the decades",
subtitle = "More badly behaved females but mostly bad males \nData from Kaggle via Tidy Tuesday") +
theme_bw() +
theme(axis.title.y = element_text(size = 14)) +
theme(axis.text.x = element_text(size = 14)) -> scooby_plot
scooby_plot
| /fig_3C.R | no_license | brennanpincardiff/fig_for_the_Biochemist | R | false | false | 2,449 | r | # scooby do data set 20210715
library(tidyverse)
library(lubridate)
scoobydoo <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-07-13/scoobydoo.csv')
# 603 observations...
colnames(scoobydoo)
unique(scoobydoo$series_name)
# 29 different series
# my idea how does the gender of culprits change over time...
scoobydoo %>%
# pull out columns that we need
# good practice to keep the index
select(index, date_aired, culprit_gender) %>%
# looking at the data, sometimes culprit_gender is NULL
filter(culprit_gender != "NULL") -> data
# so this gives a plot...
# shows the pattern of shows...
# where we have multiple culprits per show we need to separate the data...
max(scoobydoo$culprit_amount)
# 11 is the maximum...
# this is hard coded which feels wrong but works...
data %>%
separate(culprit_gender, into=c("cul1",
"cul2",
"cul3",
"cul4",
"cul5",
"cul6",
"cul7",
"cul8",
"cul9",
"cul10",
"cul11"),sep = ",") -> gender_wide
# gives a warning but that's OK...
# move this from wide to long
gender_wide %>%
pivot_longer(starts_with("cul"),
values_drop_na = TRUE) %>%
mutate(Gender = str_trim(value)) %>%
mutate(year = year(date_aired)) %>%
mutate(decade = floor(year/10)*10) %>%
mutate(decade = as_factor(decade)) %>%
group_by(decade, Gender) %>%
summarise(count = n()) -> data2
# make a plot...
p <- ggplot(data2, aes(x = decade,
y = count,
fill = Gender))
# add titles and some colours...
p + geom_bar(stat="identity", position="fill") +
scale_fill_brewer(palette = 16, direction=-1) +
# and add a title
labs(x = "",
y = "Gender of culprits (proportion)",
title = "Scooby Doo: culprits through the decades",
subtitle = "More badly behaved females but mostly bad males \nData from Kaggle via Tidy Tuesday") +
theme_bw() +
theme(axis.title.y = element_text(size = 14)) +
theme(axis.text.x = element_text(size = 14)) -> scooby_plot
scooby_plot
|
#assign("p11", NULL, envir = .GlobalEnv)
#assign("p12", NULL, envir = .GlobalEnv)
#assign("p21", NULL, envir = .GlobalEnv)
#assign("p22", NULL, envir = .GlobalEnv)
#p11 <<- par() p22 <<- par() p12 <<- par() p21 <<- par()
#p11 <- par() p22 <- par() p12 <- par() p21 <- par()
.varDiagOptions <- new.env(FALSE, globalenv())
assign("p11", NULL, envir = .varDiagOptions)
assign("p12", NULL, envir = .varDiagOptions)
assign("p21", NULL, envir = .varDiagOptions)
assign("p22", NULL, envir = .varDiagOptions)
gamsph<-function(h,th=rbind(1,1,1)){(0<h)*(h<=th[3])*(th[1]+th[2]*(3/2*(h/th[3])-1/2*(h/th[3])^3))+(h>th[3])*(th[1]+th[2])}
fth<-function(th,y,h1,w1=1){(y-gamsph(h1,th))/w1}
ftc<-function(th,y,h1,w1){(y-gamsph(h1,th))/gamsph(h1,th)}
ftg<-function(th,y,h1,cv1){cv1%*%(y-gamsph(h1,th))}
# Generalized residuals on the fourth-root scale: y (passed as y^0.25 from
# estvar) minus the model mean 2^0.25*gamma(3/4)/sqrt(pi)*gamsph(h1,th)^0.25,
# premultiplied by the weight matrix cv1.
fts <- function(th, y, h1, cv1) {
  cv1 %*% (y-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(h1,th)^0.25)
}
## Scalar objective for optim(): mean squared element of the transformed,
## cv1-weighted residual vector (the same residuals that fts() returns).
ftsOpt <- function(th, y, h1, cv1) {
  scale_const <- 2^0.25 * gamma(0.75) / sqrt(pi)
  resid <- cv1 %*% (y - scale_const * gamsph(h1, th)^0.25)
  mean(resid^2)
}
gamsph1<-function(h,th=rbind(1,1,1)){1}
gamsph2<-function(h,th=rbind(1,1,1)){(0<h)*(h<=th[3])*(3/2*(h/th[3])-1/2*(h/th[3])^3)+(h>th[3])}
gamsph3<-function(h,th=rbind(1,1,1)){(0<h)*(h<=th[3])*3/2*th[2]/th[3]*((h/th[3])^3-h/th[3])}
## Fifth-order polynomial approximation of the Gauss hypergeometric series
## 2F1(3/4, 3/4; 1/2; r) used by ficorr(). The coefficients are the series
## terms (a)_k (b)_k / ((c)_k k!) for a = b = 3/4, c = 1/2, k = 0..5
## (e.g. k = 1: 0.75*0.75/0.5 = 1.125). A term-by-term summation loop was
## left commented out in the original source and is dropped here.
hyperg <- function(r) {
  1 +
    1.125       * r +
    1.1484375   * r^2 +
    1.158007813 * r^3 +
    1.16317749  * r^4 +
    1.166408539 * r^5
}
ficorr<-function(r){gamma(0.75)^2/(sqrt(pi)-gamma(0.75)^2)*((1-r^2)*hyperg(r^2)-1)}
## Iterated generalized least squares fit of a spherical variogram
## (th = nugget, partial sill, range) to the fourth-root transformed
## semivariance cloud y at lags h0.
##
## h0        vector of pairwise lag distances (length n*(n-1)/2)
## y         variogram cloud values (half squared differences)
## iter      maximum number of GLS iterations
## tolerance stopping rule on the summed relative parameter change
## trace     if > 0, print the iteration number and current parameters
## th0       candidate start values (NOTE: effectively ignored, see below)
##
## Returns a list with pars (fitted parameters), cg (residual covariance),
## res (transformed residuals) and lof (lack-of-fit statistic).
estvar <- function(h0, y, iter=50, tolerance=0.0002, trace=1, th0=rbind(0,1,1))
{
#EJP added:
#stop("this function requires nlregb (an S-Plus proprietary function) to work")
# number of underlying sample points implied by the pair count
n<-ceiling(sqrt(2*length(h0)))
# preparation for covgamma (covariance of the variogram cloud)
n1<-n*(n-1)/2
# first index of the gamma[i,j] matrix
i1<-matrix(1:n,n,n)
# first part of the row index of the covgamma matrix
k1<-matrix(i1[row(i1)<col(i1)],n1,n1)
# second part of the row index of the covgamma matrix
k2<-matrix(t(i1)[row(i1)<col(i1)],n1,n1)
# first part of the column index of the covgamma matrix
k3<-t(k1)
# second part of the column index of the covgamma matrix
k4<-t(k2)
if(!missing(th0)) {
#EJP outcommented:
#opt<-nlregb(n*(n-1)/2,cbind(0,max(y/2),max(h0)),fts,y=y^0.25,h1=h0,cv1=diag(n1),lower=cbind(0,0,0))
# initial unweighted (identity-covariance) fit for starting values
opt<-optim(par = c(0,max(y/2),max(h0)), ftsOpt,
lower=cbind(0,0,0), method = "L-BFGS-B",
y=y^0.25, h1=h0, cv1=diag(n1))
th1 <- opt$par
}
else
th1<-th0
# NOTE(review): this line unconditionally overwrites th1 with the default
# start, making both branches above ineffective -- confirm intent
th1<-cbind(0,max(y/2),max(h0))
#th0<-th1_c(3.72635248595876, 15.5844183738953, 1.22109233789852)
#th1<-c(0.0000000,7.6516077,0.7808538)
for (i in 1:iter) {
if(trace>0)
print(i)
gg<-sqrt(2*gamsph(h0,th1))
# column 1, column 2, ...
# the gamma vector is laid out as a matrix
tt<-matrix(gg[(t(i1)-2)*(t(i1)-1)/2+i1],n,n)
# symmetrization
tt1<-tt
tt1[row(tt1)>col(tt1)]<-t(tt)[row(tt1)>col(tt1)]
# clear the diagonal
tt1[row(tt1)==col(tt1)]<-0
# compute covgamma
cg<-matrix(tt1[(k4-1)*n+k1]+tt1[(k2-1)*n+k3]-tt1[(k3-1)*n+k1]-tt1[(k4-1)*n+k2],n1,n1)
cgcg<-outer(gg,gg,"*")
corg<-sqrt(cgcg)*ficorr((cg*lower.tri(cg))/cgcg)
corg<-sqrt(2)*(sqrt(pi)-gamma(0.75)^2)/pi*(corg+t(corg)+diag(gg))
# generalized weights: Cholesky factor of the symmetrized inverse covariance
infm<-solve(corg);
cv<-chol((infm+t(infm))/2);
#sc<-cbind(1/th1[2],1/th1[2],1/th1[3])
#EJP outcommented:
#opt<-nlregb(n*(n-1)/2,th1,fts,y=y^0.25,h1=h0,cv1=cv,lower=cbind(0,0,0))
opt <- optim(par = th1, ftsOpt,
lower=cbind(0,0,0), method = "L-BFGS-B",
y=y^0.25,h1=h0,cv1=cv)
if(trace>0) print(opt$par)
# stop once the summed relative parameter change falls below tolerance
if(sum(abs((th1-opt$par)/(th1+0.00001)))<=tolerance)
break
th1<-opt$par
}
print("Fertig")
v<-list(pars=opt$par)
v$cg<-corg
v$res<-y^0.25-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(h0,v$pars)^0.25
v$lof<-t(v$res)%*%solve(corg,v$res)
v
}
## Build a variogram-diagnostics object from an n x 3 data matrix m whose
## columns 1:2 are spatial coordinates and column 3 the observed value.
## Fits the spherical variogram via estvar() and, if loo=TRUE, additionally
## performs a leave-one-out analysis (refit with each point removed).
##
## Returns an object of class "varobj" carrying pars/cg/res/lof from the
## fit plus info (parameter covariance), h, y, loores (LOO residuals),
## tha (LOO parameters), lofa, cda (Cook-type distances) and the data.
varobj<-function(m,iter=50,tolerance=0.0002,trace=1,loo=FALSE){
n<-dim(m)[1]
#a1<-t(m[,3]-t(matrix(m[,3],n,n)))
#b1<-t(m[,1]-t(matrix(m[,1],n,n)))
#c1<-t(m[,2]-t(matrix(m[,2],n,n)))
# pairwise differences of values (a1) and of the two coordinates (b1, c1)
a1<-outer(m[,3],m[,3],FUN="-")
b1<-outer(m[,1],m[,1],FUN="-")
c1<-outer(m[,2],m[,2],FUN="-")
#d1<-cbind(sqrt(b1[row(b1)<col(b1)]^2+c1[row(c1)<col(c1)]^2),a1[row(a1)<col(a1)]^2)
# variogram cloud: half squared value differences vs. Euclidean lags
y<-a1[row(a1)<col(a1)]^2/2
h0<-sqrt(b1[row(b1)<col(b1)]^2+c1[row(c1)<col(c1)]^2)
v<-estvar(h0,y,iter,tolerance,trace)
# design matrix of the transformed model (columns from gamsph1/2/3),
# used for the asymptotic information matrix of the fitted parameters
XM<-cbind(gamsph1(h0,v$pars),gamsph2(h0,v$pars),gamsph3(h0,v$pars))*(gamsph(h0,v$pars))^(-0.75)/4
v$info<-solve(t(XM)%*%solve(v$cg,XM))
# containers for the leave-one-out results (remain zero when loo=FALSE)
loores<-matrix(0,n,n)
tha<-matrix(0,n,3)
lofa<-matrix(0,n,1)
cda<-matrix(0,n,1)
v$h<-h0
v$y<-y
if(loo==TRUE){
for (i in 1:n){
print(i)
# refit with observation i removed
m1<-m[-i,]
a1<-t(m1[,3]-t(matrix(m1[,3],n-1,n-1)))
b1<-t(m1[,1]-t(matrix(m1[,1],n-1,n-1)))
c1<-t(m1[,2]-t(matrix(m1[,2],n-1,n-1)))
y<-a1[row(a1)<col(a1)]^2/2
h0<-sqrt(b1[row(b1)<col(b1)]^2+c1[row(c1)<col(c1)]^2)
z<-estvar(h0,y,iter,tolerance,trace,th0=v$pars)
# change in lack of fit, deleted-point parameters, Cook-type distance
lofa[i,1]<-v$lof-z$lof
tha[i,]<-z$pars
cda[i,1]<-t(v$pars-z$pars)%*%v$info%*%(v$pars-z$pars)
# residuals of all points against the fit obtained without point i
mm2<-m[i,]
mm3<-t(t(m)-mm2)^2/2
h<-sqrt(mm3[,1]+mm3[,2])
loores[i,]<-mm3[,3]^0.25-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(h,z$pars)^0.25
}
}
v$loores<-loores
v$tha<-tha
v$lofa<-lofa
v$cda<-cda
v$data<-m
class(v)<-"varobj"
v
}
print.varobj<-function(x,...){print(x$pars); print(x$lof);invisible(x)}
#r[row(r)<col(r)]<-v$res
#r<-r+t(r)
# Draw the 2x2 diagnostic panel for a fitted variogram object.
#   v      : object of class "varobj" (output of varobj()).
#   region : optional 2-column matrix with a boundary polygon for the map view.
#   xyi    : index of a point pair to highlight across all panels (0 = none).
#   zmv    : map-view symbol variable: 0 locations only, 1 data value,
#            2 Cook's distance, 3 lack-of-fit reduction, 4 approx. Cook's distance.
# Panels: (1,1) map view, (1,2) studentized square-root variogram cloud,
#         (2,1) leave-one-out residuals, (2,2) decorrelated residuals.
# Side effects: draws on the current device and stores each panel's par()
# state in .varDiagOptions (p11, p12, p21, p22) for interact.varobj().
# Returns n, the number of point pairs.
PlotDiag.varobj<-function(v, region = NULL, xyi = 0, zmv = 0) {
palette(c("black","cyan","magenta","green3","yellow","blue","white","red"))
n<-length(v$h)
# decorrelation matrix: Cholesky factor of the symmetrized inverse covariance
infm<-solve(v$cg);
cv<-chol((infm+t(infm))/2);
# Jacobian of the fourth-root model surface w.r.t. the three parameters
XM<-cbind(gamsph1(v$h,v$pars),gamsph2(v$h,v$pars),gamsph3(v$h,v$pars))*(gamsph(v$h,v$pars))^(-0.75)/4
Vare<-v$cg-XM%*%solve(t(XM)%*%solve(v$cg,XM),t(XM))
#sig<-mean(sqrt(diag(Vare)))
e<-v$res
sig<-sqrt(sum(e^2)/(n-3))
# studentized square-root cloud values scattered around the fitted curve
gdd<-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h,v$pars)^0.25+e*sig/sqrt(diag(Vare))
r1<-v$loores[row(v$loores)<col(v$loores)]
tloores<-t(v$loores)
r2<-tloores[row(tloores)<col(tloores)]
resi<-v$loores-v$loores
resi[row(resi)<col(resi)]<-v$res
resi<-resi+t(resi)
n0<-length(v$lofa)
xn<-v$data[,c(2,1)]
# xy holds, per point pair, the two endpoint coordinates (y1,y2,x1,x2)
xy<-matrix(0,n,4)
te<-crossprod(matrix(1,1,n0),t(xn[,1]))
xy[,1]<-te[row(te)<col(te)]
te<-crossprod(t(xn[,1]),matrix(1,1,n0))
xy[,2]<-te[row(te)<col(te)]
te<-crossprod(matrix(1,1,n0),t(xn[,2]))
xy[,3]<-te[row(te)<col(te)]
te<-crossprod(t(xn[,2]),matrix(1,1,n0))
xy[,4]<-te[row(te)<col(te)]
if(!missing(xyi)){
# recover the two point indices (ix, iy) from the pair index xyi
ix<-ceiling(sqrt((2*(xyi)+0.25))-0.5)+1
iy<-(xyi)-ceiling(sqrt((2*(xyi)+0.25))-0.5)/2*(ceiling(sqrt((2*(xyi)+0.25))-0.5)-1)
nl<-n
#*(n-1)/2
ind1<-ceiling(sqrt((2*(1:nl)+0.25))-0.5)+1
ind2<-(1:nl)-ceiling(sqrt((2*(1:nl)+0.25))-0.5)/2*(ceiling(sqrt((2*(1:nl)+0.25))-0.5)-1)
}
paro<-par(no.readonly=TRUE)
par(mfrow=c(2,2), mar = c(3,2,2,1)+.1, bg="white")
#EJP: moved (1,1) to be first plotted:
#graphsheet(win.width=0.5,win.height=0.5,win.left=0,win.top=0,Name="1) MAP")
# plot map view as left plot in first row
# dg: approximate Cook's distance per left-out observation
dg<-1:length(v$lofa)
for(i in 1:length(v$lofa)){
dg[i]<-sum((0.822*(gamsph(v$h,c(v$tha[i,1],v$tha[i,2],v$tha[i,3]))^0.25-gamsph(v$h,v$pars)^0.25))^2)
}
#if(! exists("zmv")) zmv<-0
#if(zmv==0) plot(xn[,2],xn[,1],xlim=c(-0.6,1.1),ylim=c(-0.28, 0.28))
#if(zmv==0) plot(xn[,2],xn[,1],xlim=c(-1.1,1.1),ylim=c(-0.28, 0.28),lwd=3)
if (zmv==0 && !is.null(region))
plot(xn[,2],xn[,1],xlim=c(min(region[,1]), max(region[,1])),
ylim=c(min(region[,2]), max(region[,2])),lwd=3,
asp = 1, xlab = "", ylab = "")
else
plot(xn[,2],xn[,1],xlim=c(min(xn[,2]), max(xn[,2])),
ylim=c(min(xn[,1]), max(xn[,1])),lwd=3,
asp = 1, xlab = "", ylab = "")
#lolo<-lof[1,1]-lofa
z<-xn[,1]
if(zmv>0){
# choose the variable shown as circle sizes in the map view
z<-switch(zmv,v$data[,3],v$cda,v$lofa,dg)
inc<-0.25
rmin<-0.03
epsi<-(max(z)-min(z))/(inc/rmin-1)
# symbols(xn[,2],xn[,1],circles=z-min(z)+epsi,inches=inc,xlim=c(-0.63,1.14),ylim=c(-0.3, 0.28))
if(zmv>0 && !is.null(region))
symbols(xn[,2],xn[,1],circles=z-min(z)+epsi,inches=inc,
xlim=c(min(region[,1]), max(region[,1])),
ylim=c(min(region[,2]), max(region[,2])),lwd=3)
else
symbols(xn[,2],xn[,1],circles=z-min(z)+epsi,inches=inc,
xlim=c(min(xn[,2]), max(xn[,2])),
ylim=c(min(xn[,1]), max(xn[,1])),lwd=3)
# symbols(xn[,2],xn[,1],circles=z-min(z)+epsi,inches=inc,lwd=3)
}
if(!is.null(region)) polygon(region[,1],region[,2],density=0,col=2)
title(paste("Map View",switch(zmv+1,'','(y)',"(Cook's Distance)",'(Mahalanobis Distance)',"(Cook's Distance)")))
#gsdmpv<-dev.cur()
if(!missing(xyi)){
# highlight the selected point pair in the map view
segments(xy[xyi,3],xy[xyi,1],xy[xyi,4],xy[xyi,2],pch=16,col=3,lwd=3)
points(xy[xyi,3],xy[xyi,1],pch=16,col=6)
points(xy[xyi,4],xy[xyi,2],pch=16,col=8)
# identify(xn[,2],xn[,1],plot=T,pts=cbind(xn[ix,2],xn[ix,1]))
text(xn[ix,2],xn[ix,1]-(max(z)-min(z))/10,paste(ix))
# identify(xn[,2],xn[,1],plot=T,pts=cbind(xn[iy,2],xn[iy,1]))
text(xn[iy,2],xn[iy,1]-(max(z)-min(z))/10,paste(iy))
}
assign("p11", par(no.readonly=TRUE), envir = .varDiagOptions)
# EJP-end
#graphsheet(win.width=0.8,win.height=1,win.left=0,win.top=0,Name="Interactive Variogram Plot")
#windows(width = 8, height = 5.5,rescale="R")
#windows()
par(mfg=c(2,1))
#graphsheet(win.width=0.5,win.height=0.5,win.left=0,win.top=0.5,Name="3) LOO")
# leave-one-out residual panel (2,1)
plot(matrix(cbind(v$res,v$res),n*2,1), matrix(cbind(r1,r2),n*2,1),
pch=1, xlab="", ylab="", lwd=1)
lines(c(min(v$res),max(v$res)),c(min(v$res),max(v$res)),col=8)
segments(v$res,r1,v$res,r2)
title("Leave One Out Residuals")
if(!missing(xyi)){
print("xyi")
print(xyi)
points(v$res[xyi],r1[xyi],pch=18,col=3)
points(v$res[xyi],r2[xyi],pch=18,col=5)
points(t(resi[ix,-ix]),t(v$loores[ix,-ix]),pch=16,col=6)
points(t(resi[iy,-iy]),t(v$loores[iy,-iy]),pch=16,col=8)
segments(v$res[xyi],r1[xyi],v$res[xyi],r2[xyi],col=3,lwd=5)
}
assign("p21", par(no.readonly=TRUE), envir = .varDiagOptions)
cv1<-cv
i<-1:n
di<-dim(v$cg)[1]
if(!missing(xyi)){
# di<-dim(v$cg)[1]
# pm<-diag(di)
# pm[xyi,]<-diag(di)[di,]
# pm[di,]<-diag(di)[xyi,]
# cg1<-pm%*%v$cg%*%pm
# i[n]<-xyi
# i[xyi]<-n
# print(max(abs(cv1-cv)))
# permute so the highlighted pair ends up last, then refactor
i<-c(sample(seq(di)[-xyi]),xyi)
cg1<-v$cg[i,i]
infm<-solve(cg1);
cv1<-chol((infm+t(infm))/2);
}
par(mfg=c(2,2))
#graphsheet(win.width=0.5,win.height=0.5,win.left=0.5,win.top=0.5,Name="4) DCR")
# decorrelated residual panel (2,2)
x<-((2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h,v$pars)^0.25)[i]
y<-v$res[i]
cv1<-cv1/cv1[di,di]
plot(cv1%*%x,cv1%*%y,xlab="",ylab="",lwd=1)
if(!missing(xyi))
points(x[n],y[n],pch=16,col=3)
#sm<-lowess(cv1%*%x,cv1%*%y)
#lines(sm$x,sm$y,lwd=3)
glu<-min(cv1%*%x)
glo<-max(cv1%*%x)
lines(c(glu,glo),c(0,0))
title("Decorrelated Residuals")
assign("p22", par(no.readonly=TRUE), envir = .varDiagOptions)
xv<-seq(0.0001,max(v$h),0.01)
par(mfg=c(1,2))
#graphsheet(win.width=0.5,win.height=0.5,win.left=0.5,win.top=0,Name="2) SVC")
# studentized square-root cloud panel (1,2)
plot(v$h,gdd,xlab="",ylab="",lwd=1)
lines(xv,(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(xv,v$pars)^0.25,lwd=3)
title("Studentized Square Root Cloud")
if(!missing(xyi)){
points(v$h[ind1==ix | ind2 == ix],gdd[ind1==ix | ind2 == ix],pch=16,col=6)
points(v$h[ind1==iy | ind2 == iy],gdd[ind1==iy | ind2 == iy],pch=16,col=8)
points(v$h[xyi],gdd[xyi],pch=16,col=3)
}
assign("p12", par(no.readonly=TRUE), envir = .varDiagOptions)
par(paro)
n
}
# Plot approximate Cook's distance against the reduction in lack-of-fit
# obtained by deleting each observation in turn, and let the user pick
# points interactively with identify().
#   v : object of class "varobj" with leave-one-out results (tha, lofa).
# NOTE(review): the xlab "Reduktion im Lack of Fit" is German ("reduction
# in lack of fit"); the scaling constants 187 and 19 look data-specific —
# confirm before reuse.
CookRLF.varobj<-function(v){
n<-length(v$lofa)
lofa<-matrix(0,n,1)
i1<-matrix(1:n,n,n)
for (k in 1:n){
# ii marks all point pairs that involve observation k
ii<-(i1[row(i1)<col(i1)]==k)|(t(i1)[row(t(i1))<col(t(i1))]==k)
cgt<-v$cg[!ii,!ii]
rt<-v$y[!ii]^0.25-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h[!ii],v$tha[k,])^0.25
lofa[k]<-rt%*%solve(cgt,rt)
}
# dg: approximate Cook's distance from the parameter shift per deletion
dg<-1:length(v$lofa)
for(i in 1:length(v$lofa)){
dg[i]<-sum((0.822*(gamsph(v$h,c(v$tha[i,1],v$tha[i,2],v$tha[i,3]))^0.25-gamsph(v$h,v$pars)^0.25))^2)
}
plot((v$lof[1]-lofa)/v$lof[1]*187/19,dg/(3*v$lof[1])*187,ylab="Cook's Distance",xlab="Reduktion im Lack of Fit")
identify((v$lof[1]-lofa)/v$lof[1]*187/19,dg/(3*v$lof[1])*187)
}
# Normal QQ-plot of the decorrelated variogram-cloud values.
# The cloud values v$y are decorrelated with the Cholesky factor of the
# symmetrized inverse of their covariance v$cg, sorted, and plotted
# against standard-normal quantiles; an identity reference line and a
# pointwise Monte-Carlo envelope (100 simulated normal samples,
# 0.05/n and 1-0.05/n quantiles per order statistic) are added.
#   v : object of class "varobj" (uses components h, cg, y).
# Side effect: draws on the current device.
# Fix: axis label typo "orderd" -> "ordered".
QQVarcloud.varobj<-function(v){
n<-length(v$h)
infm<-solve(v$cg);
cv<-chol((infm+t(infm))/2);
plot(qnorm(seq(from=1/(2*n),length=n,by=1/n)),sort(cv%*%v$y),
xlab="quantile of standard normal distribution",
ylab="ordered decorrelated residual")
lines(c(-3,3),c(-3,3),col=8,lwd=3)
# pointwise simulation envelope for the order statistics
apply(t(apply(apply(matrix(rnorm(n*100),ncol=100),2,sort),1,quantile,probs=c(0.05/n,1-0.05/n))),2,lines,x=qnorm(seq(from=1/(2*n),length=n,by=1/n)))
}
# Chi-square(1) QQ-plot of the scaled squared differences
# [Z(s)-Z(s')]^2 / (2*g(s-s')): values v$y divided by the fitted
# spherical variogram, sorted, against chi-square(1) quantiles, with a
# pointwise Monte-Carlo envelope (100 simulated chi-square samples) and
# an identity reference line.
#   v : object of class "varobj" (uses components h, cg, y, pars).
# Side effect: draws on the current device.
# NOTE(review): infm/cv are computed but never used here — presumably
# leftover from QQVarcloud.varobj; confirm before removing.
QQDecorr.varobj<-function(v){
n<-length(v$h)
infm<-solve(v$cg);
cv<-chol((infm+t(infm))/2);
plot(qchisq(seq(from=1/(2*n),length=n,by=1/n),1),sort(v$y/gamsph(v$h,v$pars)),
xlab="quantile of Chi-square distribution",
ylab="ordered value of [Z(s)-Z(s')]^2/(2g(s-s'))")
apply(t(apply(apply(matrix(rchisq(n*100,1),ncol=100),2,sort),1,quantile,probs=c(0.05/n,1-0.05/n))),2,lines,x=qchisq(seq(from=1/(2*n),length=n,by=1/n),1))
lines(c(0,8),c(0,8),col=8,lwd=3)
}
# Interactive exploration of the diagnostic panels drawn by PlotDiag.varobj.
#   v      : "varobj" object.
#   region : optional boundary polygon, forwarded to PlotDiag.varobj.
#   g      : interaction mode:
#            "l" pick a pair in the leave-one-out panel,
#            "m" pick one or two locations in the map view,
#            "s" pick a pair in the square-root cloud,
#            "t" lasso a polygon in the cloud panel,
#            "x" flag cloud values outside a (1-pchi) normal band,
#            "n" lasso locations in the map view.
#   pchi   : significance level used by mode "x".
#   zmv    : map-view symbol variable, forwarded to PlotDiag.varobj.
# Relies on the par() states saved in .varDiagOptions by PlotDiag.varobj.
# Returns the index of the selected point pair (0 when none).
interact.varobj<-function(v,region=NULL,g="s",pchi=0.05,zmv=0){
# identification in the studentized variogram cloud
palette(c("black","cyan","magenta","green3","yellow","blue","white","red"))
n<-length(v$h)
infm<-solve(v$cg);
cv<-chol((infm+t(infm))/2);
XM<-cbind(gamsph1(v$h,v$pars),gamsph2(v$h,v$pars),gamsph3(v$h,v$pars))*(gamsph(v$h,v$pars))^(-0.75)/4
Vare<-v$cg-XM%*%solve(t(XM)%*%solve(v$cg,XM),t(XM))
#sig<-mean(sqrt(diag(Vare)))
e<-v$res
sig<-sqrt(sum(e^2)/(n-3))
# studentized square-root cloud values (same construction as PlotDiag.varobj)
gdd<-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h,v$pars)^0.25+e*sig/sqrt(diag(Vare))
xn<-v$data[,c(2,1)]
r1<-v$loores[row(v$loores)<col(v$loores)]
tloores<-t(v$loores)
r2<-tloores[row(tloores)<col(tloores)]
resi<-v$loores-v$loores
resi[row(resi)<col(resi)]<-v$res
resi<-resi+t(resi)
n0<-length(v$lofa)
xn<-v$data[,c(2,1)]
# xy holds, per point pair, the two endpoint coordinates (y1,y2,x1,x2)
xy<-matrix(0,n,4)
te<-crossprod(matrix(1,1,n0),t(xn[,1]))
xy[,1]<-te[row(te)<col(te)]
te<-crossprod(t(xn[,1]),matrix(1,1,n0))
xy[,2]<-te[row(te)<col(te)]
te<-crossprod(matrix(1,1,n0),t(xn[,2]))
xy[,3]<-te[row(te)<col(te)]
te<-crossprod(t(xn[,2]),matrix(1,1,n0))
xy[,4]<-te[row(te)<col(te)]
# mode "l": pick a pair in the leave-one-out residual panel
if(g=="l"){
par(mfrow=c(2,2), mfg=c(2,1))
par(get("p21", envir = .varDiagOptions))
par(fig=c(0,0.5,0,0.5))
xyi<-identify(matrix(cbind(v$res,v$res),n*2,1),matrix(cbind(r1,r2),n*2,1),plot=FALSE,n=1)
if(xyi>n) xyi<-xyi-n
}
# mode "m": pick one or two locations in the map view and highlight the
# corresponding pairs in the other panels
if(g=="m"){
par(mfrow=c(2,2), mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
par(fig=c(0,0.5,0.5,1))
ix0<-identify(xn[,2],xn[,1],plot=TRUE,n=1)
points(xn[ix0,2],xn[ix0,1],pch=16,col=6)
ind1<-ceiling(sqrt((2*(1:n)+0.25))-0.5)+1
ind2<-(1:n)-ceiling(sqrt((2*(1:n)+0.25))-0.5)/2*(ceiling(sqrt((2*(1:n)+0.25))-0.5)-1)
par(get("p12", envir = .varDiagOptions))
par(mfrow=c(2,2),mfg=c(1,2),fig=c(0.5,1,0.5,1))
# par(mfg=c(1,2,2,2))
# par(fig=c(0.5,1,0.5,1))
points(v$h[ind1==ix0 | ind2 == ix0],gdd[ind1==ix0 | ind2 == ix0],pch=16,col=6)
par(mfrow=c(2,2), mfg=c(2,1))
#par(p21)
par(get("p21", envir = .varDiagOptions))
par(fig=c(0,0.5,0,0.5))
points(t(resi[ix0,-ix0]),t(v$loores[ix0,-ix0]),pch=16,col=6)
par(mfrow=c(2,2), mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
par(fig=c(0,0.5,0.5,1))
iy0<-identify(xn[,2],xn[,1],plot=FALSE,n=1)
if(length(iy0)>0){
# translate the two point indices into the pair index
ix<-max(ix0,iy0)
iy<-min(ix0,iy0)
xyi<-(ix-1)*(ix-2)/2+iy}
else{
xyi<-0
# dev.off()
# PlotDiag.varobj(v,region,zmv=zmv)
# par(mfrow=c(2,2))
# par(mfg=c(1,1,2,2))
# par(p11)
# par(fig=c(0,0.5,0.5,1))
# points(xn[ix0,2],xn[ix0,1],pch=16,col=6)
# identify(xn[,2],xn[,1],plot=T,pts=cbind(xn[ix0,2],xn[ix0,1]))
ind1<-ceiling(sqrt((2*(1:n)+0.25))-0.5)+1
ind2<-(1:n)-ceiling(sqrt((2*(1:n)+0.25))-0.5)/2*(ceiling(sqrt((2*(1:n)+0.25))-0.5)-1)
par(mfrow=c(2,2), mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
points(v$h[ind1==ix0 | ind2 == ix0],gdd[ind1==ix0 | ind2 == ix0],pch=16,col=6)
par(mfrow=c(2,2), mfg=c(2,1))
#par(p21)
par(get("p21", envir = .varDiagOptions))
par(fig=c(0,0.5,0,0.5))
points(t(resi[ix0,-ix0]),t(v$loores[ix0,-ix0]),pch=16,col=6)
}
}
# mode "s": pick a single pair in the square-root cloud
if(g=="s"){
par(mfrow=c(2,2), mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
xyi<-identify(v$h,gdd,plot=FALSE,n=1)
}
# mode "t": lasso a polygon in the cloud panel; pairs inside the polygon
# are highlighted in the cloud and drawn as segments in the map view
if(g=="t"){
par(mfrow=c(2,2), mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
p<-locator(n=500,type="l",col=4)
m<-length(p$x)
lines(p$x[c(m,1)],p$y[c(m,1)],col=2)
# even-odd point-in-polygon test for each cloud point
i<-t(outer(v$h,p$x,FUN="-"))/(p$x[c(2:m,1)]-p$x)
gt<-apply(t((i*(p$y[c(2:m,1)]-p$y)+p$y))>=gdd&t((i>=0)&(i<=1)),1,"sum")
s<-apply(t((i*(p$y[c(2:m,1)]-p$y)+p$y))<=gdd&t((i>=0)&(i<=1)),1,"sum")
i0<-(s%%2)|(gt%%2)
dev.off()
PlotDiag.varobj(v,region)
par(mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
points(v$h[i0],gdd[i0],pch=16,col=3)
polygon(p,density=0,col=4)
par(mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
par(fig=c(0,0.5,0.5,1))
segments(xy[i0,3],xy[i0,1],xy[i0,4],xy[i0,2],pch=16,col=3,lwd=3)
xyi<-0
}
# mode "x": flag cloud values outside the (1-pchi) pointwise normal band
if(g=="x"){
par(mfrow=c(2,2), mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
i0<-(gdd-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h,v$pars)^0.25)/sig>qnorm(1-pchi/2)
i0a<-(-gdd+(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h,v$pars)^0.25)/sig>qnorm(1-pchi/2)
dev.off()
PlotDiag.varobj(v,region,zmv=zmv)
par(mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
points(v$h[i0],gdd[i0],pch=16,col=3)
points(v$h[i0a],gdd[i0a],pch=16,col=4)
xv<-seq(0.0001,max(v$h),0.01)
# lines(xv,gamsph(xv,v$pars)*qchisq(1-pchi,1),lty=4,lwd=2)
lines(xv,(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(xv,v$pars)^0.25+sig*qnorm(1-pchi/2),lty=4,lwd=2)
lines(xv,(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(xv,v$pars)^0.25-sig*qnorm(1-pchi/2),lty=4,lwd=2)
par(get("p11", envir = .varDiagOptions))
par(mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
print(xy[i0,])
par(fig=c(0,0.5,0.5,1))
segments(xy[i0,3],xy[i0,1],xy[i0,4],xy[i0,2],pch=16,col=3,lwd=2)
segments(xy[i0a,3],xy[i0a,1],xy[i0a,4],xy[i0a,2],pch=16,col=4,lwd=2)
xyi<-0
}
# mode "n": lasso locations in the map view; all pairs fully inside the
# polygon are highlighted in the cloud and the map
if(g=="n"){
par(mfrow=c(2,2), mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
par(fig=c(0,0.5,0.5,1))
p<-locator(n=500,type="l",pch=16,col=4)
m<-length(p$x)
lines(p$x[c(m,1)],p$y[c(m,1)],col=2)
# even-odd point-in-polygon test for each location
i<-t(outer(xn[,2],p$x,FUN="-"))/(p$x[c(2:m,1)]-p$x)
gt<-apply(t((i*(p$y[c(2:m,1)]-p$y)+p$y))>=xn[,1]&t((i>=0)&(i<=1)),1,"sum")
s<-apply(t((i*(p$y[c(2:m,1)]-p$y)+p$y))<=xn[,1]&t((i>=0)&(i<=1)),1,"sum")
i0<-(s%%2)|(gt%%2)
nl<-length(v$h)
ind1<-ceiling(sqrt((2*(1:nl)+0.25))-0.5)+1
ind2<-(1:nl)-ceiling(sqrt((2*(1:nl)+0.25))-0.5)/2*(ceiling(sqrt((2*(1:nl)+0.25))-0.5)-1)
i00<-match(ind1,(1:n0)[i0],nomatch=FALSE)&match(ind2,(1:n0)[i0],nomatch=FALSE)
dev.off()
PlotDiag.varobj(v,region)
par(mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
points(v$h[i00],gdd[i00],pch=16,col=3)
par(mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
par(fig=c(0,0.5,0.5,1))
polygon(p,density=0,col=4)
segments(xy[i00,3],xy[i00,1],xy[i00,4],xy[i00,2],pch=16,col=3,lwd=3)
xyi = 0
}
#print(xyi)
# for single-pair modes, redraw the panel set with the selection marked
if(g!="t"&g!="x"&g!="n"& xyi>0){
dev.off()
PlotDiag.varobj(v,region,xyi=xyi,zmv=zmv)
}
xyi}
| /R/VARDIAG.R | no_license | edzer/vardiag | R | false | false | 19,993 | r | #assign("p11", NULL, envir = .GlobalEnv)
#assign("p12", NULL, envir = .GlobalEnv)
#assign("p21", NULL, envir = .GlobalEnv)
#assign("p22", NULL, envir = .GlobalEnv)
#p11 <<- par() p22 <<- par() p12 <<- par() p21 <<- par()
#p11 <- par() p22 <- par() p12 <- par() p21 <- par()
# Package-local state environment holding the saved par() settings of the
# four diagnostic panels (written by PlotDiag.varobj, read back by
# interact.varobj).  All four slots start out empty.
.varDiagOptions <- new.env(FALSE, globalenv())
invisible(lapply(c("p11", "p12", "p21", "p22"),
                 assign, value = NULL, envir = .varDiagOptions))
# Spherical variogram model evaluated at lag distances h.
# th = (nugget, partial sill, range): the value is 0 for h <= 0, follows
# the spherical polynomial for 0 < h <= range, and equals nugget + sill
# beyond the range.  Vectorized in h.
gamsph <- function(h, th = rbind(1, 1, 1)) {
  u <- h / th[3]
  inside <- (0 < h) * (h <= th[3])
  beyond <- (h > th[3])
  inside * (th[1] + th[2] * (3 / 2 * u - 1 / 2 * u^3)) + beyond * (th[1] + th[2])
}
# Weighted residual between observed values y and the spherical model at
# lags h1; w1 supplies optional per-lag weights (default 1 = unweighted).
fth <- function(th, y, h1, w1 = 1) {
  resid <- y - gamsph(h1, th)
  resid / w1
}
# Relative residual: deviation of y from the spherical model at lags h1,
# scaled by the model value itself.  (w1 is accepted for interface parity
# with fth() but is not used.)
ftc <- function(th, y, h1, w1) {
  g <- gamsph(h1, th)
  (y - g) / g
}
# Decorrelated residual: the raw residual vector (y minus the spherical
# model at lags h1) left-multiplied by the decorrelation matrix cv1.
ftg <- function(th, y, h1, cv1) {
  resid <- y - gamsph(h1, th)
  cv1 %*% resid
}
# Decorrelated fourth-root-scale residual: y (already on the fourth-root
# scale) minus the bias-corrected fourth root of the spherical model,
# left-multiplied by the decorrelation matrix cv1.
fts <- function(th, y, h1, cv1) {
  bias <- 2^0.25 * gamma(0.75) / sqrt(pi)
  cv1 %*% (y - bias * gamsph(h1, th)^0.25)
}
# Scalar objective for optim(): mean squared decorrelated fourth-root
# residual of the spherical model with parameter vector th.
ftsOpt <- function(th, y, h1, cv1) {
  bias <- 2^0.25 * gamma(0.75) / sqrt(pi)
  resid <- cv1 %*% (y - bias * gamsph(h1, th)^0.25)
  mean(resid^2)
}
# Partial derivative of the spherical variogram w.r.t. the nugget th[1]:
# identically 1.  Returned as a scalar; callers cbind() it with
# vector-valued derivatives and rely on recycling.
gamsph1<-function(h,th=rbind(1,1,1)){1}
# Partial derivative of the spherical variogram w.r.t. the partial sill
# th[2]: the spherical shape polynomial inside the range, 1 beyond it,
# 0 for h <= 0.  Vectorized in h.
gamsph2 <- function(h, th = rbind(1, 1, 1)) {
  u <- h / th[3]
  shape <- 3 / 2 * u - 1 / 2 * u^3
  (0 < h) * (h <= th[3]) * shape + (h > th[3])
}
# Partial derivative of the spherical variogram w.r.t. the range th[3]:
# nonzero only for lags in (0, range]; 0 at h <= 0 and beyond the range.
gamsph3 <- function(h, th = rbind(1, 1, 1)) {
  u <- h / th[3]
  (0 < h) * (h <= th[3]) * 3 / 2 * th[2] / th[3] * (u^3 - u)
}
# Degree-5 polynomial approximation to the hypergeometric function used
# by the fourth-root correlation correction (the commented-out original
# evaluated the 2F1(0.75, 0.75; 0.5; r) series; these coefficients are
# its first six terms).  Vectorized in r.
hyperg <- function(r) {
  1 + 1.125 * r + 1.1484375 * r^2 + 1.158007813 * r^3 +
    1.16317749 * r^4 + 1.166408539 * r^5
}
# Correlation correction for fourth-root transformed squared differences:
# maps a plain correlation r to the corresponding correlation on the
# fourth-root scale via the hyperg() polynomial approximation.
ficorr <- function(r) {
  g2 <- gamma(0.75)^2
  g2 / (sqrt(pi) - g2) * ((1 - r^2) * hyperg(r^2) - 1)
}
# Iterative generalized-least-squares fit of the spherical variogram on
# the fourth-root scale.
#   h0        : vector of pairwise lag distances (length n*(n-1)/2).
#   y         : squared half-differences of the data at those lags.
#   iter      : maximum number of reweighting iterations.
#   tolerance : relative parameter-change threshold for convergence.
#   trace     : > 0 prints iteration progress.
#   th0       : optional starting parameters (nugget, sill, range).
# Returns a list with pars (fitted parameters), cg (covariance of the
# fourth-root cloud), res (fourth-root residuals) and lof (lack of fit).
estvar <- function(h0, y, iter=50, tolerance=0.0002, trace=1, th0=rbind(0,1,1))
{
#EJP added:
#stop("this function requires nlregb (an S-Plus proprietary function) to work")
n<-ceiling(sqrt(2*length(h0)))
# preparation for covgamma (covariance of the variogram cloud)
n1<-n*(n-1)/2
# first index of the gamma[i,j] matrix
i1<-matrix(1:n,n,n)
# first part of the row index of the covgamma matrix
k1<-matrix(i1[row(i1)<col(i1)],n1,n1)
# second part of the row index of the covgamma matrix
k2<-matrix(t(i1)[row(i1)<col(i1)],n1,n1)
# first part of the column index of the covgamma matrix
k3<-t(k1)
# second part of the column index of the covgamma matrix
k4<-t(k2)
if(!missing(th0)) {
#EJP outcommented:
#opt<-nlregb(n*(n-1)/2,cbind(0,max(y/2),max(h0)),fts,y=y^0.25,h1=h0,cv1=diag(n1),lower=cbind(0,0,0))
opt<-optim(par = c(0,max(y/2),max(h0)), ftsOpt,
lower=cbind(0,0,0), method = "L-BFGS-B",
y=y^0.25, h1=h0, cv1=diag(n1))
th1 <- opt$par
}
else
th1<-th0
# NOTE(review): this unconditionally overwrites th1 from the branch
# above, making the initial fit/th0 irrelevant — looks like leftover
# debugging; confirm intent.
th1<-cbind(0,max(y/2),max(h0))
#th0<-th1_c(3.72635248595876, 15.5844183738953, 1.22109233789852)
#th1<-c(0.0000000,7.6516077,0.7808538)
for (i in 1:iter) {
if(trace>0)
print(i)
gg<-sqrt(2*gamsph(h0,th1))
# column 1, column 2, ...
# the gamma vector is laid out as a matrix
tt<-matrix(gg[(t(i1)-2)*(t(i1)-1)/2+i1],n,n)
# symmetrize
tt1<-tt
tt1[row(tt1)>col(tt1)]<-t(tt)[row(tt1)>col(tt1)]
# clear the diagonal
tt1[row(tt1)==col(tt1)]<-0
# compute covgamma
cg<-matrix(tt1[(k4-1)*n+k1]+tt1[(k2-1)*n+k3]-tt1[(k3-1)*n+k1]-tt1[(k4-1)*n+k2],n1,n1)
cgcg<-outer(gg,gg,"*")
corg<-sqrt(cgcg)*ficorr((cg*lower.tri(cg))/cgcg)
corg<-sqrt(2)*(sqrt(pi)-gamma(0.75)^2)/pi*(corg+t(corg)+diag(gg))
# decorrelation matrix for this iteration's weights
infm<-solve(corg);
cv<-chol((infm+t(infm))/2);
#sc<-cbind(1/th1[2],1/th1[2],1/th1[3])
#EJP outcommented:
#opt<-nlregb(n*(n-1)/2,th1,fts,y=y^0.25,h1=h0,cv1=cv,lower=cbind(0,0,0))
opt <- optim(par = th1, ftsOpt,
lower=cbind(0,0,0), method = "L-BFGS-B",
y=y^0.25,h1=h0,cv1=cv)
if(trace>0) print(opt$par)
# stop once the relative parameter change falls below tolerance
if(sum(abs((th1-opt$par)/(th1+0.00001)))<=tolerance)
break
th1<-opt$par
}
print("Fertig")
v<-list(pars=opt$par)
v$cg<-corg
v$res<-y^0.25-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(h0,v$pars)^0.25
v$lof<-t(v$res)%*%solve(corg,v$res)
v
}
# Build a "varobj" diagnostic object from a data matrix m whose columns
# are (x, y, z): fits the spherical variogram with estvar() and,
# optionally (loo=TRUE), refits with each observation left out to obtain
# leave-one-out residuals, parameter shifts and Cook's distances.
#   m         : n x 3 matrix of coordinates and data values.
#   iter, tolerance, trace : forwarded to estvar().
#   loo       : if TRUE, run the (expensive) leave-one-out analysis.
# Returns an object of class "varobj".
varobj<-function(m,iter=50,tolerance=0.0002,trace=1,loo=FALSE){
n<-dim(m)[1]
#a1<-t(m[,3]-t(matrix(m[,3],n,n)))
#b1<-t(m[,1]-t(matrix(m[,1],n,n)))
#c1<-t(m[,2]-t(matrix(m[,2],n,n)))
# pairwise differences of data values and coordinates
a1<-outer(m[,3],m[,3],FUN="-")
b1<-outer(m[,1],m[,1],FUN="-")
c1<-outer(m[,2],m[,2],FUN="-")
#d1<-cbind(sqrt(b1[row(b1)<col(b1)]^2+c1[row(c1)<col(c1)]^2),a1[row(a1)<col(a1)]^2)
# variogram cloud: squared half-differences y at lag distances h0
y<-a1[row(a1)<col(a1)]^2/2
h0<-sqrt(b1[row(b1)<col(b1)]^2+c1[row(c1)<col(c1)]^2)
v<-estvar(h0,y,iter,tolerance,trace)
# observed information of the parameter estimate via the GLS normal equations
XM<-cbind(gamsph1(h0,v$pars),gamsph2(h0,v$pars),gamsph3(h0,v$pars))*(gamsph(h0,v$pars))^(-0.75)/4
v$info<-solve(t(XM)%*%solve(v$cg,XM))
loores<-matrix(0,n,n)
tha<-matrix(0,n,3)
lofa<-matrix(0,n,1)
cda<-matrix(0,n,1)
v$h<-h0
v$y<-y
if(loo==TRUE){
# leave-one-out loop: refit without observation i, record the
# lack-of-fit reduction, refitted parameters and Cook's distance
for (i in 1:n){
print(i)
m1<-m[-i,]
a1<-t(m1[,3]-t(matrix(m1[,3],n-1,n-1)))
b1<-t(m1[,1]-t(matrix(m1[,1],n-1,n-1)))
c1<-t(m1[,2]-t(matrix(m1[,2],n-1,n-1)))
y<-a1[row(a1)<col(a1)]^2/2
h0<-sqrt(b1[row(b1)<col(b1)]^2+c1[row(c1)<col(c1)]^2)
z<-estvar(h0,y,iter,tolerance,trace,th0=v$pars)
lofa[i,1]<-v$lof-z$lof
tha[i,]<-z$pars
cda[i,1]<-t(v$pars-z$pars)%*%v$info%*%(v$pars-z$pars)
# fourth-root residuals of all pairs involving observation i
mm2<-m[i,]
mm3<-t(t(m)-mm2)^2/2
h<-sqrt(mm3[,1]+mm3[,2])
loores[i,]<-mm3[,3]^0.25-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(h,z$pars)^0.25
}
}
v$loores<-loores
v$tha<-tha
v$lofa<-lofa
v$cda<-cda
v$data<-m
class(v)<-"varobj"
v
}
# Print method for "varobj": shows the fitted parameters and the
# lack-of-fit statistic, then returns the object invisibly.
print.varobj<-function(x,...){print(x$pars); print(x$lof);invisible(x)}
#r[row(r)<col(r)]<-v$res
#r<-r+t(r)
# Draw the 2x2 diagnostic panel for a fitted variogram object.
#   v      : object of class "varobj" (output of varobj()).
#   region : optional 2-column matrix with a boundary polygon for the map view.
#   xyi    : index of a point pair to highlight across all panels (0 = none).
#   zmv    : map-view symbol variable: 0 locations only, 1 data value,
#            2 Cook's distance, 3 lack-of-fit reduction, 4 approx. Cook's distance.
# Panels: (1,1) map view, (1,2) studentized square-root variogram cloud,
#         (2,1) leave-one-out residuals, (2,2) decorrelated residuals.
# Side effects: draws on the current device and stores each panel's par()
# state in .varDiagOptions (p11, p12, p21, p22) for interact.varobj().
# Returns n, the number of point pairs.
PlotDiag.varobj<-function(v, region = NULL, xyi = 0, zmv = 0) {
palette(c("black","cyan","magenta","green3","yellow","blue","white","red"))
n<-length(v$h)
# decorrelation matrix: Cholesky factor of the symmetrized inverse covariance
infm<-solve(v$cg);
cv<-chol((infm+t(infm))/2);
# Jacobian of the fourth-root model surface w.r.t. the three parameters
XM<-cbind(gamsph1(v$h,v$pars),gamsph2(v$h,v$pars),gamsph3(v$h,v$pars))*(gamsph(v$h,v$pars))^(-0.75)/4
Vare<-v$cg-XM%*%solve(t(XM)%*%solve(v$cg,XM),t(XM))
#sig<-mean(sqrt(diag(Vare)))
e<-v$res
sig<-sqrt(sum(e^2)/(n-3))
# studentized square-root cloud values scattered around the fitted curve
gdd<-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h,v$pars)^0.25+e*sig/sqrt(diag(Vare))
r1<-v$loores[row(v$loores)<col(v$loores)]
tloores<-t(v$loores)
r2<-tloores[row(tloores)<col(tloores)]
resi<-v$loores-v$loores
resi[row(resi)<col(resi)]<-v$res
resi<-resi+t(resi)
n0<-length(v$lofa)
xn<-v$data[,c(2,1)]
# xy holds, per point pair, the two endpoint coordinates (y1,y2,x1,x2)
xy<-matrix(0,n,4)
te<-crossprod(matrix(1,1,n0),t(xn[,1]))
xy[,1]<-te[row(te)<col(te)]
te<-crossprod(t(xn[,1]),matrix(1,1,n0))
xy[,2]<-te[row(te)<col(te)]
te<-crossprod(matrix(1,1,n0),t(xn[,2]))
xy[,3]<-te[row(te)<col(te)]
te<-crossprod(t(xn[,2]),matrix(1,1,n0))
xy[,4]<-te[row(te)<col(te)]
if(!missing(xyi)){
# recover the two point indices (ix, iy) from the pair index xyi
ix<-ceiling(sqrt((2*(xyi)+0.25))-0.5)+1
iy<-(xyi)-ceiling(sqrt((2*(xyi)+0.25))-0.5)/2*(ceiling(sqrt((2*(xyi)+0.25))-0.5)-1)
nl<-n
#*(n-1)/2
ind1<-ceiling(sqrt((2*(1:nl)+0.25))-0.5)+1
ind2<-(1:nl)-ceiling(sqrt((2*(1:nl)+0.25))-0.5)/2*(ceiling(sqrt((2*(1:nl)+0.25))-0.5)-1)
}
paro<-par(no.readonly=TRUE)
par(mfrow=c(2,2), mar = c(3,2,2,1)+.1, bg="white")
#EJP: moved (1,1) to be first plotted:
#graphsheet(win.width=0.5,win.height=0.5,win.left=0,win.top=0,Name="1) MAP")
# plot map view as left plot in first row
# dg: approximate Cook's distance per left-out observation
dg<-1:length(v$lofa)
for(i in 1:length(v$lofa)){
dg[i]<-sum((0.822*(gamsph(v$h,c(v$tha[i,1],v$tha[i,2],v$tha[i,3]))^0.25-gamsph(v$h,v$pars)^0.25))^2)
}
#if(! exists("zmv")) zmv<-0
#if(zmv==0) plot(xn[,2],xn[,1],xlim=c(-0.6,1.1),ylim=c(-0.28, 0.28))
#if(zmv==0) plot(xn[,2],xn[,1],xlim=c(-1.1,1.1),ylim=c(-0.28, 0.28),lwd=3)
if (zmv==0 && !is.null(region))
plot(xn[,2],xn[,1],xlim=c(min(region[,1]), max(region[,1])),
ylim=c(min(region[,2]), max(region[,2])),lwd=3,
asp = 1, xlab = "", ylab = "")
else
plot(xn[,2],xn[,1],xlim=c(min(xn[,2]), max(xn[,2])),
ylim=c(min(xn[,1]), max(xn[,1])),lwd=3,
asp = 1, xlab = "", ylab = "")
#lolo<-lof[1,1]-lofa
z<-xn[,1]
if(zmv>0){
# choose the variable shown as circle sizes in the map view
z<-switch(zmv,v$data[,3],v$cda,v$lofa,dg)
inc<-0.25
rmin<-0.03
epsi<-(max(z)-min(z))/(inc/rmin-1)
# symbols(xn[,2],xn[,1],circles=z-min(z)+epsi,inches=inc,xlim=c(-0.63,1.14),ylim=c(-0.3, 0.28))
if(zmv>0 && !is.null(region))
symbols(xn[,2],xn[,1],circles=z-min(z)+epsi,inches=inc,
xlim=c(min(region[,1]), max(region[,1])),
ylim=c(min(region[,2]), max(region[,2])),lwd=3)
else
symbols(xn[,2],xn[,1],circles=z-min(z)+epsi,inches=inc,
xlim=c(min(xn[,2]), max(xn[,2])),
ylim=c(min(xn[,1]), max(xn[,1])),lwd=3)
# symbols(xn[,2],xn[,1],circles=z-min(z)+epsi,inches=inc,lwd=3)
}
if(!is.null(region)) polygon(region[,1],region[,2],density=0,col=2)
title(paste("Map View",switch(zmv+1,'','(y)',"(Cook's Distance)",'(Mahalanobis Distance)',"(Cook's Distance)")))
#gsdmpv<-dev.cur()
if(!missing(xyi)){
# highlight the selected point pair in the map view
segments(xy[xyi,3],xy[xyi,1],xy[xyi,4],xy[xyi,2],pch=16,col=3,lwd=3)
points(xy[xyi,3],xy[xyi,1],pch=16,col=6)
points(xy[xyi,4],xy[xyi,2],pch=16,col=8)
# identify(xn[,2],xn[,1],plot=T,pts=cbind(xn[ix,2],xn[ix,1]))
text(xn[ix,2],xn[ix,1]-(max(z)-min(z))/10,paste(ix))
# identify(xn[,2],xn[,1],plot=T,pts=cbind(xn[iy,2],xn[iy,1]))
text(xn[iy,2],xn[iy,1]-(max(z)-min(z))/10,paste(iy))
}
assign("p11", par(no.readonly=TRUE), envir = .varDiagOptions)
# EJP-end
#graphsheet(win.width=0.8,win.height=1,win.left=0,win.top=0,Name="Interactive Variogram Plot")
#windows(width = 8, height = 5.5,rescale="R")
#windows()
par(mfg=c(2,1))
#graphsheet(win.width=0.5,win.height=0.5,win.left=0,win.top=0.5,Name="3) LOO")
# leave-one-out residual panel (2,1)
plot(matrix(cbind(v$res,v$res),n*2,1), matrix(cbind(r1,r2),n*2,1),
pch=1, xlab="", ylab="", lwd=1)
lines(c(min(v$res),max(v$res)),c(min(v$res),max(v$res)),col=8)
segments(v$res,r1,v$res,r2)
title("Leave One Out Residuals")
if(!missing(xyi)){
print("xyi")
print(xyi)
points(v$res[xyi],r1[xyi],pch=18,col=3)
points(v$res[xyi],r2[xyi],pch=18,col=5)
points(t(resi[ix,-ix]),t(v$loores[ix,-ix]),pch=16,col=6)
points(t(resi[iy,-iy]),t(v$loores[iy,-iy]),pch=16,col=8)
segments(v$res[xyi],r1[xyi],v$res[xyi],r2[xyi],col=3,lwd=5)
}
assign("p21", par(no.readonly=TRUE), envir = .varDiagOptions)
cv1<-cv
i<-1:n
di<-dim(v$cg)[1]
if(!missing(xyi)){
# di<-dim(v$cg)[1]
# pm<-diag(di)
# pm[xyi,]<-diag(di)[di,]
# pm[di,]<-diag(di)[xyi,]
# cg1<-pm%*%v$cg%*%pm
# i[n]<-xyi
# i[xyi]<-n
# print(max(abs(cv1-cv)))
# permute so the highlighted pair ends up last, then refactor
i<-c(sample(seq(di)[-xyi]),xyi)
cg1<-v$cg[i,i]
infm<-solve(cg1);
cv1<-chol((infm+t(infm))/2);
}
par(mfg=c(2,2))
#graphsheet(win.width=0.5,win.height=0.5,win.left=0.5,win.top=0.5,Name="4) DCR")
# decorrelated residual panel (2,2)
x<-((2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h,v$pars)^0.25)[i]
y<-v$res[i]
cv1<-cv1/cv1[di,di]
plot(cv1%*%x,cv1%*%y,xlab="",ylab="",lwd=1)
if(!missing(xyi))
points(x[n],y[n],pch=16,col=3)
#sm<-lowess(cv1%*%x,cv1%*%y)
#lines(sm$x,sm$y,lwd=3)
glu<-min(cv1%*%x)
glo<-max(cv1%*%x)
lines(c(glu,glo),c(0,0))
title("Decorrelated Residuals")
assign("p22", par(no.readonly=TRUE), envir = .varDiagOptions)
xv<-seq(0.0001,max(v$h),0.01)
par(mfg=c(1,2))
#graphsheet(win.width=0.5,win.height=0.5,win.left=0.5,win.top=0,Name="2) SVC")
# studentized square-root cloud panel (1,2)
plot(v$h,gdd,xlab="",ylab="",lwd=1)
lines(xv,(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(xv,v$pars)^0.25,lwd=3)
title("Studentized Square Root Cloud")
if(!missing(xyi)){
points(v$h[ind1==ix | ind2 == ix],gdd[ind1==ix | ind2 == ix],pch=16,col=6)
points(v$h[ind1==iy | ind2 == iy],gdd[ind1==iy | ind2 == iy],pch=16,col=8)
points(v$h[xyi],gdd[xyi],pch=16,col=3)
}
assign("p12", par(no.readonly=TRUE), envir = .varDiagOptions)
par(paro)
n
}
# Plot approximate Cook's distance against the reduction in lack-of-fit
# obtained by deleting each observation in turn, and let the user pick
# points interactively with identify().
#   v : object of class "varobj" with leave-one-out results (tha, lofa).
# NOTE(review): the xlab "Reduktion im Lack of Fit" is German ("reduction
# in lack of fit"); the scaling constants 187 and 19 look data-specific —
# confirm before reuse.
CookRLF.varobj<-function(v){
n<-length(v$lofa)
lofa<-matrix(0,n,1)
i1<-matrix(1:n,n,n)
for (k in 1:n){
# ii marks all point pairs that involve observation k
ii<-(i1[row(i1)<col(i1)]==k)|(t(i1)[row(t(i1))<col(t(i1))]==k)
cgt<-v$cg[!ii,!ii]
rt<-v$y[!ii]^0.25-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h[!ii],v$tha[k,])^0.25
lofa[k]<-rt%*%solve(cgt,rt)
}
# dg: approximate Cook's distance from the parameter shift per deletion
dg<-1:length(v$lofa)
for(i in 1:length(v$lofa)){
dg[i]<-sum((0.822*(gamsph(v$h,c(v$tha[i,1],v$tha[i,2],v$tha[i,3]))^0.25-gamsph(v$h,v$pars)^0.25))^2)
}
plot((v$lof[1]-lofa)/v$lof[1]*187/19,dg/(3*v$lof[1])*187,ylab="Cook's Distance",xlab="Reduktion im Lack of Fit")
identify((v$lof[1]-lofa)/v$lof[1]*187/19,dg/(3*v$lof[1])*187)
}
# Normal QQ-plot of the decorrelated variogram-cloud values.
# The cloud values v$y are decorrelated with the Cholesky factor of the
# symmetrized inverse of their covariance v$cg, sorted, and plotted
# against standard-normal quantiles; an identity reference line and a
# pointwise Monte-Carlo envelope (100 simulated normal samples,
# 0.05/n and 1-0.05/n quantiles per order statistic) are added.
#   v : object of class "varobj" (uses components h, cg, y).
# Side effect: draws on the current device.
# Fix: axis label typo "orderd" -> "ordered".
QQVarcloud.varobj<-function(v){
n<-length(v$h)
infm<-solve(v$cg);
cv<-chol((infm+t(infm))/2);
plot(qnorm(seq(from=1/(2*n),length=n,by=1/n)),sort(cv%*%v$y),
xlab="quantile of standard normal distribution",
ylab="ordered decorrelated residual")
lines(c(-3,3),c(-3,3),col=8,lwd=3)
# pointwise simulation envelope for the order statistics
apply(t(apply(apply(matrix(rnorm(n*100),ncol=100),2,sort),1,quantile,probs=c(0.05/n,1-0.05/n))),2,lines,x=qnorm(seq(from=1/(2*n),length=n,by=1/n)))
}
# Chi-square(1) QQ-plot of the scaled squared differences
# [Z(s)-Z(s')]^2 / (2*g(s-s')): values v$y divided by the fitted
# spherical variogram, sorted, against chi-square(1) quantiles, with a
# pointwise Monte-Carlo envelope (100 simulated chi-square samples) and
# an identity reference line.
#   v : object of class "varobj" (uses components h, cg, y, pars).
# Side effect: draws on the current device.
# NOTE(review): infm/cv are computed but never used here — presumably
# leftover from QQVarcloud.varobj; confirm before removing.
QQDecorr.varobj<-function(v){
n<-length(v$h)
infm<-solve(v$cg);
cv<-chol((infm+t(infm))/2);
plot(qchisq(seq(from=1/(2*n),length=n,by=1/n),1),sort(v$y/gamsph(v$h,v$pars)),
xlab="quantile of Chi-square distribution",
ylab="ordered value of [Z(s)-Z(s')]^2/(2g(s-s'))")
apply(t(apply(apply(matrix(rchisq(n*100,1),ncol=100),2,sort),1,quantile,probs=c(0.05/n,1-0.05/n))),2,lines,x=qchisq(seq(from=1/(2*n),length=n,by=1/n),1))
lines(c(0,8),c(0,8),col=8,lwd=3)
}
# Interactive exploration of the diagnostic panels drawn by PlotDiag.varobj.
#   v      : "varobj" object.
#   region : optional boundary polygon, forwarded to PlotDiag.varobj.
#   g      : interaction mode:
#            "l" pick a pair in the leave-one-out panel,
#            "m" pick one or two locations in the map view,
#            "s" pick a pair in the square-root cloud,
#            "t" lasso a polygon in the cloud panel,
#            "x" flag cloud values outside a (1-pchi) normal band,
#            "n" lasso locations in the map view.
#   pchi   : significance level used by mode "x".
#   zmv    : map-view symbol variable, forwarded to PlotDiag.varobj.
# Relies on the par() states saved in .varDiagOptions by PlotDiag.varobj.
# Returns the index of the selected point pair (0 when none).
interact.varobj<-function(v,region=NULL,g="s",pchi=0.05,zmv=0){
# identification in the studentized variogram cloud
palette(c("black","cyan","magenta","green3","yellow","blue","white","red"))
n<-length(v$h)
infm<-solve(v$cg);
cv<-chol((infm+t(infm))/2);
XM<-cbind(gamsph1(v$h,v$pars),gamsph2(v$h,v$pars),gamsph3(v$h,v$pars))*(gamsph(v$h,v$pars))^(-0.75)/4
Vare<-v$cg-XM%*%solve(t(XM)%*%solve(v$cg,XM),t(XM))
#sig<-mean(sqrt(diag(Vare)))
e<-v$res
sig<-sqrt(sum(e^2)/(n-3))
# studentized square-root cloud values (same construction as PlotDiag.varobj)
gdd<-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h,v$pars)^0.25+e*sig/sqrt(diag(Vare))
xn<-v$data[,c(2,1)]
r1<-v$loores[row(v$loores)<col(v$loores)]
tloores<-t(v$loores)
r2<-tloores[row(tloores)<col(tloores)]
resi<-v$loores-v$loores
resi[row(resi)<col(resi)]<-v$res
resi<-resi+t(resi)
n0<-length(v$lofa)
xn<-v$data[,c(2,1)]
# xy holds, per point pair, the two endpoint coordinates (y1,y2,x1,x2)
xy<-matrix(0,n,4)
te<-crossprod(matrix(1,1,n0),t(xn[,1]))
xy[,1]<-te[row(te)<col(te)]
te<-crossprod(t(xn[,1]),matrix(1,1,n0))
xy[,2]<-te[row(te)<col(te)]
te<-crossprod(matrix(1,1,n0),t(xn[,2]))
xy[,3]<-te[row(te)<col(te)]
te<-crossprod(t(xn[,2]),matrix(1,1,n0))
xy[,4]<-te[row(te)<col(te)]
# mode "l": pick a pair in the leave-one-out residual panel
if(g=="l"){
par(mfrow=c(2,2), mfg=c(2,1))
par(get("p21", envir = .varDiagOptions))
par(fig=c(0,0.5,0,0.5))
xyi<-identify(matrix(cbind(v$res,v$res),n*2,1),matrix(cbind(r1,r2),n*2,1),plot=FALSE,n=1)
if(xyi>n) xyi<-xyi-n
}
# mode "m": pick one or two locations in the map view and highlight the
# corresponding pairs in the other panels
if(g=="m"){
par(mfrow=c(2,2), mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
par(fig=c(0,0.5,0.5,1))
ix0<-identify(xn[,2],xn[,1],plot=TRUE,n=1)
points(xn[ix0,2],xn[ix0,1],pch=16,col=6)
ind1<-ceiling(sqrt((2*(1:n)+0.25))-0.5)+1
ind2<-(1:n)-ceiling(sqrt((2*(1:n)+0.25))-0.5)/2*(ceiling(sqrt((2*(1:n)+0.25))-0.5)-1)
par(get("p12", envir = .varDiagOptions))
par(mfrow=c(2,2),mfg=c(1,2),fig=c(0.5,1,0.5,1))
# par(mfg=c(1,2,2,2))
# par(fig=c(0.5,1,0.5,1))
points(v$h[ind1==ix0 | ind2 == ix0],gdd[ind1==ix0 | ind2 == ix0],pch=16,col=6)
par(mfrow=c(2,2), mfg=c(2,1))
#par(p21)
par(get("p21", envir = .varDiagOptions))
par(fig=c(0,0.5,0,0.5))
points(t(resi[ix0,-ix0]),t(v$loores[ix0,-ix0]),pch=16,col=6)
par(mfrow=c(2,2), mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
par(fig=c(0,0.5,0.5,1))
iy0<-identify(xn[,2],xn[,1],plot=FALSE,n=1)
if(length(iy0)>0){
# translate the two point indices into the pair index
ix<-max(ix0,iy0)
iy<-min(ix0,iy0)
xyi<-(ix-1)*(ix-2)/2+iy}
else{
xyi<-0
# dev.off()
# PlotDiag.varobj(v,region,zmv=zmv)
# par(mfrow=c(2,2))
# par(mfg=c(1,1,2,2))
# par(p11)
# par(fig=c(0,0.5,0.5,1))
# points(xn[ix0,2],xn[ix0,1],pch=16,col=6)
# identify(xn[,2],xn[,1],plot=T,pts=cbind(xn[ix0,2],xn[ix0,1]))
ind1<-ceiling(sqrt((2*(1:n)+0.25))-0.5)+1
ind2<-(1:n)-ceiling(sqrt((2*(1:n)+0.25))-0.5)/2*(ceiling(sqrt((2*(1:n)+0.25))-0.5)-1)
par(mfrow=c(2,2), mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
points(v$h[ind1==ix0 | ind2 == ix0],gdd[ind1==ix0 | ind2 == ix0],pch=16,col=6)
par(mfrow=c(2,2), mfg=c(2,1))
#par(p21)
par(get("p21", envir = .varDiagOptions))
par(fig=c(0,0.5,0,0.5))
points(t(resi[ix0,-ix0]),t(v$loores[ix0,-ix0]),pch=16,col=6)
}
}
# mode "s": pick a single pair in the square-root cloud
if(g=="s"){
par(mfrow=c(2,2), mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
xyi<-identify(v$h,gdd,plot=FALSE,n=1)
}
# mode "t": lasso a polygon in the cloud panel; pairs inside the polygon
# are highlighted in the cloud and drawn as segments in the map view
if(g=="t"){
par(mfrow=c(2,2), mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
p<-locator(n=500,type="l",col=4)
m<-length(p$x)
lines(p$x[c(m,1)],p$y[c(m,1)],col=2)
# even-odd point-in-polygon test for each cloud point
i<-t(outer(v$h,p$x,FUN="-"))/(p$x[c(2:m,1)]-p$x)
gt<-apply(t((i*(p$y[c(2:m,1)]-p$y)+p$y))>=gdd&t((i>=0)&(i<=1)),1,"sum")
s<-apply(t((i*(p$y[c(2:m,1)]-p$y)+p$y))<=gdd&t((i>=0)&(i<=1)),1,"sum")
i0<-(s%%2)|(gt%%2)
dev.off()
PlotDiag.varobj(v,region)
par(mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
points(v$h[i0],gdd[i0],pch=16,col=3)
polygon(p,density=0,col=4)
par(mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
par(fig=c(0,0.5,0.5,1))
segments(xy[i0,3],xy[i0,1],xy[i0,4],xy[i0,2],pch=16,col=3,lwd=3)
xyi<-0
}
# mode "x": flag cloud values outside the (1-pchi) pointwise normal band
if(g=="x"){
par(mfrow=c(2,2), mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
i0<-(gdd-(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h,v$pars)^0.25)/sig>qnorm(1-pchi/2)
i0a<-(-gdd+(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(v$h,v$pars)^0.25)/sig>qnorm(1-pchi/2)
dev.off()
PlotDiag.varobj(v,region,zmv=zmv)
par(mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
points(v$h[i0],gdd[i0],pch=16,col=3)
points(v$h[i0a],gdd[i0a],pch=16,col=4)
xv<-seq(0.0001,max(v$h),0.01)
# lines(xv,gamsph(xv,v$pars)*qchisq(1-pchi,1),lty=4,lwd=2)
lines(xv,(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(xv,v$pars)^0.25+sig*qnorm(1-pchi/2),lty=4,lwd=2)
lines(xv,(2^0.25*gamma(0.75)/sqrt(pi))*gamsph(xv,v$pars)^0.25-sig*qnorm(1-pchi/2),lty=4,lwd=2)
par(get("p11", envir = .varDiagOptions))
par(mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
print(xy[i0,])
par(fig=c(0,0.5,0.5,1))
segments(xy[i0,3],xy[i0,1],xy[i0,4],xy[i0,2],pch=16,col=3,lwd=2)
segments(xy[i0a,3],xy[i0a,1],xy[i0a,4],xy[i0a,2],pch=16,col=4,lwd=2)
xyi<-0
}
# mode "n": lasso locations in the map view; all pairs fully inside the
# polygon are highlighted in the cloud and the map
if(g=="n"){
par(mfrow=c(2,2), mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
par(fig=c(0,0.5,0.5,1))
p<-locator(n=500,type="l",pch=16,col=4)
m<-length(p$x)
lines(p$x[c(m,1)],p$y[c(m,1)],col=2)
# even-odd point-in-polygon test for each location
i<-t(outer(xn[,2],p$x,FUN="-"))/(p$x[c(2:m,1)]-p$x)
gt<-apply(t((i*(p$y[c(2:m,1)]-p$y)+p$y))>=xn[,1]&t((i>=0)&(i<=1)),1,"sum")
s<-apply(t((i*(p$y[c(2:m,1)]-p$y)+p$y))<=xn[,1]&t((i>=0)&(i<=1)),1,"sum")
i0<-(s%%2)|(gt%%2)
nl<-length(v$h)
ind1<-ceiling(sqrt((2*(1:nl)+0.25))-0.5)+1
ind2<-(1:nl)-ceiling(sqrt((2*(1:nl)+0.25))-0.5)/2*(ceiling(sqrt((2*(1:nl)+0.25))-0.5)-1)
i00<-match(ind1,(1:n0)[i0],nomatch=FALSE)&match(ind2,(1:n0)[i0],nomatch=FALSE)
dev.off()
PlotDiag.varobj(v,region)
par(mfg=c(1,2))
#par(p12)
par(get("p12", envir = .varDiagOptions))
par(fig=c(0.5,1,0.5,1))
points(v$h[i00],gdd[i00],pch=16,col=3)
par(mfg=c(1,1))
par(get("p11", envir = .varDiagOptions))
par(fig=c(0,0.5,0.5,1))
polygon(p,density=0,col=4)
segments(xy[i00,3],xy[i00,1],xy[i00,4],xy[i00,2],pch=16,col=3,lwd=3)
xyi = 0
}
#print(xyi)
# for single-pair modes, redraw the panel set with the selection marked
if(g!="t"&g!="x"&g!="n"& xyi>0){
dev.off()
PlotDiag.varobj(v,region,xyi=xyi,zmv=zmv)
}
xyi}
|
# Component lifetime example data: supplier id (1-4) and the measured lifetime
# of each component, one pair per observation.
# NOTE(review): values are transcribed verbatim from the package source --
# do not reformat or edit by hand.
component <- data.frame(supplier=c(1,2,2,3,1,2,3,2,3,1,3,1,3,2,2,3,3,2,2,3,4,3,3,1,4,3,3,2,2,4,3,4,2,2,4,3,2,1,3,3,4,2,3,2,2,2,
3,2,3,3,4,2,1,4,3,2,3,3,2,3,3,3,1,3,2,3,2,3,2,3,2,3,1,3,3,2,4,3,1,3,3,2,3,2,2,2,3,3,3,3,2,2,
2,2,2,2,2,3,2,1,3,3,1,2,3,3,2,1,2,1,2,3,4,2,2,4,3,3,2,3,2,2,3,2,3,4,3,1,2,3),
lifetime=c(8.38,9.98,9.54,10.90,9.82,9.46,9.57,11.51,9.62,11.60,10.63,11.21,10.66,14.08,11.94,
10.29,10.29,10.60,9.61,10.10,8.62,9.12,9.98,8.76,10.01,10.81,12.10,8.81,10.84,13.09,
10.84,9.96,9.72,8.40,8.80,8.99,8.62,9.86,12.73,10.44,10.83,9.79,9.97,9.09,10.73,
10.45,8.89,8.88,8.87,11.15,9.99,8.86,8.85,9.02,9.78,12.01,9.81,9.32,9.72,8.43,
10.11,11.55,10.67,10.03,10.03,10.73,9.96,8.76,8.81,11.06,9.43,8.57,12.09,10.37,8.89,
8.67,8.98,12.85,8.51,10.41,8.95,9.16,10.24,8.76,9.67,9.54,8.93,11.47,9.58,11.13,
12.85,12.41,10.92,8.47,9.66,9.01,10.10,9.70,9.95,9.09,9.74,10.60,9.94,8.73,12.68,
9.14,9.09,9.80,9.54,9.11,8.51,9.80,9.04,11.17,9.74,13.94,9.03,8.41,9.69,9.62,
8.93,10.68,9.51,12.21,11.68,11.01,13.93,8.62,10.07,9.59))
| /R/componentdata.R | no_license | cran/doex | R | false | false | 1,023 | r | component <- data.frame(supplier=c(1,2,2,3,1,2,3,2,3,1,3,1,3,2,2,3,3,2,2,3,4,3,3,1,4,3,3,2,2,4,3,4,2,2,4,3,2,1,3,3,4,2,3,2,2,2,
3,2,3,3,4,2,1,4,3,2,3,3,2,3,3,3,1,3,2,3,2,3,2,3,2,3,1,3,3,2,4,3,1,3,3,2,3,2,2,2,3,3,3,3,2,2,
2,2,2,2,2,3,2,1,3,3,1,2,3,3,2,1,2,1,2,3,4,2,2,4,3,3,2,3,2,2,3,2,3,4,3,1,2,3),
lifetime=c(8.38,9.98,9.54,10.90,9.82,9.46,9.57,11.51,9.62,11.60,10.63,11.21,10.66,14.08,11.94,
10.29,10.29,10.60,9.61,10.10,8.62,9.12,9.98,8.76,10.01,10.81,12.10,8.81,10.84,13.09,
10.84,9.96,9.72,8.40,8.80,8.99,8.62,9.86,12.73,10.44,10.83,9.79,9.97,9.09,10.73,
10.45,8.89,8.88,8.87,11.15,9.99,8.86,8.85,9.02,9.78,12.01,9.81,9.32,9.72,8.43,
10.11,11.55,10.67,10.03,10.03,10.73,9.96,8.76,8.81,11.06,9.43,8.57,12.09,10.37,8.89,
8.67,8.98,12.85,8.51,10.41,8.95,9.16,10.24,8.76,9.67,9.54,8.93,11.47,9.58,11.13,
12.85,12.41,10.92,8.47,9.66,9.01,10.10,9.70,9.95,9.09,9.74,10.60,9.94,8.73,12.68,
9.14,9.09,9.80,9.54,9.11,8.51,9.80,9.04,11.17,9.74,13.94,9.03,8.41,9.69,9.62,
8.93,10.68,9.51,12.21,11.68,11.01,13.93,8.62,10.07,9.59))
|
#'@name correlation
#'@title Pearson correlation heatmap of all samples
#'@description Computes the pairwise Pearson correlation matrix of the
#' columns of \code{DF3}, prints it to the console, and draws an
#' upper-triangle, hierarchically clustered correlation heatmap.
#'@param DF3 A numeric data frame or matrix (samples in columns).
#'@param n_colors Number of colours in the blue-white-red gradient
#' (default 20, matching the previous hard-coded value).
#'@return The value returned by \code{corrplot::corrplot()}; the function is
#' called mainly for its side effects (console output and the plot).
#'@import corrplot
#'@import data.table
#'@export
correlation_plot <- function(DF3, n_colors = 20){
  # Pairwise Pearson correlations across all columns.
  cor_cts <- cor(DF3, method = "pearson")
  print(cor_cts)  # echo the full matrix so it is captured in logs
  # Diverging palette: blue (negative) -> white (zero) -> red (positive).
  cols <- colorRampPalette(c("blue", "white", "red"))(n_colors)
  # NOTE(review): hclust.method = "ward" is deprecated in recent R
  # (use "ward.D"/"ward.D2"); kept here for backward compatibility.
  corrplot(cor_cts, type = "upper", order = "hclust", hclust.method = "ward",
           col = cols, addCoef.col = "black", method = "color",
           tl.col = "black", tl.srt = 45, tl.cex = 0.6,
           main = "Pearson correlation of all the samples",
           mar = c(2, 2, 5, 2))
}
| /iSeqQC_shinyapp/R/correlation.R | permissive | novapyth/iSeqQC | R | false | false | 452 | r | #'@name correlation
#'@import corrplot
#'@import data.table
#'@export
# NOTE(review): duplicate of the correlation_plot definition earlier in this
# dump (dataset artifact); documented in place without altering the code.
correlation_plot <- function(DF3){
# Pairwise Pearson correlation of the columns of DF3, echoed to the console.
cor_cts <- cor(DF3, method="pearson")
print(cor_cts)
# 20-step blue-white-red diverging gradient for the heatmap.
cols<- colorRampPalette(c("blue", "white", "red"))(20)
# Upper-triangle, Ward-clustered heatmap with coefficients overlaid.
corrplot(cor_cts, type="upper", order="hclust", hclust.method= "ward", col=cols, addCoef.col = "black", method = "color", tl.col="black", tl.srt=45, tl.cex = 0.6, main = "Pearson correlation of all the samples", mar=c(2,2,5,2))
}
|
#' Nordpred-object with fit of power5 and poisson Age-Period-Cohort models and
#' related predictions for use on cancer incidence data
#'
#' \code{nordpred} uses the power5 and poisson Age-Period-Cohort (APC) models to
#' calculate prediction of cancer incidence and mortality.
#' This class of objects is returned by the nordpred.estimate class of functions
#' to represent a fit of power5 and poisson Age-Period-Cohort models for
#' prediction of cancer incidence.
#' Objects of this class have methods \code{\link{print.nordpred}},
#' \code{\link{summary.nordpred}} and \code{\link{plot.nordpred}}.
#'
#' @section Components:
#' \describe{
#' \item{predictions}{A \code{data.frame} with forecasted number of cases}
#' \item{pyr}{A \code{data.frame} with observed and forecasted person years}
#' \item{nopred}{Number of periods predicted}
#' \item{noperiod}{Number of periods used in estimate}
#' \item{gofpvalue}{P-value for goodness of fit}
#' \item{recent}{Indicator for projection of the average trend or use of the slope
#' for the last ten years. If \code{recent = FALSE}, the average trend for the whole
#' observation period has been used, and if \code{recent = TRUE} the slope from
#' the last ten years has been used}
#' \item{pvaluerecent}{P-value for use of recent trend based on a significance
#' test for departure from linear trend}
#' \item{cuttrend}{Degree of trend cut in predictions}
#' \item{startusage}{Youngest age group which uses regression model as basis for
#' predicted rates}
#' \item{startestage}{Youngest age group which has been included in the
#' regression model. Predictions for age groups below this limit are based on
#' average rates from the last 10 years.}
#' \item{glm}{Fitted \code{\link{glm}}-object}
#' }
#'
#' The object will also contain the following (see \code{\link{lm}}):
#' \code{formula}, \code{terms}, \code{assign} and \code{call}.
#'
#' @references
#' \itemize{
#' \item A website for nordpred is available at:
#' \url{http://www.kreftregisteret.no/software/nordpred/}
#' \item Background for the methods can be found in: Moller B., Fekjaer H., Hakulinen T.,
#' Sigvaldason H, Storm H. H., Talback M. and Haldorsen T 'Prediction of cancer
#' incidence in the Nordic countries: Empirical comparison of different approaches'
#' Statistics in Medicine 2003; 22:2751-2766
#' \item An application of the function, using all the default settings, can be
#' found in: Moller B, Fekjaer H, Hakulinen T, Tryggvadottir L, Storm HH, Talback M,
#' Haldorsen T. Prediction of cancer incidence in the Nordic countries up to the
#' year 2020. Eur J Cancer Prev Suppl 2002; 11: S1-S96
#' }
#'
#' @author Harald Fekjaer and Bjorn Moller (Cancer Registry of Norway)
#'
#' @section Note for S-plus:
#' Powerlink is made via a special modification in S-PLUS. This works fine
#' for the point estimates, but the variance estimates found via the glm-objects
#' are wrong. For variance estimates, we would rather recommend using R.
#'
#' @family nordpred
#' @name nordpred.object
NULL
| /R/nordpred.object.R | no_license | ondiekhann/nordpred | R | false | false | 3,042 | r |
#' Nordpred-object with fit of power5 and poisson Age- Period-Cohort models and
#' related predictions for use on cancer incidence data
#'
#' \code{nordpred} uses the power5 and poisson Age-Period-Cohort (APC) models to
#' calculate prediction of cancer incidence and mortality
#' This class of objects is returned by the nordpred.estimate class of functions
#' to represent a fit of power5 and poisson Age-Period-Cohort models for
#' prediction of cancer incidence.
#' Objects of this class have methods \code{\link{print.nordpred}},
#' \code{\link{summary.nordpred}} and \code{\link{plot.nordpred}}.
#'
#' @section Components:
#' \describe{
#' \item{predictions}{A \code{data.frame} with forecasted number of cases}
#' \item{pyr}{A \code{data.frame} with observed and forecasted person years}
#' \item{nopred}{Number of periods predicted}
#' \item{noperiod}{Number of periods used in estimate}
#' \item{gofpvalue}{P-value for goodness of fit}
#' \item{recent}{Indicator for projection of the average trend or use of the slope
#' for the last ten years. If \code{recent = FALSE}, the average trend for the whole
#' observation period has been used, and if \code{recent = TRUE} the slope from
#' the last ten years has been used}
#' \item{pvaluerecent}{P-value for use of recent trend based on a significance
#' test for departure from linear trend}
#' \item{cuttrend}{Degree of trend cut in predictions}
#' \item{startusage}{Youngest age group which uses regression model as basis for
#' predicted rates}
#' \item{startestage}{Youngest age group which has been included in the
#' regression model. Predictions for age groups below this limit are based on
#' average rates from the last 10 years.}
#' \item{glm}{Fitted \code{\link{glm}}-object}
#' }
#'
#' The object will also contain the following (see \code{\link{lm}}):
#' \code{formula}, \code{terms}, \code{assign} and \code{call}.
#'
#' @references
#' \itemize{
#' \item A website for nordpred is available at:
#' \url{http://www.kreftregisteret.no/software/nordpred/}
#' \item Background for the methods can be found in: Moller B., Fekjaer H., Hakulinen T.,
#' Sigvaldason H, Storm H. H., Talback M. and Haldorsen T 'Prediction of cancer
#' incidence in the Nordic countries: Empirical comparison of different approaches'
#' Statistics in Medicine 2003; 22:2751-2766
#' \item An application of the function, using all the default settings, can be
#' found in: Moller B, Fekjaer H, Hakulinen T, Tryggvadottir L, Storm HH, Talback M,
#' Haldorsen T. Prediction of cancer incidence in the Nordic countries up to the
#' year 2020. Eur J Cancer Prev Suppl 2002; 11: S1-S96
#' }
#'
#' @author Harald Fekjaer and Bjorn Moller (Cancer Registry of Norway)
#'
#' @section Note for S-plus:
#' Powerlink is made via a special modification in S-PLUS. This works fine
#' for the point estimates, but the variance estimates found via the glm-objects
#' are wrong. For variance estimates, we would rather recommend using R.
#'
#' @family nordpred
#' @name nordpred.object
NULL
|
# T03_Matrix_arrays.R - DESC
# T03_Matrix_arrays.R
# Copyright 2011-13 JRC FishReg. Distributed under the GPL 2 or later
# Maintainer: FishReg, JRC
# Introduction to R for Fisheries Science
# MATRIX
# A 2D structure of either numeric or character elements
# Constructed using matrix()
# NOTE(review): rnorm(10) is recycled to fill the 100 cells of the 10x10
# matrix; presumably rnorm(100) was intended, as in the next line -- confirm.
matrix(rnorm(10), ncol=10, nrow=10)
mat <- matrix(rnorm(100), ncol=10, nrow=10)
# Subsetting as in df
mat[1, 2]
# drop=FALSE keeps the matrix shape (1 x 10) instead of collapsing to a vector
mat[1, ,drop=FALSE]
mat[1:4,]
# Get size using dim
dim(mat)
length(mat)
# R works column first, unless instructed otherwise
a <- matrix(1:16, nrow=4)
b <- matrix(1:16, nrow=4, byrow=TRUE)
# An important method for matrices is apply()
# NOTE(review): 1:10 is recycled column-wise here, so every column equals 1:10.
mat <- matrix(1:10, ncol=10, nrow=10)
# MARGIN = 2 gives column sums, MARGIN = 1 gives row sums
apply(mat, 2, sum)
apply(mat, 1, sum)
# Arithmetics work element-wise
mat + 2
mat * 2
mat - mat
# but matrix algebra is also defined
mat %*% 1:10
# ARRAY
# An array is an n-dimensional extension of a matrix
# Created by array(), specifying dim and dimnames
array(1:100, dim=c(5, 5, 4))
# NOTE(review): 1:25 is recycled four times to fill the 5x5x4 array, so all
# four slices are identical -- possibly intentional for the demo.
arr <- array(1:25, dim=c(5, 5, 4))
# Subsetting works as in matrix, on all dims (count the commas)
arr[1:3, 1:5, 4]
# but be careful with dimensions collapsing
arr[1,,]
arr[1,3,]
# Arithmetic (element by element) is well defined
arr * 2
arr + (arr / 2)
# apply is our friend here too
apply(arr, 2, sum)
apply(arr, 2:3, sum) | /Day01/T03_Matrix_arrays.R | no_license | ariffahmi7/RforFisheries | R | false | false | 1,318 | r | # T03_Matrix_arrays.R - DESC
# T03_Matrix_arrays.R
# Copyright 2011-13 JRC FishReg. Distributed under the GPL 2 or later
# Maintainer: FishReg, JRC
# Introduction to R for Fisheries Science
# MATRIX
# A 2D structure of either numeric or character elements
# Constructed using matrix()
matrix(rnorm(10), ncol=10, nrow=10)
mat <- matrix(rnorm(100), ncol=10, nrow=10)
# Subsetting as in df
mat[1, 2]
mat[1, ,drop=FALSE]
mat[1:4,]
# Get size using dim
dim(mat)
length(mat)
# R works column first, unless instructed otherwise
a <- matrix(1:16, nrow=4)
b <- matrix(1:16, nrow=4, byrow=TRUE)
# An important method for matrices is apply()
mat <- matrix(1:10, ncol=10, nrow=10)
apply(mat, 2, sum)
apply(mat, 1, sum)
# Arithmetics work element-wise
mat + 2
mat * 2
mat - mat
# but matrix algebra is also defined
mat %*% 1:10
# ARRAY
# An array is an n-dimensional extension of a matrix
# Created by array(), specifying dim and dimnames
array(1:100, dim=c(5, 5, 4))
arr <- array(1:25, dim=c(5, 5, 4))
# Subsetting works as in matrix, on all dims (count the commas)
arr[1:3, 1:5, 4]
# but be careful with dimensions collapsing
arr[1,,]
arr[1,3,]
# Arithmetic (element by element) is well defined
arr * 2
arr + (arr / 2)
# apply is our friend here too
apply(arr, 2, sum)
apply(arr, 2:3, sum) |
# ==============================================================================
#
# Regression for ladder fuel metrics from trad, ALS, UAV, ZEB, and banner
#
# ==============================================================================
#
# Author: Brieanne Forbes
#
# Created: 21 Jan 2021
#
# ==============================================================================
#
# Known problems:
#
#
#
# ==============================================================================
# Attach analysis dependencies (tidyverse supplies dplyr/tidyr/readr used below).
library(tidyverse)
library(ggplot2)
library(reshape2)
library(glue)
library(FSA)
library(ggpubr)
library(RColorBrewer)
library(Metrics)
library(gridExtra)
# ================================= User inputs ================================
# Per-method random-forest RBR prediction CSVs; the commented-out block is the
# unfiltered variant of the same analysis.
# NOTE(review): absolute Windows paths -- not portable; consider here::here().
# NOTE(review): full.names = T relies on T not being reassigned; prefer TRUE.
# rbr_files <- list.files('D:/Analyses/ladder_fuel_full_process_take4/not_filtered/',
# pattern = '210331_RBR_3x3avg_rf_predict.csv',
# full.names = T)
rbr_files <- list.files('D:/Analyses/ladder_fuel_full_process_take4/filtered_zero_and_above/',
pattern = '210331_RBR_3x3avg_rf_predict.csv',
full.names = T)
#
# Output path for the combined 2x3 regression figure (filtered variant).
# wrap_out <- 'D:/Analyses/ladder_fuel_full_process_take4/not_filtered/ladder-fuel_linear-regression_210405.png'
wrap_out <- 'D:/Analyses/ladder_fuel_full_process_take4/filtered_zero_and_above/ladder-fuel_linear-regression_filtered_210408.png'
#presentation_out <- '~/desktop/Analyses/output/presentation_figure_210122.png'
# =============================== Combine data ================================
# Read each method's prediction CSV and tag rows with the method name.
# NOTE(review): files are selected by position in the (alphabetically sorted)
# list.files() result: 1=ALS, 2=Banner, 3=HMLS/ZEB, 4=TLS, 5=UAS. This is
# fragile if a file is added or renamed -- TODO match on filename instead.
als <- read_csv(rbr_files[1]) %>%
add_column(method='ALS')
banner <- read_csv(rbr_files[2])%>%
add_column(method='Banner')
zeb <- read_csv(rbr_files[3])%>%
add_column(method='HMLS')
tls <- read_csv(rbr_files[4])%>%
add_column(method='TLS')
uas <- read_csv(rbr_files[5])%>%
add_column(method='UAS')
# Satellite-derived ("actual") RBR per plot, taken from the ALS table and
# relabelled as its own pseudo-method.
rbr_actual <- als %>%
select(plot, rbr_actual) %>%
add_column(method='Actual') %>%
rename(rbr=rbr_actual)
# Long format: one row per plot x method (predicted RBR) plus the actual rows.
tidy_data <- als %>%
add_row(banner) %>%
add_row(tls) %>%
add_row(uas) %>%
add_row(zeb) %>%
select(plot, method, rbr_predict) %>%
rename(rbr=rbr_predict) %>%
add_row(rbr_actual)
# Per-method prediction columns for the wide table built below.
tls_rbr <- tls %>%
select(plot,rbr_actual, rbr_predict) %>%
rename(rbr_predict_tls = rbr_predict)
uas_rbr <- uas %>%
select(plot,rbr_actual, rbr_predict) %>%
rename(rbr_predict_uas = rbr_predict)
zeb_rbr <- zeb %>%
select(plot,rbr_actual, rbr_predict) %>%
rename(rbr_predict_zeb = rbr_predict)
banner_rbr <- banner %>%
select(plot,rbr_actual, rbr_predict) %>%
rename(rbr_predict_banner = rbr_predict)
# Wide format: one prediction column per method, joined on plot + actual RBR.
rbr <- als %>%
select(plot,rbr_actual, rbr_predict) %>%
rename(rbr_predict_als = rbr_predict) %>%
full_join(tls_rbr, by=c('plot', 'rbr_actual')) %>%
full_join(uas_rbr, by=c('plot', 'rbr_actual')) %>%
full_join(zeb_rbr, by=c('plot', 'rbr_actual')) %>%
full_join(banner_rbr, by=c('plot', 'rbr_actual'))
# Bin actual RBR into burn-severity classes at 35/130/298.
# NOTE(review): 'label' relies on partial argument matching of cut()'s
# 'labels' argument -- spell it out; also confirm the cutoffs against the
# methods section.
rbr$rbr_class <-
cut(
rbr$rbr_actual,
breaks = c( 0, 35, 130, 298, Inf),
label = c('NC', 'Low', 'Medium', 'High')
)
#===============================Set theme=======================================
# Global ggplot theme for every panel: serif fonts, heavy black panel border,
# large text sized for a multi-panel composite figure.
theme_set(
theme(
text = element_text(family = 'serif', face = 'plain'),
axis.title = element_text(size = 16),
axis.text = element_text(size = 20),
line = element_line(size = 1.5),
axis.line = element_line(size = 1.5),
panel.background = element_rect(color = 'white'),
panel.border = element_rect(colour = 'black', fill = NA, size = 1.5),
legend.title = element_text(size = 24),
legend.text = element_text(size = 24),
legend.spacing = unit(0, "cm"),
legend.margin = margin(0, 5, 0, 5),
legend.text.align = 0,
plot.title = element_text(size = 25, face = "bold")
)
)
# Order severity levels high-to-low so legends read High -> NC.
rbr$rbr_class <- factor(rbr$rbr_class, levels = c("High", "Medium", "Low", "NC"))
#=========================3 min Competition Figure===============================
# presentation_figure <- ggplot(data = rbr, aes(y=rbr_actual)) +
# geom_abline(col='grey')+
# geom_hline(yintercept = c(35, 130, 298), linetype="dashed", col='grey')+
# geom_vline(xintercept = c(35, 130, 298), linetype="dashed", col='grey')+
# geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_tls, color='#E15759'))+
# geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_als, color='#F28E2B'))+
# geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_zeb, color='#F1CE63'))+
# geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_uas, color='#59A14F'))+
# geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_banner, color='#499894')) +
# xlab('Predicted burn severity \n(from airborne and ground methods)') +
# ylab('Actual burn severity \n(from satellite)') +
# geom_point(aes(x=rbr_predict_tls, color='#E15759'),size=1.5)+
# geom_point(aes(x=rbr_predict_als, color='#F28E2B'),size=1.5)+
# geom_point(aes(x=rbr_predict_zeb, color='#F1CE63'),size=1.5)+
# geom_point(aes(x=rbr_predict_uas, color='#59A14F'),size=1.5)+
# geom_point(aes(x=rbr_predict_banner, color='#499894'),size=1.5)+
# scale_color_manual(values = c('#E15759','#F28E2B','#F1CE63','#59A14F','#499894'),
# labels=c('TLS\n(r2=0.834)\n', 'ALS\n(r2= 0.830)\n',
# 'ZEB\n(r2= 0.828)\n', 'UAS\n(r2= 0.739)\n',
# 'Banner\n(r2= 0.783)\n'))+
# xlim(-5, 350) +
# ylim(-5, 350) +
# labs(colour="Method") +
# annotate('text', x=330, y=7, label='very\nlow', angle=90, size=5)+
# annotate('text', x=340, y=80, label='low', angle=90, size=5)+
# annotate('text', x=340, y=215, label='medium', angle=90, size=5)+
# annotate('text', x=335, y=335, label='high', angle=45, size=5)+
# annotate('text', x=7, y=330, label='very\nlow', size=5)+
# annotate('text', x=80, y=340, label='low', size=5)+
# annotate('text', x=215, y=340, label='medium', size=5) +
# theme(legend.position = 'none', axis.title = element_text(size = 20),
# axis.line=element_blank(),axis.text.x=element_blank(),
# axis.text.y=element_blank(),axis.ticks=element_blank(),)
# presentation_figure
#
# ggsave(
# presentation_out,
# plot=presentation_figure,
# width = 5,
# height = 4.5,
# units = 'in',
# dpi = 300)
#==============================Wrapped Figure===================================
#
# --- Per-method OLS fits of actual (satellite) RBR on predicted RBR ----------
# NOTE(review): the P/Rsq values in the trailing comments and the hand-typed
# stats inside the annotate('label', ...) calls below disagree (the comments
# appear to come from a different data variant). Consider building the labels
# from equation_* / summary() so they can never go stale.
tls_lm <- lm(rbr_actual~rbr_predict_tls, data=rbr) #P=1.031e-10 *** Rsq=0.8358
summary(tls_lm)
# Human-readable regression equation (computed but currently unused below).
equation_tls <- glue('y = {round(tls_lm$coefficients[2],2)}x + {round(tls_lm$coefficients[1], 2)}')
equation_tls #y = 1.27x + -35.97
# TLS panel: 1:1 line, severity-class gridlines at 35/130/298, fitted line,
# points coloured by severity class, hand-typed fit stats in a label.
wrap_tls <- ggplot(data = rbr, aes(y=rbr_actual)) +
geom_abline(col='grey')+
geom_hline(yintercept = c(35, 130, 298), linetype="dashed",col = 'grey')+
geom_vline(xintercept = c(35, 130, 298), linetype="dashed",col = 'grey')+
geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_tls), col='black',size=1.5)+
geom_point(aes(x=rbr_predict_tls, color=rbr_class),size=3.5)+
scale_color_manual(values = c('red', 'orange', 'yellow2', 'green3'))+
xlim(-5, 350) +
ylim(-5, 350) +
labs(colour="Burn Severity") +
annotate('text', x=340, y=7, label='NC', angle=90, size=7, color='grey50')+
annotate('text', x=340, y=80, label='low', angle=90, size=7, color='grey50')+
annotate('text', x=340, y=215, label='medium', angle=90, size=7, color='grey50')+
annotate('text', x=335, y=335, label='high', angle=45, size=7, color='grey50')+
annotate('text', x=7, y=340, label='NC', size=7, color='grey50')+
annotate('text', x=80, y=340, label='low', size=7, color='grey50')+
annotate('text', x=215, y=340, label='medium', size=7, color='grey50')+
annotate('label', x=215, y=20, label='y = 1.24x + -33.29\nRsq = 0.82 \np<0.001 ', size=5)+
ggtitle("TLS")+
theme(legend.position = 'none', axis.title = element_blank())
wrap_tls
# ALS: same layout as the TLS panel.
als_lm <- lm(rbr_actual~rbr_predict_als, data=rbr) #P=8.199e-11 *** Rsq=0.8391
summary(als_lm)
equation_als <- glue('y = {round(als_lm$coefficients[2],2)}x + {round(als_lm$coefficients[1], 2)}')
equation_als #y = 1.41x + -59.34
wrap_als <- ggplot(data = rbr, aes(y=rbr_actual)) +
geom_abline(col='grey')+
geom_hline(yintercept = c(35, 130, 298), linetype="dashed",col = 'grey')+
geom_vline(xintercept = c(35, 130, 298), linetype="dashed",col = 'grey')+
geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_als), col='black',size=1.5)+
geom_point(aes(x=rbr_predict_als, color=rbr_class),size=3.5)+
scale_color_manual(values = c('red', 'orange', 'yellow2', 'green3'))+
xlim(-5, 350) +
ylim(-5, 350) +
labs(colour="Burn Severity") +
annotate('text', x=340, y=7, label='NC', angle=90, size=7, color='grey50')+
annotate('text', x=340, y=80, label='low', angle=90, size=7, color='grey50')+
annotate('text', x=340, y=215, label='medium', angle=90, size=7, color='grey50')+
annotate('text', x=335, y=335, label='high', angle=45, size=7, color='grey50')+
annotate('text', x=7, y=340, label='NC', size=7, color='grey50')+
annotate('text', x=80, y=340, label='low', size=7, color='grey50')+
annotate('text', x=215, y=340, label='medium', size=7, color='grey50')+
annotate('label', x=215, y=20, label='y = 1.42x + -60.64\nRsq = 0.83\np<0.001', size=5)+
ggtitle("ALS")+
theme(legend.position = 'none', axis.title = element_blank())
wrap_als
#
# HMLS (ZEB handheld scanner): same layout as the TLS panel.
zeb_lm <- lm(rbr_actual~rbr_predict_zeb, data=rbr) #P=7.074e-10 *** Rsq=0.8203
summary(zeb_lm)
equation_zeb <- glue('y = {round(zeb_lm$coefficients[2],2)}x + {round(zeb_lm$coefficients[1], 2)}')
equation_zeb #y = 1.48x + -64.19
wrap_zeb <- ggplot(data = rbr, aes(y=rbr_actual)) +
geom_abline(col='grey')+
geom_hline(yintercept = c(35, 130, 298), linetype="dashed",col = 'grey')+
geom_vline(xintercept = c(35, 130, 298), linetype="dashed",col = 'grey')+
geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_zeb), col='black',size=1.5)+
geom_point(aes(x=rbr_predict_zeb, color=rbr_class),size=3.5)+
scale_color_manual(values = c('red', 'orange', 'yellow2', 'green3'))+
xlim(-5, 350) +
ylim(-5, 350) +
labs(colour="Burn Severity") +
annotate('text', x=340, y=7, label='NC', angle=90, size=7, color='grey50')+
annotate('text', x=340, y=80, label='low', angle=90, size=7, color='grey50')+
annotate('text', x=340, y=215, label='medium', angle=90, size=7, color='grey50')+
annotate('text', x=335, y=335, label='high', angle=45, size=7, color='grey50')+
annotate('text', x=7, y=340, label='NC', size=7, color='grey50')+
annotate('text', x=80, y=340, label='low', size=7, color='grey50')+
annotate('text', x=215, y=340, label='medium', size=7, color='grey50')+
annotate('label', x=215, y=20, label='y = 1.49x + -65.29\nRsq = 0.83\np<0.001', size=5)+
ggtitle("HMLS")+
theme(legend.position = 'none', axis.title = element_blank())
wrap_zeb
# UAS panel: same layout as the preceding panels.
# NOTE(review): as above, the annotate('label', ...) stats are hand-typed and
# differ from the trailing comments -- consider generating them from summary().
uas_lm <- lm(rbr_actual~rbr_predict_uas, data=rbr) #P= 7.608e-06 *** Rsq= 0.7304
summary(uas_lm)
equation_uas <- glue('y = {round(uas_lm$coefficients[2],2)}x + {round(uas_lm$coefficients[1], 2)}')
equation_uas #y = 1.26x + -32.58
wrap_uas <- ggplot(data = rbr, aes(y=rbr_actual)) +
geom_abline(col='grey')+
geom_hline(yintercept = c(35, 130, 298), linetype="dashed",col = 'grey')+
geom_vline(xintercept = c(35, 130, 298), linetype="dashed",col = 'grey')+
geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_uas), col='black',size=1.5)+
geom_point(aes(x=rbr_predict_uas, color=rbr_class),size=3.5)+
scale_color_manual(values = c('red', 'orange', 'yellow2', 'green3'))+
xlim(-5, 350) +
ylim(-5, 350) +
labs(colour="Burn Severity") +
annotate('text', x=340, y=7, label='NC', angle=90, size=7, color='grey50')+
annotate('text', x=340, y=80, label='low', angle=90, size=7, color='grey50')+
annotate('text', x=340, y=215, label='medium', angle=90, size=7, color='grey50')+
annotate('text', x=335, y=335, label='high', angle=45, size=7, color='grey50')+
annotate('text', x=7, y=340, label='NC', size=7, color='grey50')+
annotate('text', x=80, y=340, label='low', size=7, color='grey50')+
annotate('text', x=215, y=340, label='medium', size=7, color='grey50')+
annotate('label', x=215, y=20, label='y = 1.3x + -35.54\nRsq = 0.80\np<0.001', size=5)+
ggtitle("UAS")+
theme(legend.position = 'none', axis.title = element_blank())
wrap_uas
#
# Banner panel: only panel with a visible legend (legend.position = 'bottom'),
# so it carries the class legend for the whole composite figure.
banner_lm <- lm(rbr_actual~rbr_predict_banner, data=rbr) #P= 1.463e-09 *** Rsq=0.8082
summary(banner_lm)
equation_banner <- glue('y = {round(banner_lm$coefficients[2],2)}x + {round(banner_lm$coefficients[1], 2)}')
equation_banner #y = 1.35x + -50.5
wrap_banner <- ggplot(data = rbr, aes(y=rbr_actual)) +
geom_abline(col='grey')+
geom_hline(yintercept = c(35, 130, 298), linetype="dashed",col = 'grey')+
geom_vline(xintercept = c(35, 130, 298), linetype="dashed",col = 'grey')+
geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_banner), col='black',size=1.5)+
geom_point(aes(x=rbr_predict_banner, color=rbr_class),size=3.5)+
scale_color_manual(values = c('red', 'orange', 'yellow2', 'green3'))+
xlim(-5, 350) +
ylim(-5, 350) +
labs(colour="RBR Class") +
annotate('text', x=340, y=7, label='NC', angle=90, size=7, color='grey50')+
annotate('text', x=340, y=80, label='low', angle=90, size=7, color='grey50')+
annotate('text', x=340, y=215, label='medium', angle=90, size=7, color='grey50')+
annotate('text', x=335, y=335, label='high', angle=45, size=7, color='grey50')+
annotate('text', x=7, y=340, label='NC', size=7, color='grey50')+
annotate('text', x=80, y=340, label='low', size=7, color='grey50')+
annotate('text', x=215, y=340, label='medium', size=7, color='grey50')+
annotate('label', x=215, y=20, label='y = 1.32x + -45.62\nRsq = 0.77\np<0.001', size=5)+
ggtitle("Banner")+
theme(legend.position = 'bottom', axis.title = element_blank())+
guides(color = guide_legend(nrow = 3))
wrap_banner
# All-methods overlay: one fitted line + point cloud per method.
# NOTE(review): the hex strings inside aes(color=...) act only as discrete
# grouping keys; the actual colours and legend labels are supplied by
# scale_color_manual() below.
wrap_all <- ggplot(data = rbr, aes(y=rbr_actual)) +
geom_abline(col='grey')+
geom_hline(yintercept = c(35, 130, 298), linetype="dashed",col = 'grey')+
geom_vline(xintercept = c(35, 130, 298), linetype="dashed",col = 'grey')+
geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_tls, color='#E15759'),size=1)+
geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_als, color='#F28E2B'),size=1)+
geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_zeb, color='#F1CE63'),size=1)+
geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_uas, color='#59A14F'),size=1)+
geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_banner, color='#499894'),size=1) +
geom_point(aes( x=rbr_predict_tls, color='#E15759'),size=2.5)+
geom_point(aes( x=rbr_predict_zeb, color='#F1CE63'),size=2.5)+
geom_point(aes( x=rbr_predict_als, color='#F28E2B'),size=2.5)+
geom_point(aes( x=rbr_predict_uas, color='#59A14F'),size=2.5)+
geom_point(aes( x=rbr_predict_banner, color='#499894'),size=2.5)+
scale_color_manual(values = c('#0D0887FF','#900DA4FF','#E16462FF','#FCCE25FF','#21908CFF'),
labels=c('TLS', 'MLS',
'ALS', 'UAS',
'Banner'))+
xlim(-5, 350) +
ylim(-5, 350) +
labs(colour="Method") +
annotate('text', x=340, y=7, label='NC', angle=90, size=7, color='grey50')+
annotate('text', x=340, y=80, label='low', angle=90, size=7, color='grey50')+
annotate('text', x=340, y=215, label='medium', angle=90, size=7, color='grey50')+
annotate('text', x=335, y=335, label='high', angle=45, size=7, color='grey50')+
annotate('text', x=7, y=340, label='NC', size=7, color='grey50')+
annotate('text', x=80, y=340, label='low', size=7, color='grey50')+
annotate('text', x=215, y=340, label='medium', size=7, color='grey50')+
ggtitle("All Methods") +
theme(legend.position = 'bottom', axis.title = element_blank())+
guides(color = guide_legend(nrow = 3))
wrap_all
# Assemble the 2x3 composite (last row taller to make room for the legends),
# add shared axis titles, and write the PNG to wrap_out.
wrap_figure <-
ggarrange(
wrap_tls,
wrap_zeb,
wrap_als,
wrap_uas,
wrap_banner,
wrap_all,
ncol = 2,
nrow = 3,
widths = c(4.5, 4.5),
heights = c(4.5, 4.5, 5.25)
)
# Shared x/y axis titles for the whole composite (per-panel titles are blank).
wrap_figure <- annotate_figure(wrap_figure,
bottom = text_grob('Predicted RBR',
family = 'serif',
size = 32),
left = text_grob('Actual RBR',
family = 'serif',
size = 32,
rot = 90))
# Publication-resolution export.
ggsave(
wrap_out,
plot=wrap_figure,
width = 13,
height = 20,
units = 'in',
dpi = 300 )
| /ladder_fuel_regression(6).R | no_license | forbesb97/forbesb_b_manuscript | R | false | false | 16,640 | r | # ==============================================================================
#
# Regression for ladder fuel metrics from trad, ALS, UAV, ZEB, and banner
#
# ==============================================================================
#
# Author: Brieanne Forbes
#
# Created: 21 Jan 2021
#
# ==============================================================================
#
# Known problems:
#
#
#
# ==============================================================================
library(tidyverse)
library(ggplot2)
library(reshape2)
library(glue)
library(FSA)
library(ggpubr)
library(RColorBrewer)
library(Metrics)
library(gridExtra)
# ================================= User inputs ================================
# Input: one per-method prediction CSV per file, produced by the random-forest
# prediction step (pattern '210331_RBR_3x3avg_rf_predict.csv').
# rbr_files <- list.files('D:/Analyses/ladder_fuel_full_process_take4/not_filtered/',
# pattern = '210331_RBR_3x3avg_rf_predict.csv',
# full.names = T)
rbr_files <- list.files('D:/Analyses/ladder_fuel_full_process_take4/filtered_zero_and_above/',
pattern = '210331_RBR_3x3avg_rf_predict.csv',
full.names = T)
#
# wrap_out <- 'D:/Analyses/ladder_fuel_full_process_take4/not_filtered/ladder-fuel_linear-regression_210405.png'
wrap_out <- 'D:/Analyses/ladder_fuel_full_process_take4/filtered_zero_and_above/ladder-fuel_linear-regression_filtered_210408.png'
#presentation_out <- '~/desktop/Analyses/output/presentation_figure_210122.png'
# =============================== Combine data ================================
# NOTE(review): the indices below assume list.files() returns the five method
# files in alphabetical order matching ALS, Banner, HMLS(zeb), TLS, UAS --
# verify against the actual file names in the input directory.
als <- read_csv(rbr_files[1]) %>%
add_column(method='ALS')
banner <- read_csv(rbr_files[2])%>%
add_column(method='Banner')
zeb <- read_csv(rbr_files[3])%>%
add_column(method='HMLS')
tls <- read_csv(rbr_files[4])%>%
add_column(method='TLS')
uas <- read_csv(rbr_files[5])%>%
add_column(method='UAS')
# Satellite-derived RBR, taken once from the ALS file (the rbr_actual column
# is presumably identical across the five method files).
rbr_actual <- als %>%
select(plot, rbr_actual) %>%
add_column(method='Actual') %>%
rename(rbr=rbr_actual)
# Long format: one row per (plot, method) with the predicted RBR, plus the
# 'Actual' rows appended so all six series share one column.
tidy_data <- als %>%
add_row(banner) %>%
add_row(tls) %>%
add_row(uas) %>%
add_row(zeb) %>%
select(plot, method, rbr_predict) %>%
rename(rbr=rbr_predict) %>%
add_row(rbr_actual)
tls_rbr <- tls %>%
select(plot,rbr_actual, rbr_predict) %>%
rename(rbr_predict_tls = rbr_predict)
uas_rbr <- uas %>%
select(plot,rbr_actual, rbr_predict) %>%
rename(rbr_predict_uas = rbr_predict)
zeb_rbr <- zeb %>%
select(plot,rbr_actual, rbr_predict) %>%
rename(rbr_predict_zeb = rbr_predict)
banner_rbr <- banner %>%
select(plot,rbr_actual, rbr_predict) %>%
rename(rbr_predict_banner = rbr_predict)
# Wide format: one row per plot with a rbr_predict_<method> column per method,
# joined on (plot, rbr_actual).
rbr <- als %>%
select(plot,rbr_actual, rbr_predict) %>%
rename(rbr_predict_als = rbr_predict) %>%
full_join(tls_rbr, by=c('plot', 'rbr_actual')) %>%
full_join(uas_rbr, by=c('plot', 'rbr_actual')) %>%
full_join(zeb_rbr, by=c('plot', 'rbr_actual')) %>%
full_join(banner_rbr, by=c('plot', 'rbr_actual'))
# Bin actual RBR into severity classes at the 35 / 130 / 298 break points.
# NOTE(review): cut() with these breaks excludes the value 0 itself
# (intervals are (0,35], (35,130], ...), so rbr_actual == 0 becomes NA --
# confirm that is intended, or add include.lowest = TRUE.
rbr$rbr_class <-
cut(
rbr$rbr_actual,
breaks = c( 0, 35, 130, 298, Inf),
label = c('NC', 'Low', 'Medium', 'High')
)
#===============================Set theme=======================================
# Global ggplot theme shared by every panel below (serif fonts, sized text,
# bordered panels).
theme_set(
theme(
text = element_text(family = 'serif', face = 'plain'),
axis.title = element_text(size = 16),
axis.text = element_text(size = 20),
line = element_line(size = 1.5),
axis.line = element_line(size = 1.5),
panel.background = element_rect(color = 'white'),
panel.border = element_rect(colour = 'black', fill = NA, size = 1.5),
legend.title = element_text(size = 24),
legend.text = element_text(size = 24),
legend.spacing = unit(0, "cm"),
legend.margin = margin(0, 5, 0, 5),
legend.text.align = 0,
plot.title = element_text(size = 25, face = "bold")
)
)
# Order the class levels so legends read High -> NC.
rbr$rbr_class <- factor(rbr$rbr_class, levels = c("High", "Medium", "Low", "NC"))
#=========================3 min Competition Figure===============================
# presentation_figure <- ggplot(data = rbr, aes(y=rbr_actual)) +
# geom_abline(col='grey')+
# geom_hline(yintercept = c(35, 130, 298), linetype="dashed", col='grey')+
# geom_vline(xintercept = c(35, 130, 298), linetype="dashed", col='grey')+
# geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_tls, color='#E15759'))+
# geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_als, color='#F28E2B'))+
# geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_zeb, color='#F1CE63'))+
# geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_uas, color='#59A14F'))+
# geom_smooth(method = "lm", formula = y~x, se=F, aes(x=rbr_predict_banner, color='#499894')) +
# xlab('Predicted burn severity \n(from airborne and ground methods)') +
# ylab('Actual burn severity \n(from satellite)') +
# geom_point(aes(x=rbr_predict_tls, color='#E15759'),size=1.5)+
# geom_point(aes(x=rbr_predict_als, color='#F28E2B'),size=1.5)+
# geom_point(aes(x=rbr_predict_zeb, color='#F1CE63'),size=1.5)+
# geom_point(aes(x=rbr_predict_uas, color='#59A14F'),size=1.5)+
# geom_point(aes(x=rbr_predict_banner, color='#499894'),size=1.5)+
# scale_color_manual(values = c('#E15759','#F28E2B','#F1CE63','#59A14F','#499894'),
# labels=c('TLS\n(r2=0.834)\n', 'ALS\n(r2= 0.830)\n',
# 'ZEB\n(r2= 0.828)\n', 'UAS\n(r2= 0.739)\n',
# 'Banner\n(r2= 0.783)\n'))+
# xlim(-5, 350) +
# ylim(-5, 350) +
# labs(colour="Method") +
# annotate('text', x=330, y=7, label='very\nlow', angle=90, size=5)+
# annotate('text', x=340, y=80, label='low', angle=90, size=5)+
# annotate('text', x=340, y=215, label='medium', angle=90, size=5)+
# annotate('text', x=335, y=335, label='high', angle=45, size=5)+
# annotate('text', x=7, y=330, label='very\nlow', size=5)+
# annotate('text', x=80, y=340, label='low', size=5)+
# annotate('text', x=215, y=340, label='medium', size=5) +
# theme(legend.position = 'none', axis.title = element_text(size = 20),
# axis.line=element_blank(),axis.text.x=element_blank(),
# axis.text.y=element_blank(),axis.ticks=element_blank(),)
# presentation_figure
#
# ggsave(
# presentation_out,
# plot=presentation_figure,
# width = 5,
# height = 4.5,
# units = 'in',
# dpi = 300)
#==============================Wrapped Figure===================================
#
# ---- TLS panel --------------------------------------------------------------
# Linear model: actual (satellite) RBR as a function of TLS-predicted RBR.
tls_lm <- lm(rbr_actual~rbr_predict_tls, data=rbr)
summary(tls_lm)
equation_tls <- glue('y = {round(tls_lm$coefficients[2],2)}x + {round(tls_lm$coefficients[1], 2)}')
equation_tls
# Compose the panel annotation from the fitted model itself so the displayed
# equation / R-squared / p-value always match the data actually loaded.
# (These were previously hard-coded strings that disagreed with the values in
# the stale inline comments, so at least one of the two had gone out of date
# when switching between the filtered and unfiltered input directories.)
tls_pval <- anova(tls_lm)$`Pr(>F)`[1]
tls_label <- glue('{equation_tls}\nRsq = {round(summary(tls_lm)$r.squared, 2)}\n{if (tls_pval < 0.001) "p<0.001" else paste0("p=", signif(tls_pval, 2))}')
wrap_tls <- ggplot(data = rbr, aes(y = rbr_actual)) +
geom_abline(col = 'grey') +
geom_hline(yintercept = c(35, 130, 298), linetype = "dashed", col = 'grey') +
geom_vline(xintercept = c(35, 130, 298), linetype = "dashed", col = 'grey') +
geom_smooth(method = "lm", formula = y ~ x, se = F, aes(x = rbr_predict_tls), col = 'black', size = 1.5) +
geom_point(aes(x = rbr_predict_tls, color = rbr_class), size = 3.5) +
scale_color_manual(values = c('red', 'orange', 'yellow2', 'green3')) +
xlim(-5, 350) +
ylim(-5, 350) +
labs(colour = "Burn Severity") +
# Grey guide text naming the severity class bands marked by the dashed lines.
annotate('text', x = 340, y = 7, label = 'NC', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 340, y = 80, label = 'low', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 340, y = 215, label = 'medium', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 335, y = 335, label = 'high', angle = 45, size = 7, color = 'grey50') +
annotate('text', x = 7, y = 340, label = 'NC', size = 7, color = 'grey50') +
annotate('text', x = 80, y = 340, label = 'low', size = 7, color = 'grey50') +
annotate('text', x = 215, y = 340, label = 'medium', size = 7, color = 'grey50') +
annotate('label', x = 215, y = 20, label = tls_label, size = 5) +
ggtitle("TLS") +
theme(legend.position = 'none', axis.title = element_blank())
wrap_tls
# ---- ALS panel --------------------------------------------------------------
# Linear model: actual (satellite) RBR as a function of ALS-predicted RBR.
als_lm <- lm(rbr_actual~rbr_predict_als, data=rbr)
summary(als_lm)
equation_als <- glue('y = {round(als_lm$coefficients[2],2)}x + {round(als_lm$coefficients[1], 2)}')
equation_als
# Annotation built from the fitted model so it cannot go stale (replaces a
# hard-coded label that disagreed with the commented model statistics).
als_pval <- anova(als_lm)$`Pr(>F)`[1]
als_label <- glue('{equation_als}\nRsq = {round(summary(als_lm)$r.squared, 2)}\n{if (als_pval < 0.001) "p<0.001" else paste0("p=", signif(als_pval, 2))}')
wrap_als <- ggplot(data = rbr, aes(y = rbr_actual)) +
geom_abline(col = 'grey') +
geom_hline(yintercept = c(35, 130, 298), linetype = "dashed", col = 'grey') +
geom_vline(xintercept = c(35, 130, 298), linetype = "dashed", col = 'grey') +
geom_smooth(method = "lm", formula = y ~ x, se = F, aes(x = rbr_predict_als), col = 'black', size = 1.5) +
geom_point(aes(x = rbr_predict_als, color = rbr_class), size = 3.5) +
scale_color_manual(values = c('red', 'orange', 'yellow2', 'green3')) +
xlim(-5, 350) +
ylim(-5, 350) +
labs(colour = "Burn Severity") +
annotate('text', x = 340, y = 7, label = 'NC', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 340, y = 80, label = 'low', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 340, y = 215, label = 'medium', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 335, y = 335, label = 'high', angle = 45, size = 7, color = 'grey50') +
annotate('text', x = 7, y = 340, label = 'NC', size = 7, color = 'grey50') +
annotate('text', x = 80, y = 340, label = 'low', size = 7, color = 'grey50') +
annotate('text', x = 215, y = 340, label = 'medium', size = 7, color = 'grey50') +
annotate('label', x = 215, y = 20, label = als_label, size = 5) +
ggtitle("ALS") +
theme(legend.position = 'none', axis.title = element_blank())
wrap_als
#
# ---- HMLS (ZEB) panel -------------------------------------------------------
# Linear model: actual (satellite) RBR as a function of HMLS-predicted RBR.
zeb_lm <- lm(rbr_actual~rbr_predict_zeb, data=rbr)
summary(zeb_lm)
equation_zeb <- glue('y = {round(zeb_lm$coefficients[2],2)}x + {round(zeb_lm$coefficients[1], 2)}')
equation_zeb
# Annotation built from the fitted model so it cannot go stale (replaces a
# hard-coded label that disagreed with the commented model statistics).
zeb_pval <- anova(zeb_lm)$`Pr(>F)`[1]
zeb_label <- glue('{equation_zeb}\nRsq = {round(summary(zeb_lm)$r.squared, 2)}\n{if (zeb_pval < 0.001) "p<0.001" else paste0("p=", signif(zeb_pval, 2))}')
wrap_zeb <- ggplot(data = rbr, aes(y = rbr_actual)) +
geom_abline(col = 'grey') +
geom_hline(yintercept = c(35, 130, 298), linetype = "dashed", col = 'grey') +
geom_vline(xintercept = c(35, 130, 298), linetype = "dashed", col = 'grey') +
geom_smooth(method = "lm", formula = y ~ x, se = F, aes(x = rbr_predict_zeb), col = 'black', size = 1.5) +
geom_point(aes(x = rbr_predict_zeb, color = rbr_class), size = 3.5) +
scale_color_manual(values = c('red', 'orange', 'yellow2', 'green3')) +
xlim(-5, 350) +
ylim(-5, 350) +
labs(colour = "Burn Severity") +
annotate('text', x = 340, y = 7, label = 'NC', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 340, y = 80, label = 'low', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 340, y = 215, label = 'medium', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 335, y = 335, label = 'high', angle = 45, size = 7, color = 'grey50') +
annotate('text', x = 7, y = 340, label = 'NC', size = 7, color = 'grey50') +
annotate('text', x = 80, y = 340, label = 'low', size = 7, color = 'grey50') +
annotate('text', x = 215, y = 340, label = 'medium', size = 7, color = 'grey50') +
annotate('label', x = 215, y = 20, label = zeb_label, size = 5) +
ggtitle("HMLS") +
theme(legend.position = 'none', axis.title = element_blank())
wrap_zeb
# ---- UAS panel --------------------------------------------------------------
# Linear model: actual (satellite) RBR as a function of UAS-predicted RBR.
uas_lm <- lm(rbr_actual~rbr_predict_uas, data=rbr)
summary(uas_lm)
equation_uas <- glue('y = {round(uas_lm$coefficients[2],2)}x + {round(uas_lm$coefficients[1], 2)}')
equation_uas
# Annotation built from the fitted model so it cannot go stale (replaces a
# hard-coded label that disagreed with the commented model statistics).
uas_pval <- anova(uas_lm)$`Pr(>F)`[1]
uas_label <- glue('{equation_uas}\nRsq = {round(summary(uas_lm)$r.squared, 2)}\n{if (uas_pval < 0.001) "p<0.001" else paste0("p=", signif(uas_pval, 2))}')
wrap_uas <- ggplot(data = rbr, aes(y = rbr_actual)) +
geom_abline(col = 'grey') +
geom_hline(yintercept = c(35, 130, 298), linetype = "dashed", col = 'grey') +
geom_vline(xintercept = c(35, 130, 298), linetype = "dashed", col = 'grey') +
geom_smooth(method = "lm", formula = y ~ x, se = F, aes(x = rbr_predict_uas), col = 'black', size = 1.5) +
geom_point(aes(x = rbr_predict_uas, color = rbr_class), size = 3.5) +
scale_color_manual(values = c('red', 'orange', 'yellow2', 'green3')) +
xlim(-5, 350) +
ylim(-5, 350) +
labs(colour = "Burn Severity") +
annotate('text', x = 340, y = 7, label = 'NC', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 340, y = 80, label = 'low', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 340, y = 215, label = 'medium', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 335, y = 335, label = 'high', angle = 45, size = 7, color = 'grey50') +
annotate('text', x = 7, y = 340, label = 'NC', size = 7, color = 'grey50') +
annotate('text', x = 80, y = 340, label = 'low', size = 7, color = 'grey50') +
annotate('text', x = 215, y = 340, label = 'medium', size = 7, color = 'grey50') +
annotate('label', x = 215, y = 20, label = uas_label, size = 5) +
ggtitle("UAS") +
theme(legend.position = 'none', axis.title = element_blank())
wrap_uas
#
# ---- Banner panel -----------------------------------------------------------
# Linear model: actual (satellite) RBR as a function of Banner-predicted RBR.
banner_lm <- lm(rbr_actual~rbr_predict_banner, data=rbr)
summary(banner_lm)
equation_banner <- glue('y = {round(banner_lm$coefficients[2],2)}x + {round(banner_lm$coefficients[1], 2)}')
equation_banner
# Annotation built from the fitted model so it cannot go stale (replaces a
# hard-coded label that disagreed with the commented model statistics).
banner_pval <- anova(banner_lm)$`Pr(>F)`[1]
banner_label <- glue('{equation_banner}\nRsq = {round(summary(banner_lm)$r.squared, 2)}\n{if (banner_pval < 0.001) "p<0.001" else paste0("p=", signif(banner_pval, 2))}')
# This panel is the one that shows the shared severity-class legend
# (legend.position = 'bottom'); the other per-method panels hide theirs.
wrap_banner <- ggplot(data = rbr, aes(y = rbr_actual)) +
geom_abline(col = 'grey') +
geom_hline(yintercept = c(35, 130, 298), linetype = "dashed", col = 'grey') +
geom_vline(xintercept = c(35, 130, 298), linetype = "dashed", col = 'grey') +
geom_smooth(method = "lm", formula = y ~ x, se = F, aes(x = rbr_predict_banner), col = 'black', size = 1.5) +
geom_point(aes(x = rbr_predict_banner, color = rbr_class), size = 3.5) +
scale_color_manual(values = c('red', 'orange', 'yellow2', 'green3')) +
xlim(-5, 350) +
ylim(-5, 350) +
labs(colour = "RBR Class") +
annotate('text', x = 340, y = 7, label = 'NC', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 340, y = 80, label = 'low', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 340, y = 215, label = 'medium', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 335, y = 335, label = 'high', angle = 45, size = 7, color = 'grey50') +
annotate('text', x = 7, y = 340, label = 'NC', size = 7, color = 'grey50') +
annotate('text', x = 80, y = 340, label = 'low', size = 7, color = 'grey50') +
annotate('text', x = 215, y = 340, label = 'medium', size = 7, color = 'grey50') +
annotate('label', x = 215, y = 20, label = banner_label, size = 5) +
ggtitle("Banner") +
theme(legend.position = 'bottom', axis.title = element_blank()) +
guides(color = guide_legend(nrow = 3))
wrap_banner
# ---- All-methods panel ------------------------------------------------------
# Overlay every method's fitted line and point cloud in one panel, colored by
# method. The color aesthetic is now keyed on the method NAME: the previous
# version mapped color to raw hex strings and passed an UNNAMED `values` /
# `labels` pair, and ggplot2 assigns unnamed scale values in the sorted order
# of the aesthetic keys -- so the legend labels were silently paired with the
# wrong series (e.g. the Banner series, key '#499894', sorted first and
# picked up the 'TLS' label). Naming both the values and the breaks makes the
# method -> color -> label pairing explicit and fixes the legend order.
# 'MLS' is also renamed 'HMLS' for consistency with the HMLS panel title.
# NOTE(review): colors are assigned to methods in the original legend's label
# order (TLS, HMLS, ALS, UAS, Banner) -- confirm this matches the intended
# palette assignment.
method_colors <- c(
'TLS' = '#0D0887FF',
'HMLS' = '#900DA4FF',
'ALS' = '#E16462FF',
'UAS' = '#FCCE25FF',
'Banner' = '#21908CFF'
)
wrap_all <- ggplot(data = rbr, aes(y = rbr_actual)) +
geom_abline(col = 'grey') +
geom_hline(yintercept = c(35, 130, 298), linetype = "dashed", col = 'grey') +
geom_vline(xintercept = c(35, 130, 298), linetype = "dashed", col = 'grey') +
geom_smooth(method = "lm", formula = y ~ x, se = F, aes(x = rbr_predict_tls, color = 'TLS'), size = 1) +
geom_smooth(method = "lm", formula = y ~ x, se = F, aes(x = rbr_predict_als, color = 'ALS'), size = 1) +
geom_smooth(method = "lm", formula = y ~ x, se = F, aes(x = rbr_predict_zeb, color = 'HMLS'), size = 1) +
geom_smooth(method = "lm", formula = y ~ x, se = F, aes(x = rbr_predict_uas, color = 'UAS'), size = 1) +
geom_smooth(method = "lm", formula = y ~ x, se = F, aes(x = rbr_predict_banner, color = 'Banner'), size = 1) +
geom_point(aes(x = rbr_predict_tls, color = 'TLS'), size = 2.5) +
geom_point(aes(x = rbr_predict_zeb, color = 'HMLS'), size = 2.5) +
geom_point(aes(x = rbr_predict_als, color = 'ALS'), size = 2.5) +
geom_point(aes(x = rbr_predict_uas, color = 'UAS'), size = 2.5) +
geom_point(aes(x = rbr_predict_banner, color = 'Banner'), size = 2.5) +
scale_color_manual(values = method_colors, breaks = names(method_colors)) +
xlim(-5, 350) +
ylim(-5, 350) +
labs(colour = "Method") +
annotate('text', x = 340, y = 7, label = 'NC', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 340, y = 80, label = 'low', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 340, y = 215, label = 'medium', angle = 90, size = 7, color = 'grey50') +
annotate('text', x = 335, y = 335, label = 'high', angle = 45, size = 7, color = 'grey50') +
annotate('text', x = 7, y = 340, label = 'NC', size = 7, color = 'grey50') +
annotate('text', x = 80, y = 340, label = 'low', size = 7, color = 'grey50') +
annotate('text', x = 215, y = 340, label = 'medium', size = 7, color = 'grey50') +
ggtitle("All Methods") +
theme(legend.position = 'bottom', axis.title = element_blank()) +
guides(color = guide_legend(nrow = 3))
wrap_all
# Arrange the six per-method panels into a 2-column x 3-row grid.
# The bottom row is taller (5.25 vs 4.5) to leave room for the legends that
# only the Banner and All-Methods panels display (legend.position = 'bottom').
wrap_figure <-
ggarrange(
wrap_tls,
wrap_zeb,
wrap_als,
wrap_uas,
wrap_banner,
wrap_all,
ncol = 2,
nrow = 3,
widths = c(4.5, 4.5),
heights = c(4.5, 4.5, 5.25)
)
# Add shared axis titles once for the whole grid instead of per panel
# (the individual panels blank their axis titles in their themes).
wrap_figure <- annotate_figure(wrap_figure,
bottom = text_grob('Predicted RBR',
family = 'serif',
size = 32),
left = text_grob('Actual RBR',
family = 'serif',
size = 32,
rot = 90))
# Write the assembled figure to the path configured in `wrap_out`.
ggsave(
wrap_out,
plot=wrap_figure,
width = 13,
height = 20,
units = 'in',
dpi = 300 )
|
#Group 15: Anisha Bhuiyan, Shermeen Khan, Sahil Rajapkar
#Assignment HW # 2
# --------------------------------------
# FIX: the separator line above is now a comment. A bare line of hyphens is
# not valid standalone R -- it parses as a chain of unary minus operators
# applied to the next expression, which turned the following assignment into
# a parse/eval error and stopped the script from running.
#Reading in the Facebook data through the CSV file.
csv_download_link = "http://guides.newman.baruch.cuny.edu/ld.php?content_id=39953204"
link_url = url(csv_download_link)
fb_data = read.csv(link_url,sep=";")
#previewing the data
head(fb_data)
#seeing the data structure
str(fb_data)
#examining the dimensions of the facebook data
dim(fb_data)
#We will be converting some variables to factor type
fb_data$Category = as.factor(fb_data$Category) #creating a categorical variable with defined values
fb_data$Post.Month = as.factor(fb_data$Post.Month) #creating a categorical variable with defined values
fb_data$Post.Weekday = as.factor(fb_data$Post.Weekday) #creating a categorical variable with defined values
fb_data$Post.Hour = as.factor(fb_data$Post.Hour) #creating a categorical variable with defined values
fb_data$Paid = as.factor(fb_data$Paid) #creating a categorical variable with defined values
fb_data$Type = as.factor(fb_data$Type) #creating a categorical variable with defined values
#seeing the reupdated data structure
str(fb_data)
#we are seeing the values within the Category variable
levels(fb_data$Category)
#converting the characters to recode, we are renaming the categories 1 to 3 based on the necessary data post information
fb_data$Category = as.character(fb_data$Category)
fb_data[fb_data$Category=="1", "Category"] = "action"
fb_data[fb_data$Category=="2", "Category"] = "product"
fb_data[fb_data$Category=="3", "Category"] = "inspiration"
#converting data back to factor type
fb_data$Category = as.factor(fb_data$Category)
#confirming levels was updated
levels(fb_data$Category)
#we are doing the missing values information first because it results in an error in the paid and non-paid recoding
#Counts and removes the number of null values in the set
sum(is.na(fb_data))
fb_data = na.omit(fb_data)
#Confirms that there are 0 missing values
sum(is.na(fb_data))
# This step will be used to recode the Paid variable values of 0-1 to non-paid and paid and change the data type accordingly
levels(fb_data$Paid)
fb_data$Paid = as.character(fb_data$Paid)
fb_data[fb_data$Paid=="0", "Paid"] = "non_paid"
fb_data[fb_data$Paid=="1", "Paid"] = "paid"
fb_data$Paid = as.factor((fb_data$Paid))
levels(fb_data$Paid)
#Using descriptive statistics to see the 5- number summary of and the standard deviation
summary((fb_data$share))
sd(fb_data$share)
#Plotting a histogram to see the distribution of shares
hist(fb_data$share)
hist(fb_data$share,xlab="Facebook Shares",ylab="# of shares",main="Histogram of Shares", breaks=100)
#Applying the xlim parameter to adjust the range and analyze the range of shares most posts fall under
hist(fb_data$share,xlab="Facebook Shares",ylab="# of shares",main="Histogram of Shares", breaks=100, xlim=c(0,200))
#Using descriptive statistics to examine the categorical variables to determine the trends in the data
categorical_variables = c("Category","Paid","Post.Month","Post.Weekday","Post.Hour")
summary(fb_data[,categorical_variables])
#Creating a bar plot for the monthly posting frequency, category of posts made, and number of paid posts
plot(fb_data$Post.Month,main="Posts by Month",xlab="Month",ylab="# of posts",las=2)
plot(fb_data$Category,main="Category",xlab="Type",ylab="# of each category")
plot(fb_data$Paid,main="Paid vs Non-paid ads",xlab="Type",ylab="# of posts")
# Comparing the relationship between likes and shares with a scatterplot
x=fb_data$like
y=fb_data$share
plot(x,y,xlab="likes",ylab="shares",main="likes vs shares")
plot(x,y,xlab="likes",ylab="shares",main="likes vs shares",
xlim=c(0,1000), ylim=c(0,200))
#The correlation between likes and shares to measure the strength of the relationship
cor(fb_data$like, fb_data$share)
#Creating a box plot to provide insight on the relationship of shares received per month
x=fb_data$Post.Month
y=fb_data$share
plot(x,y,ylim=c(0,500),las=2,xlab="month",ylab="shares", main="Shares by Month")
#Comparing the paid vs non-paid information using x tabs and aggregate functions
xtabs(~fb_data$share + fb_data$Paid)
aggregate(share~Paid,fb_data, sum)
#Comparing the performance between paid and non-paid posts using boxplots
x=fb_data$Paid
y=fb_data$share
plot(x,y,main="Shares for Non-Paid vs Paid posts",xlab="Non-paid vs Paid",
ylab="share",ylim=c(0,500))
#Creating 1 x 2 panel to compare shares on the non-paid and paid posts for each category. This will be used to provide insight about the current dataset.
par(mfrow=c(1,2))
x = fb_data[fb_data$Paid=="non_paid", "Category"]
y = fb_data[fb_data$Paid=="non_paid", "share"]
plot(x, y, las=2,ylab="share",ylim=c(0,500),main="Non-Paid")
x = fb_data[fb_data$Paid=="paid", "Category"]
y = fb_data[fb_data$Paid=="paid", "share"]
plot(x, y, las=2,ylab="share",ylim=c(0,500),main="Paid")
| /Facebook Marketing (1) (1) (3).R | no_license | THSSIR18/cis3920-project-2 | R | false | false | 4,922 | r | #Group 15: Anisha Bhuiyan, Shermeen Khan, Sahil Rajapkar
#Assignment HW # 2
--------------------------------------
#Reading in the Facebook data through the CSV file.
csv_download_link = "http://guides.newman.baruch.cuny.edu/ld.php?content_id=39953204"
link_url = url(csv_download_link)
fb_data = read.csv(link_url,sep=";")
#previewing the data
head(fb_data)
#seeing the data structure
str(fb_data)
#examining the dimensions of the facebook data
dim(fb_data)
#We will be converting some variables to factor type
fb_data$Category = as.factor(fb_data$Category) #creating a categorical variable with defined values
fb_data$Post.Month = as.factor(fb_data$Post.Month) #creating a categorical variable with defined values
fb_data$Post.Weekday = as.factor(fb_data$Post.Weekday) #creating a categorical variable with defined values
fb_data$Post.Hour = as.factor(fb_data$Post.Hour) #creating a categorical variable with defined values
fb_data$Paid = as.factor(fb_data$Paid) #creating a categorical variable with defined values
fb_data$Type = as.factor(fb_data$Type) #creating a categorical variable with defined values
#seeing the reupdated data structure
str(fb_data)
#we are seeing the values within the Category variable
levels(fb_data$Category)
#converting the characters to recode, we are renaming the categories 1 to 3 based on the necessary data post information
fb_data$Category = as.character(fb_data$Category)
fb_data[fb_data$Category=="1", "Category"] = "action"
fb_data[fb_data$Category=="2", "Category"] = "product"
fb_data[fb_data$Category=="3", "Category"] = "inspiration"
#converting data back to factor type
fb_data$Category = as.factor(fb_data$Category)
#confirming levels was updated
levels(fb_data$Category)
#we are doing the missing values information first because it results in an error in the paid and non-paid recoding
#Counts and removes the number of null values in the set
sum(is.na(fb_data))
fb_data = na.omit(fb_data)
#Confirms that there are 0 missing values
sum(is.na(fb_data))
# This step will be used to recode the Paid variable values of 0-1 to non-paid and paid and change the data type accordingly
levels(fb_data$Paid)
fb_data$Paid = as.character(fb_data$Paid)
fb_data[fb_data$Paid=="0", "Paid"] = "non_paid"
fb_data[fb_data$Paid=="1", "Paid"] = "paid"
fb_data$Paid = as.factor((fb_data$Paid))
levels(fb_data$Paid)
#Using descriptive statistics to see the 5- number summary of and the standard deviation
summary((fb_data$share))
sd(fb_data$share)
#Plotting a histogram to see the distribution of shares
hist(fb_data$share)
hist(fb_data$share,xlab="Facebook Shares",ylab="# of shares",main="Histogram of Shares", breaks=100)
#Applying the xlim parameter to adjust the range and analyze the range of shares most posts fall under
hist(fb_data$share,xlab="Facebook Shares",ylab="# of shares",main="Histogram of Shares", breaks=100, xlim=c(0,200))
#Using descriptive statistics to examine the categorical variables to determine the trends in the data
categorical_variables = c("Category","Paid","Post.Month","Post.Weekday","Post.Hour")
summary(fb_data[,categorical_variables])
#Creating a bar plot for the monthly posting frequency, category of posts made, and number of paid posts
plot(fb_data$Post.Month,main="Posts by Month",xlab="Month",ylab="# of posts",las=2)
plot(fb_data$Category,main="Category",xlab="Type",ylab="# of each category")
plot(fb_data$Paid,main="Paid vs Non-paid ads",xlab="Type",ylab="# of posts")
# Comparing the relationship between likes and shares with a scatterplot
x=fb_data$like
y=fb_data$share
plot(x,y,xlab="likes",ylab="shares",main="likes vs shares")
plot(x,y,xlab="likes",ylab="shares",main="likes vs shares",
xlim=c(0,1000), ylim=c(0,200))
#The correlation between likes and shares to measure the strength of the relationship
cor(fb_data$like, fb_data$share)
#Creating a box plot to provide insight on the relationship of shares received per month
x=fb_data$Post.Month
y=fb_data$share
plot(x,y,ylim=c(0,500),las=2,xlab="month",ylab="shares", main="Shares by Month")
#Comparing the paid vs non-paid information using x tabs and aggregate functions
xtabs(~fb_data$share + fb_data$Paid)
aggregate(share~Paid,fb_data, sum)
#Comparing the performance between paid and non-paid posts using boxplots
x=fb_data$Paid
y=fb_data$share
plot(x,y,main="Shares for Non-Paid vs Paid posts",xlab="Non-paid vs Paid",
ylab="share",ylim=c(0,500))
#Creating 1 x 2 panel to compare shares on the non-paid and paid posts for each category. This will be used to provide insight about the current dataset.
par(mfrow=c(1,2))
x = fb_data[fb_data$Paid=="non_paid", "Category"]
y = fb_data[fb_data$Paid=="non_paid", "share"]
plot(x, y, las=2,ylab="share",ylim=c(0,500),main="Non-Paid")
x = fb_data[fb_data$Paid=="paid", "Category"]
y = fb_data[fb_data$Paid=="paid", "share"]
plot(x, y, las=2,ylab="share",ylim=c(0,500),main="Paid")
|
#' Protein-protein interaction (PPI) networks for 5 microorganisms
#'
#' A dataset containing the protein-protein interaction networks for the
#' following 5 microorganisms
#' \itemize{
#' \item EBV
#' \itemize{
#' \item Common name: Epstein Barr virus
#' \item Scientific name: Human gammaherpesvirus 4
#' \item TaxonomyID: 10376
#' }
#' \item ECL
#' \itemize{
#' \item Common name: E.coli
#' \item Scientific name: Escherichia coli
#' \item TaxonomyID: 562
#' }
#' \item HSV-1
#' \itemize{
#' \item Common name: Herpes simplex virus type 1
#' \item Scientific name: Human alphaherpesvirus 1
#' \item TaxonomyID: 10298
#' }
#' \item KSHV
#' \itemize{
#'     \item Common name: Kaposi's Sarcoma-Associated Herpesvirus
#' \item Scientific name: Human gammaherpesvirus 8
#' \item TaxonomyID: 37296
#' }
#' \item VZV
#' \itemize{
#'     \item Common name: Varicella zoster virus
#' \item Scientific name: Human alphaherpesvirus 3
#' \item TaxonomyID: 10335
#' }
#' }
#'
#' @format A list of \code{igraph} objects.
#' @source \strong{PPI data (EBV, HSV-1, KSHV, VZV):} Fossum E, Friedel CC, Rajagopala SV, Titz B, Baiker A, Schmidt T, et al. (2009) Evolutionarily Conserved Herpesviral Protein Interaction Networks. PLoS Pathog 5(9): e1000570. \url{https://doi.org/10.1371/journal.ppat.1000570}. Data from Table S2 in the supporting information.
#' @source \strong{PPI data (ECL):} Peregrín-Alvarez JM, Xiong X, Su C, Parkinson J (2009) The Modular Organization of Protein Interactions in Escherichia coli. PLoS Comput Biol 5(10): e1000523. \url{https://doi.org/10.1371/journal.pcbi.1000523}
#' @source \strong{Taxonomy ground truth:} NCBI taxonomy database. \url{https://www.ncbi.nlm.nih.gov/taxonomy}
#' @encoding UTF-8
"virusppi"
#' World trade networks from 1985–2014
#'
#' The world trade data set consists of a small sample of world trade networks for the years 2001-2014, and pre-computed subgraph counts of a larger set of world trade networks (1985–2014). The world trade networks are based on the data set from [Feenstra et al., 2005] for the years 1962- 2000 and on the United Nations division COMTRADE [Division, 2015] for the years 2001-2014.
#'
#' \itemize{
#' \item wtnets: List of \code{igraph} objects providing the world trade networks from 2001–2014.
#' \item Counts: Pre-computed graphlet counts for the world trade networks in the years 1985-2014.
#' }
#'
#' @format A list of two elements. The first element, 'wtnets', is a list of \code{igraph} objects providing a small sample of world trade networks from 2001–2014. The second element, 'Counts', is a list of pre-computed subgraph counts of world trade networks in the years 1985-2014.
#' @source \strong{World trade networks:} United Nations commodity trade statistics database (UN Comtrade). http://comtrade.un.org/, 2015.
#' @source \strong{Subgraph Counts:} Feenstra RC,Lipsey RE, Deng H, Ma AC, and Mo H. (2005) World trade flows: 1962-2000. Technical report, National Bureau of Economic Research. (See also https://cid.econ.ucdavis.edu/wix.html).
#'
#' @encoding UTF-8
"worldtradesub"
| /R/data.R | permissive | alan-turing-institute/network-comparison | R | false | false | 3,191 | r | #' Protein-protein interaction (PPI) networks for 5 microorganisms
#'
#' A dataset containing the protein-protein interaction networks for the
#' following 5 microorganisms
#' \itemize{
#' \item EBV
#' \itemize{
#' \item Common name: Epstein Barr virus
#' \item Scientific name: Human gammaherpesvirus 4
#' \item TaxonomyID: 10376
#' }
#' \item ECL
#' \itemize{
#' \item Common name: E.coli
#' \item Scientific name: Escherichia coli
#' \item TaxonomyID: 562
#' }
#' \item HSV-1
#' \itemize{
#' \item Common name: Herpes simplex virus type 1
#' \item Scientific name: Human alphaherpesvirus 1
#' \item TaxonomyID: 10298
#' }
#' \item KSHV
#' \itemize{
#'     \item Common name: Kaposi's Sarcoma-Associated Herpesvirus
#' \item Scientific name: Human gammaherpesvirus 8
#' \item TaxonomyID: 37296
#' }
#' \item VZV
#' \itemize{
#'     \item Common name: Varicella zoster virus
#' \item Scientific name: Human alphaherpesvirus 3
#' \item TaxonomyID: 10335
#' }
#' }
#'
#' @format A list of \code{igraph} objects.
#' @source \strong{PPI data (EBV, HSV-1, KSHV, VZV):} Fossum E, Friedel CC, Rajagopala SV, Titz B, Baiker A, Schmidt T, et al. (2009) Evolutionarily Conserved Herpesviral Protein Interaction Networks. PLoS Pathog 5(9): e1000570. \url{https://doi.org/10.1371/journal.ppat.1000570}. Data from Table S2 in the supporting information.
#' @source \strong{PPI data (ECL):} Peregrín-Alvarez JM, Xiong X, Su C, Parkinson J (2009) The Modular Organization of Protein Interactions in Escherichia coli. PLoS Comput Biol 5(10): e1000523. \url{https://doi.org/10.1371/journal.pcbi.1000523}
#' @source \strong{Taxonomy ground truth:} NCBI taxonomy database. \url{https://www.ncbi.nlm.nih.gov/taxonomy}
#' @encoding UTF-8
"virusppi"
#' World trade networks from 1985–2014
#'
#' The world trade data set consists of a small sample of world trade networks for the years 2001-2014, and pre-computed subgraph counts of a larger set of world trade networks (1985–2014). The world trade networks are based on the data set from [Feenstra et al., 2005] for the years 1962- 2000 and on the United Nations division COMTRADE [Division, 2015] for the years 2001-2014.
#'
#' \itemize{
#' \item wtnets: List of \code{igraph} objects providing the world trade networks from 2001–2014.
#' \item Counts: Pre-computed graphlet counts for the world trade networks in the years 1985-2014.
#' }
#'
#' @format A list of two elements. The first element, 'wtnets', is a list of \code{igraph} objects providing a small sample of world trade networks from 2001–2014. The second element, 'Counts', is a list of pre-computed subgraph counts of world trade networks in the years 1985-2014.
#' @source \strong{World trade networks:} United Nations commodity trade statistics database (UN Comtrade). http://comtrade.un.org/, 2015.
#' @source \strong{Subgraph Counts:} Feenstra RC,Lipsey RE, Deng H, Ma AC, and Mo H. (2005) World trade flows: 1962-2000. Technical report, National Bureau of Economic Research. (See also https://cid.econ.ucdavis.edu/wix.html).
#'
#' @encoding UTF-8
"worldtradesub"
|
# Auto-generated fuzzer (libFuzzer/valgrind) regression input for
# grattan:::anyOutside -- do not hand-edit the values; they reproduce a
# specific crash/finding.
testlist <- list(a = 255L, b = -1L, x = integer(0))
result <- do.call(grattan:::anyOutside,testlist)
str(result) | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610054630-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 112 | r | testlist <- list(a = 255L, b = -1L, x = integer(0))
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
\name{so2}
\alias{so2}
\docType{data}
\title{SO2 levels from the LA pollution study
}
\description{Sulfur dioxide levels from the LA pollution study
}
\format{
The format is:
Time-Series [1:508] from 1970 to 1980: 3.37 2.59 3.29 3.04 3.39 2.57 2.35 3.38 1.5 2.56 ...
}
\seealso{\code{\link{lap}}}
\keyword{datasets}
| /astsa/man/so2.Rd | permissive | wilsonify/TimeSeries | R | false | false | 319 | rd | \name{so2}
\alias{so2}
\docType{data}
\title{SO2 levels from the LA pollution study
}
\description{Sulfur dioxide levels from the LA pollution study
}
\format{
The format is:
Time-Series [1:508] from 1970 to 1980: 3.37 2.59 3.29 3.04 3.39 2.57 2.35 3.38 1.5 2.56 ...
}
\seealso{\code{\link{lap}}}
\keyword{datasets}
|
=begin
=@False
Boolean @False()
==説明
Falseを返します。
==引数
なし
==エラー
*引数の数が合っていない場合
==条件
なし
==例
# False
# -> False
@False()
=end
| /docs/FalseFunction.rd | permissive | snakamura/q3 | R | false | false | 233 | rd | =begin
=@False
Boolean @False()
==説明
Falseを返します。
==引数
なし
==エラー
*引数の数が合っていない場合
==条件
なし
==例
# False
# -> False
@False()
=end
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as-data-frame-har.R
\name{as_data_frame.harentry}
\alias{as_data_frame.harentry}
\alias{as_data_frame.harentries}
\alias{as_data_frame.har}
\alias{as.data.frame.har}
\alias{as.data.frame.harentries}
\alias{as.data.frame.harentry}
\title{Turns a "HAR"-like object into a data frame(tibble)}
\usage{
as_data_frame.harentry(x, ...)
as_data_frame.harentries(x, ...)
as_data_frame.har(x, ...)
\method{as.data.frame}{har}(x, ...)
\method{as.data.frame}{harentries}(x, ...)
\method{as.data.frame}{harentry}(x, ...)
}
\arguments{
\item{x}{A \code{harentry} object}
\item{...}{ignored}
}
\value{
data frame (tibble)
}
\description{
Turns a "HAR"-like object into a data frame(tibble)
}
| /man/as_data_frame.harentry.Rd | no_license | cran/splashr | R | false | true | 761 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as-data-frame-har.R
\name{as_data_frame.harentry}
\alias{as_data_frame.harentry}
\alias{as_data_frame.harentries}
\alias{as_data_frame.har}
\alias{as.data.frame.har}
\alias{as.data.frame.harentries}
\alias{as.data.frame.harentry}
\title{Turns a "HAR"-like object into a data frame(tibble)}
\usage{
as_data_frame.harentry(x, ...)
as_data_frame.harentries(x, ...)
as_data_frame.har(x, ...)
\method{as.data.frame}{har}(x, ...)
\method{as.data.frame}{harentries}(x, ...)
\method{as.data.frame}{harentry}(x, ...)
}
\arguments{
\item{x}{A \code{harentry} object}
\item{...}{ignored}
}
\value{
data frame (tibble)
}
\description{
Turns a "HAR"-like object into a data frame(tibble)
}
|
#Q1
set.seed(1)
rmse <- replicate(100, {
test_index <- createDataPartition(dat$y, times = 1, p = 0.5, list = FALSE)
train_set <- dat %>% slice(-test_index)
test_set <- dat %>% slice(test_index)
fit <- lm(y ~ x, data = train_set)
y_hat <- predict(fit, newdata = test_set)
sqrt(mean((y_hat-test_set$y)^2))
})
mean(rmse)
sd(rmse)
#Q2
set.seed(1)
n <- c(100, 500, 1000, 5000, 10000)
res <- sapply(n, function(n){
Sigma <- 9*matrix(c(1.0, 0.5, 0.5, 1.0), 2, 2)
dat <- MASS::mvrnorm(n, c(69, 69), Sigma) %>%
data.frame() %>% setNames(c("x", "y"))
rmse <- replicate(100, {
test_index <- createDataPartition(dat$y, times = 1, p = 0.5, list = FALSE)
train_set <- dat %>% slice(-test_index)
test_set <- dat %>% slice(test_index)
fit <- lm(y ~ x, data = train_set)
y_hat <- predict(fit, newdata = test_set)
sqrt(mean((y_hat-test_set$y)^2))
})
c(avg = mean(rmse), sd = sd(rmse))
})
res
#Q4
set.seed(1)
n <- 100
Sigma <- 9*matrix(c(1.0, 0.95, 0.95, 1.0), 2, 2)
dat <- MASS::mvrnorm(n = 100, c(69, 69), Sigma) %>%
data.frame() %>% setNames(c("x", "y"))
rmse <- replicate(100, {
test_index <- createDataPartition(dat$y, times = 1, p = 0.5, list = FALSE)
train_set <- dat %>% slice(-test_index)
test_set <- dat %>% slice(test_index)
fit <- lm(y ~ x, data = train_set)
y_hat <- predict(fit, newdata = test_set)
sqrt(mean((y_hat-test_set$y)^2))
})
mean(rmse)
sd(rmse)
| /test 4.R | no_license | thinhle1304/machinelearning | R | false | false | 1,421 | r | #Q1
set.seed(1)
rmse <- replicate(100, {
test_index <- createDataPartition(dat$y, times = 1, p = 0.5, list = FALSE)
train_set <- dat %>% slice(-test_index)
test_set <- dat %>% slice(test_index)
fit <- lm(y ~ x, data = train_set)
y_hat <- predict(fit, newdata = test_set)
sqrt(mean((y_hat-test_set$y)^2))
})
mean(rmse)
sd(rmse)
#Q2
set.seed(1)
n <- c(100, 500, 1000, 5000, 10000)
res <- sapply(n, function(n){
Sigma <- 9*matrix(c(1.0, 0.5, 0.5, 1.0), 2, 2)
dat <- MASS::mvrnorm(n, c(69, 69), Sigma) %>%
data.frame() %>% setNames(c("x", "y"))
rmse <- replicate(100, {
test_index <- createDataPartition(dat$y, times = 1, p = 0.5, list = FALSE)
train_set <- dat %>% slice(-test_index)
test_set <- dat %>% slice(test_index)
fit <- lm(y ~ x, data = train_set)
y_hat <- predict(fit, newdata = test_set)
sqrt(mean((y_hat-test_set$y)^2))
})
c(avg = mean(rmse), sd = sd(rmse))
})
res
#Q4
set.seed(1)
n <- 100
Sigma <- 9*matrix(c(1.0, 0.95, 0.95, 1.0), 2, 2)
dat <- MASS::mvrnorm(n = 100, c(69, 69), Sigma) %>%
data.frame() %>% setNames(c("x", "y"))
rmse <- replicate(100, {
test_index <- createDataPartition(dat$y, times = 1, p = 0.5, list = FALSE)
train_set <- dat %>% slice(-test_index)
test_set <- dat %>% slice(test_index)
fit <- lm(y ~ x, data = train_set)
y_hat <- predict(fit, newdata = test_set)
sqrt(mean((y_hat-test_set$y)^2))
})
mean(rmse)
sd(rmse)
|
rbind(day1.learn,day2.learn) %>%
mutate(symbl_pair=fct_relevel(symbl_pair, "AB","CD","EF")) -> d
d %>% group_by(subj) %>% summarize(group=first(group)) -> gd
stan.data=list(
N=length(unique(d$subj)),
T=length(unique(d$trial))*2, ## both days one after the other
group_n_cond=as.integer(gd$group=="n_cond"),
group_p_cond=as.integer(gd$group=="p_cond"),
group_n_cntrl=as.integer(gd$group=="n_cntrl"),
group_p_cntrl=as.integer(gd$group=="p_cntrl"),
pair=do.call(rbind,
d %>% split(d$subj) %>% map(function(df){ as.integer(df$symbl_pair)})
),
day2=do.call(rbind,
d %>% split(d$subj) %>% map(function(df){ as.integer(df$day==2)})
),
outcome=do.call(rbind,
d %>% split(d$subj) %>% map(function(df){ as.integer(df$reward)})
),
accuracy=do.call(rbind,
d %>% split(d$subj) %>% map(function(df){ as.integer(df$accuracy)})
)
)
| /munge/01-A_standata.R | no_license | ZsoltTuri/2018-placebo-nocebo-study | R | false | false | 919 | r | rbind(day1.learn,day2.learn) %>%
mutate(symbl_pair=fct_relevel(symbl_pair, "AB","CD","EF")) -> d
d %>% group_by(subj) %>% summarize(group=first(group)) -> gd
stan.data=list(
N=length(unique(d$subj)),
T=length(unique(d$trial))*2, ## both days one after the other
group_n_cond=as.integer(gd$group=="n_cond"),
group_p_cond=as.integer(gd$group=="p_cond"),
group_n_cntrl=as.integer(gd$group=="n_cntrl"),
group_p_cntrl=as.integer(gd$group=="p_cntrl"),
pair=do.call(rbind,
d %>% split(d$subj) %>% map(function(df){ as.integer(df$symbl_pair)})
),
day2=do.call(rbind,
d %>% split(d$subj) %>% map(function(df){ as.integer(df$day==2)})
),
outcome=do.call(rbind,
d %>% split(d$subj) %>% map(function(df){ as.integer(df$reward)})
),
accuracy=do.call(rbind,
d %>% split(d$subj) %>% map(function(df){ as.integer(df$accuracy)})
)
)
|
with(af3a895751f014aa185f8deb89b4a92cb, {ROOT <- 'D:/ATS2.0/SEMOSS/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/834ef95c-965c-42d6-b939-0419704fe83f';options(digits.secs=NULL);FRAME941572[,(c('DATA_COLLECTION_TIME')) := lapply(.SD, function(x) as.POSIXct(fast_strptime(x, format='%Y-%m-%d %H:%M:%S'))), .SDcols = c('DATA_COLLECTION_TIME')]}); | /834ef95c-965c-42d6-b939-0419704fe83f/R/Temp/aKnNbAKfamY5Z.R | no_license | ayanmanna8/test | R | false | false | 384 | r | with(af3a895751f014aa185f8deb89b4a92cb, {ROOT <- 'D:/ATS2.0/SEMOSS/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/834ef95c-965c-42d6-b939-0419704fe83f';options(digits.secs=NULL);FRAME941572[,(c('DATA_COLLECTION_TIME')) := lapply(.SD, function(x) as.POSIXct(fast_strptime(x, format='%Y-%m-%d %H:%M:%S'))), .SDcols = c('DATA_COLLECTION_TIME')]}); |
rm(list=ls())
library(tidyverse)
library(curl)
library(forcats)
library(RcppRoll)
library(data.table)
library(readxl)
library(cowplot)
library(sf)
library(rmapshaper)
library(gganimate)
library(paletteer)
library(ggtext)
options(scipen = 999)
#Read in COVID case data
temp <- tempfile()
source <- "https://coronavirus.data.gov.uk/downloads/csv/coronavirus-cases_latest.csv"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
data <- fread(temp)[,c(1:6)]
colnames(data) <- c("name", "code", "type", "date", "cases", "cumul_cases")
data$date <- as.Date(data$date)
data <- subset(data, type=="utla")
#Set up skeleton dataframe with dates
LAcodes <- unique(data$code)
min <- min(data$date)
max <- max(data$date)
skeleton <- data.frame(code=rep(LAcodes, each=(max-min+1), times=1), date=rep(seq.Date(from=min, to=max, by="day"), each=1, times=length(LAcodes)))
#Map data onto skeleton
fulldata <- merge(skeleton, data[,-c(1,3)], by=c("code", "date"), all.x=TRUE, all.y=TRUE)
#Bring in LA names
temp <- data %>%
group_by(code) %>%
slice(1L)
fulldata <- merge(fulldata, temp[,c(1,2)], by="code")
#Fill in blank days
fulldata$cases <- ifelse(is.na(fulldata$cases), 0, fulldata$cases)
#Calculate cumulative sums so far
fulldata <- fulldata %>%
group_by(code) %>%
mutate(cumul_cases=cumsum(cases))
#this is the deaths for each NHS trust (only deaths in hospital) - we dont have daily deaths by LA yet
#so need to map this to LA. Approach to do this developed by and code adapted from @Benj_barr.
#Need to manually update the link to the latest total announced deaths file here:
#https://www.england.nhs.uk/statistics/statistical-work-areas/covid-19-daily-deaths/
#and extend the final number value in rows 78 & 80 by 1 to capture additional days (67=1st May announcement date)
temp <- tempfile()
source <- "https://www.england.nhs.uk/statistics/wp-content/uploads/sites/2/2020/11/COVID-19-total-announced-deaths-20-November-2020.xlsx"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
deaths<-as.data.table(read_excel(temp, sheet=6, col_names = F))
deaths<-deaths[18:.N, c(1:270)]
deaths<- melt.data.table(deaths, id=1:4, measure.vars = 5:270)
deaths[, 2:=NULL]
names(deaths)<-c("region", "procode3","trust","variable","deaths")
deaths$procode3 <- substr(deaths$procode3, 1, 3)
deaths[order(variable), date:=1:.N, by=.(procode3)]
deaths[, date:=as.Date("2020-02-29")+as.numeric(substr(variable, 4, 6))-5]
deaths[, variable:=NULL]
deaths$deaths <- as.numeric(deaths$deaths)
#deaths<-deaths[, list(deaths=sum(as.numeric(deaths),na.rm = T)), by=.(procode3,trust)]
sum(deaths$deaths)
# this is the number of all emergency admissions in 2018-2019 for each trust split by LA
dt1<-fread("Data/la_trust_lk.csv")
dt1<-dt1[is.na(areacode)==F]
length(unique(dt1$areacode)) #150
sum(deaths$deaths)
deaths<-merge(deaths, dt1, by="procode3", all.x = T, allow.cartesian = TRUE)
deaths[, fraction:=CountAdm/sum(CountAdm,na.rm = T),by=.(procode3, date)]
# 66 deaths not allocated to LA, trust is not in look up
sum(deaths[is.na(fraction)==T]$deaths)
deaths<-deaths[is.na(fraction)==F]
#we then assume that deaths from each Trust were distributed between LAs based on the historical share of admissions from that LA
deaths[, deaths:=deaths*fraction]
# 2019 LA boundary changes
#deaths[areaname=="Bournemouth"|areaname=="Poole",`:=` (areacode="E10000009",areaname="Dorset CC")]
deaths[areaname=="Isles of Scilly",`:=` (areacode="E06000052",areaname="Cornwall")]
deaths[areacode=="E10000009",`:=` (areacode="E06000059")]
deaths[areacode=="E06000029",`:=` (areacode="E06000058")]
deaths <- deaths %>%
group_by(areacode, date) %>%
summarise(deaths=sum(deaths, na.rm=TRUE))
length(unique(deaths$areacode)) #147
sum(deaths$deaths)
colnames(deaths) <- c("code", "date", "deaths")
fulldata <- merge(fulldata, deaths, by=c("code", "date"), all.x=TRUE)
heatmap <- fulldata %>%
group_by(code) %>%
mutate(casesroll_avg=roll_mean(cases, 7, align="right", fill=0), deathsroll_avg=roll_mean(deaths, 7, align="right", fill=0)) %>%
mutate(totalcases=max(cumul_cases), maxcaserate=max(casesroll_avg), maxcaseday=date[which(casesroll_avg==maxcaserate)][1],
cumul_deaths=sum(deaths, na.rm=TRUE), totaldeaths=max(cumul_deaths, na.rm=TRUE), maxdeathrate=max(deathsroll_avg, na.rm=TRUE),
maxdeathsday=date[which(deathsroll_avg==maxdeathrate)][1])
heatmap$maxcaseprop <- heatmap$casesroll_avg/heatmap$maxcaserate
heatmap$maxdeathprop <- heatmap$deathsroll_avg/heatmap$maxdeathrate
#Enter dates to plot from and to
plotfrom <- "2020-03-03"
plotto <- max(heatmap$date)
#Plot case trajectories
casetiles <- ggplot(heatmap, aes(x=date, y=fct_reorder(name, maxcaseday), fill=maxcaseprop))+
geom_tile(colour="White", show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
#scale_fill_viridis_c()+
scale_y_discrete(name="", expand=c(0,0))+
scale_x_date(name="Date", limits=as.Date(c(plotfrom, plotto)), expand=c(0,0))+
labs(title="Timelines for COVID-19 cases in English Local Authorities",
subtitle=paste0("The heatmap represents the 7-day rolling average of the number of new confirmed cases, normalised to the maximum value within the Local Authority.\nLAs are ordered by the date at which they reached their peak number of new cases. Bars on the right represent the absolute number of cases in each LA.\nData updated to ", plotto, ". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from Public Health England | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(0.78)), plot.title.position="plot",
axis.text.y=element_text(colour="Black"), plot.title=element_text(size=rel(2.3)))
casebars <- ggplot(subset(heatmap, date==maxcaseday), aes(x=totalcases, y=fct_reorder(name, maxcaseday), fill=totalcases))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
#scale_fill_viridis_c()+
scale_x_continuous(name="Total confirmed cases", breaks=c(0,2000,4000,6000,8000,10000))+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
tiff("Outputs/COVIDLACasesHeatmap.tiff", units="in", width=16, height=16, res=500)
plot_grid(casetiles, casebars, align="h", rel_widths=c(1,0.2))
dev.off()
png("Outputs/COVIDLACasesHeatmap.png", units="in", width=16, height=16, res=500)
plot_grid(casetiles, casebars, align="h", rel_widths=c(1,0.2))
dev.off()
#Plot death trajectories
deathtiles <- ggplot(heatmap, aes(x=date, y=fct_reorder(name, maxdeathsday), fill=maxdeathprop))+
geom_tile(colour="White", show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_y_discrete(name="")+
scale_x_date(name="Date", limits=as.Date(c("2020-03-06", plotto)), expand=c(0,0))+
labs(title="Timelines for COVID-19 deaths in hospital in English Local Authorities",
subtitle=paste0("The heatmap represents the 7-day rolling average of the number of estimated deaths, normalised to the maximum value within the Local Authority.\nLAs are ordered by the date at which they reached their peak number of deaths. Bars on the right represent the absolute number of deaths estimated\nin each LA. Deaths are estimated as COVID-19 mortality data is only available from NHS England at hospital level. LA-level deaths are modelled using\n@Benj_Barr's approach, using the proportion of HES emergency admissions to each hospital in 2018-19 originating from each LA.\nData updated to ", plotto, ". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from NHS England & Ben Barr | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(0.78)), plot.title.position="plot",
axis.text=element_text(colour="Black"), plot.title=element_text(size=rel(2.3)))
deathbars <- ggplot(subset(heatmap, date==maxdeathsday), aes(x=totaldeaths, y=fct_reorder(name, maxdeathsday), fill=totaldeaths))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_x_continuous(name="Total confirmed deaths")+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
tiff("Outputs/COVIDLADeathHeatmap.tiff", units="in", width=16, height=16, res=500)
plot_grid(deathtiles, deathbars, align="h", rel_widths=c(1,0.2))
dev.off()
png("Outputs/COVIDLADeathHeatmap.png", units="in", width=16, height=16, res=500)
plot_grid(deathtiles, deathbars, align="h", rel_widths=c(1,0.2))
dev.off()
##################################
#Absolute version of the heatmaps#
##################################
#Bring in population data
temp <- tempfile()
source <- "https://www.ons.gov.uk/file?uri=%2fpeoplepopulationandcommunity%2fpopulationandmigration%2fpopulationestimates%2fdatasets%2fpopulationestimatesforukenglandandwalesscotlandandnorthernireland%2fmid20182019laboundaries/ukmidyearestimates20182019ladcodes.xls"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
LApop <- read_excel(temp, sheet="MYE2-All", range="A5:D367", col_names=TRUE)
colnames(LApop) <- c("code", "name", "geography", "pop")
heatmap <- merge(heatmap, LApop[,c(1,4)], by="code")
heatmap$cumul_caserate <- heatmap$totalcases*100000/heatmap$pop
heatmap$cumul_deathrate <- heatmap$totaldeaths*100000/heatmap$pop
heatmap$avgcaserates <- heatmap$casesroll_avg*100000/heatmap$pop
#Plot absolute case trajectories
abscasetiles <- ggplot(heatmap, aes(x=date, y=fct_reorder(name, maxcaseday), fill=casesroll_avg))+
geom_tile(colour="White", show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_y_discrete(name="", expand=c(0,0))+
scale_x_date(name="Date", limits=as.Date(c(plotfrom, plotto)), expand=c(0,0))+
labs(title="Timelines for COVID-19 cases in English Local Authorities",
subtitle=paste0("The heatmap represents the 7-day rolling average of the number of new confirmed cases within each Local Authority.\nLAs are ordered by the date at which they reached their peak number of cases. Bars on the right represent the cumulative number of cases per 100,000 population in each LA.\nData updated to ", plotto, ". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from Public Health England | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(0.78)), plot.title.position="plot",
axis.text=element_text(colour="Black"))
abscasebars <- ggplot(subset(heatmap, date==maxcaseday), aes(x=cumul_caserate, y=fct_reorder(name, maxcaseday), fill=cumul_caserate))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_x_continuous(name="Total confirmed cases\nper 100,000 population", breaks=c(0,500,1000,1500))+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
tiff("Outputs/COVIDLACasesHeatmapAbs.tiff", units="in", width=16, height=16, res=500)
plot_grid(abscasetiles, abscasebars, align="h", rel_widths=c(1,0.2))
dev.off()
png("Outputs/COVIDLACasesHeatmapAbs.png", units="in", width=16, height=16, res=500)
plot_grid(abscasetiles, abscasebars, align="h", rel_widths=c(1,0.2))
dev.off()
#Plot absolute case rate trajectories
ratetiles <- ggplot(heatmap, aes(x=date, y=fct_reorder(name, maxcaseday), fill=avgcaserates))+
geom_tile(colour="White", show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_y_discrete(name="", expand=c(0,0))+
scale_x_date(name="Date", limits=as.Date(c(plotfrom, plotto)), expand=c(0,0))+
labs(title="Timelines for COVID-19 case rates in English Local Authorities",
subtitle=paste0("The heatmap represents the 7-day rolling average of the number of new confirmed cases per 100,000 population within each Local Authority.\nLAs are ordered by the date at which they reached their peak number of cases. Bars on the right represent the total population of each LA.\nData updated to ", plotto, ". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from Public Health England | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(1.2)), plot.title.position="plot",
axis.text=element_text(colour="Black"), plot.title=element_text(size=rel(2.3)))
ratebars <- ggplot(subset(heatmap, date==maxcaseday), aes(x=pop, y=fct_reorder(name, maxcaseday), fill=pop))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_x_continuous(name="Population")+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
tiff("Outputs/COVIDLARateHeatmap.tiff", units="in", width=16, height=16, res=500)
plot_grid(ratetiles, ratebars, align="h", rel_widths=c(1,0.2))
dev.off()
png("Outputs/COVIDLARateHeatmap.png", units="in", width=16, height=16, res=500)
plot_grid(ratetiles, ratebars, align="h", rel_widths=c(1,0.2))
dev.off()
#Plot absolute death trajectories
death <- ggplot(heatmap, aes(x=date, y=fct_reorder(name, maxdeathsday), fill=deathsroll_avg))+
geom_tile(colour="White", show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_y_discrete(name="", expand=c(0,0))+
scale_x_date(name="Date", limits=as.Date(c("2020-03-06", plotto)), expand=c(0,0))+
labs(title="Timelines for COVID-19 deaths in hospital in English Local Authorities",
subtitle=paste0("The heatmap represents the 7-day rolling average of the number of daily confirmed deaths within each Local Authority.\nLAs are ordered by the date at which they reached their peak number of deaths Bars on the right represent the cumulative number of cases per 100,000 population in each LA.\nData updated to ", plotto, ". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from Public Health England | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(0.78)), plot.title.position="plot",
axis.text=element_text(colour="Black"), plot.title=element_text(size=rel(2.3)))
deathbars <- ggplot(subset(heatmap, date==maxdeathsday), aes(x=cumul_deathrate, y=fct_reorder(name, maxdeathsday), fill=cumul_deathrate))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_x_continuous(name="Total confirmed deaths\nper 100,000 population", breaks=c(0,50,100))+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
tiff("Outputs/COVIDLADeathsHeatmapAbs.tiff", units="in", width=16, height=16, res=500)
plot_grid(death, deathbars, align="h", rel_widths=c(1,0.2))
dev.off()
png("Outputs/COVIDLADeathsHeatmapAbs.png", units="in", width=16, height=16, res=500)
plot_grid(death, deathbars, align="h", rel_widths=c(1,0.2))
dev.off()
#Plot death rate trajectories
deathrate <- ggplot(heatmap, aes(x=date, y=fct_reorder(name, maxdeathsday), fill=deathsroll_avg*100000/pop))+
geom_tile(colour="White", show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_y_discrete(name="", expand=c(0,0))+
scale_x_date(name="Date", limits=as.Date(c("2020-03-06", plotto)), expand=c(0,0))+
labs(title="Timelines for COVID-19 death rates in hospitals in English Local Authorities",
subtitle=paste0("The heatmap represents the 7-day rolling average of the number of daily confirmed deaths per 100,000 within each Local Authority.\nLAs are ordered by the date at which they reached their peak number of deaths Bars on the right represent the cumulative number of cases per 100,000 population in each LA.\nData updated to ", plotto, ". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from NHS England | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(0.78)), plot.title.position="plot",
axis.text=element_text(colour="Black"), plot.title=element_text(size=rel(2.3)))
deathratebars <- ggplot(subset(heatmap, date==maxdeathsday), aes(x=pop, y=fct_reorder(name, maxdeathsday), fill=pop))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_x_continuous(name="Population")+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
tiff("Outputs/COVIDLADeathsRateHeatmap.tiff", units="in", width=16, height=16, res=500)
plot_grid(deathrate, deathratebars, align="h", rel_widths=c(1,0.2))
dev.off()
png("Outputs/COVIDLADeathsRateHeatmap.png", units="in", width=16, height=16, res=500)
plot_grid(deathrate, deathratebars, align="h", rel_widths=c(1,0.2))
dev.off()
##########################
#Map of case trajectories#
##########################
#Download shapefile of LA boundaries
temp <- tempfile()
temp2 <- tempfile()
source <- "https://opendata.arcgis.com/datasets/6638c31a8e9842f98a037748f72258ed_0.zip?outSR=%7B%22latestWkid%22%3A27700%2C%22wkid%22%3A27700%7D"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
unzip(zipfile=temp, exdir=temp2)
#The actual shapefile has a different name each time you download it, so need to fish the name out of the unzipped file
name <- list.files(temp2, pattern=".shp")
shapefile <- st_read(file.path(temp2, name))
names(shapefile)[names(shapefile) == "ctyua17cd"] <- "code"
simplemap <- ms_simplify(shapefile, keep=0.2, keep_shapes = TRUE)
#Duplicate data to account for shapefile using pre-2019 codes
int1 <- filter(heatmap, name=="Bournemouth, Christchurch and Poole")
int1$code <- "E06000028"
int2 <- filter(heatmap, name=="Bournemouth, Christchurch and Poole")
int2$code <- "E06000029"
int3 <- filter(heatmap, name=="Bournemouth, Christchurch and Poole")
int3$code <- "E10000009"
temp <- rbind(heatmap, int1, int2, int3)
#Calculate change in cases in the past week
change <- temp %>%
mutate(change=casesroll_avg-lag(casesroll_avg,7))
#Exclude most recent day as reporting is usually very incomplete
change <- subset(change, date==max-3)
map.change <- full_join(simplemap, change, by="code", all.y=TRUE)
map.change <- map.change %>% drop_na("maxcaseprop")
#Map of past week changes
changemap <- ggplot()+
geom_sf(data=map.change, aes(geometry=geometry, fill=change), colour=NA)+
geom_sf(data=subset(map.change, casesroll_avg==0), aes(geometry=geometry), fill="#41ab5d", colour=NA)+
xlim(10000,655644)+
ylim(5337,700000)+
theme_classic()+
scale_fill_paletteer_c("scico::roma", limit=c(-1,1)*max(abs(map.change$change)),
name="Change in case numbers\nin the past week", breaks=c(-60,-30, 0,30,60),
labels=c("-60", "-30", "0", "+30", "+60"),direction=-1)+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.subtitle=element_markdown())+
labs(title="Recent changes in COVID-19 case numbers across England",
subtitle="<span style='color:Grey50;'>Has the 7-day rolling average of case numbers <span style='color:#854B01FF;'>risen<span style='color:Grey50;'> or <span style='color:#014380FF;'>fallen<span style='color:Grey50;'> in the past week?<br>Areas with 0 cases shown in <span style='color:#41ab5d;'>green",
caption="Data from Public Health England | Plot by @VictimOfMaths")+
geom_rect(aes(xmin=500000, xmax=560000, ymin=156000, ymax=200000), fill="transparent",
colour="gray50")+
geom_rect(aes(xmin=310000, xmax=405000, ymin=370000, ymax=430000), fill="transparent",
colour="gray50")+
geom_rect(aes(xmin=405000, xmax=490000, ymin=505000, ymax=580000), fill="transparent",
colour="gray50")
#Add zoomed in areas
#London
London <- ggplot()+
geom_sf(data=map.change, aes(geometry=geometry, fill=change), colour=NA, show.legend=FALSE)+
geom_sf(data=subset(map.change, casesroll_avg==0), aes(geometry=geometry), fill="#41ab5d", colour=NA)+
scale_x_continuous(limits=c(500000,560000), expand=c(0,0))+
scale_y_continuous(limits=c(156000,200000), expand=c(0,0))+
theme_classic()+
scale_fill_paletteer_c("scico::roma", limit=c(-1,1)*max(abs(map.change$change)), direction=-1)+
labs(title="Greater London")+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.title=element_text(size=rel(0.9)))
#North-West England
NWEng <-ggplot()+
geom_sf(data=map.change, aes(geometry=geometry, fill=change), colour=NA, show.legend=FALSE)+
geom_sf(data=subset(map.change, casesroll_avg==0), aes(geometry=geometry), fill="#41ab5d", colour=NA)+
scale_x_continuous(limits=c(310000,405000), expand=c(0,0))+
scale_y_continuous(limits=c(370000,430000), expand=c(0,0))+
theme_classic()+
scale_fill_paletteer_c("scico::roma", limit=c(-1,1)*max(abs(map.change$change)), direction=-1)+
labs(title="The North West")+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.title=element_text(size=rel(0.9)))
#Tyne/Tees
NEEng <- ggplot()+
geom_sf(data=map.change, aes(geometry=geometry, fill=change), colour=NA, show.legend=FALSE)+
geom_sf(data=subset(map.change, casesroll_avg==0), aes(geometry=geometry), fill="#41ab5d", colour=NA)+
scale_x_continuous(limits=c(405000,490000), expand=c(0,0))+
scale_y_continuous(limits=c(505000,580000), expand=c(0,0))+
theme_classic()+
scale_fill_paletteer_c("scico::roma", limit=c(-1,1)*max(abs(map.change$change)), direction=-1)+
labs(title="The North East")+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.title=element_text(size=rel(0.9)))
tiff("Outputs/COVIDChangesmapEng.tiff", units="in", width=9, height=11, res=500)
ggdraw()+
draw_plot(changemap)+
draw_plot(London, 0.01,0.34,0.32,0.21)+
draw_plot(NWEng, 0.01,0.57, 0.32, 0.24)+
draw_plot(NEEng, 0.57, 0.62, 0.22, 0.22)
dev.off()
#For animation
map.data <- full_join(simplemap, temp, by="code", all.y=TRUE)
#remove areas with no HLE data (i.e. Scotland, Wales & NI)
map.data <- map.data %>% drop_na("maxcaseprop")
#Animation of case trajectories
CaseAnim <- ggplot(subset(map.data, date>as.Date("2020-02-25")), aes(geometry=geometry, fill=maxcaseprop))+
geom_sf(colour=NA)+
xlim(10000,655644)+
ylim(5337,700000)+
theme_classic()+
scale_fill_distiller(palette="Spectral", name="Daily cases as a %\nof peak cases", breaks=c(0,0.25,0.5,0.75,1),
labels=c("0%", "25%", "50%", "75%", "100%"))+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.title=element_text(face="bold"))+
transition_time(date)+
labs(title="Visualising the spread of the pandemic across England",
subtitle="Rolling 7-day average number of new confirmed cases coloured relative to the\npeak in each Local Authority (i.e. dark red represents the peak of new cases).\nDate: {frame_time}",
caption="Data from Public Health England | Visualisation by @VictimOfMaths")
animate(CaseAnim, duration=25, fps=10, width=2000, height=3000, res=300, renderer=gifski_renderer("Outputs/CaseAnim.gif"), end_pause=60)
#Animation of death trajectories
DeathAnim <- ggplot(subset(map.data, date>as.Date("2020-03-03")), aes(geometry=geometry, fill=maxdeathprop))+
geom_sf(colour=NA)+
xlim(10000,655644)+
ylim(5337,700000)+
theme_classic()+
scale_fill_distiller(palette="Spectral", name="Daily deaths as a %\nof peak deaths", breaks=c(0,0.25,0.5,0.75,1),
labels=c("0%", "25%", "50%", "75%", "100%"))+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.title=element_text(face="bold"))+
transition_time(date)+
labs(title="Visualising the spread of the pandemic across England",
subtitle="Rolling 7-day average number of new confirmed deaths coloured relative to the\npeak in each Local Authority (i.e. dark red represents the peak in deaths).\nDate: {frame_time}",
caption="Data from NHS England | Visualisation by @VictimOfMaths")
animate(DeathAnim, duration=18, fps=10, width=2000, height=3000, res=300, renderer=gifski_renderer("Outputs/DeathAnim.gif"), end_pause=60)
#Animation of absolute case numbers
CaseAnimAbs <- ggplot(subset(map.data, date>as.Date("2020-02-25")), aes(geometry=geometry, fill=casesroll_avg))+
geom_sf(colour=NA)+
xlim(10000,655644)+
ylim(5337,700000)+
theme_classic()+
scale_fill_distiller(palette="Spectral", name="Daily confirmed cases", na.value="white")+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.title=element_text(face="bold"))+
transition_time(date)+
labs(title="Visualising the spread of the pandemic across England",
subtitle="Rolling 7-day average number of new confirmed cases.\nDate: {frame_time}",
caption="Data from Public Health England | Visualisation by @VictimOfMaths")
animate(CaseAnimAbs, duration=25, fps=10, width=2000, height=3000, res=300, renderer=gifski_renderer("Outputs/CaseAnimAbs.gif"), end_pause=60)
#Animation of death rates
#Filled by deaths per 100,000 population (pop merged onto map.data earlier in the script)
DeathRateAnim <- ggplot(subset(map.data, date>as.Date("2020-03-03")), aes(geometry=geometry, fill=deathsroll_avg*100000/pop))+
geom_sf(colour=NA)+
xlim(10000,655644)+
ylim(5337,700000)+
theme_classic()+
scale_fill_distiller(palette="Spectral", name="Daily deaths\nper 100,000")+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.title=element_text(face="bold"))+
transition_time(date)+
labs(title="Visualising the spread of the pandemic across England",
subtitle="Rolling 7-day average number of new confirmed COVID-19 deaths in hospitals per 100,000\nDate: {frame_time}",
caption="Data from NHS England | Visualisation by @VictimOfMaths")
#Render to GIF
animate(DeathRateAnim, duration=18, fps=10, width=2000, height=3000, res=300, renderer=gifski_renderer("Outputs/DeathRateAnim.gif"), end_pause=60)
#Quick analysis of potential COVID 'bumps'
#South-coast LAs: look for a case rise after the sunny May bank holiday weather
temp1 <- subset(heatmap, name %in% c("Dorset", "Cornwall and Isles of Scilly", "Devon", "Bournemouth, Christchurch and Poole",
"West Sussex", "East Sussex", "Brighton and Hove"))
tiff("Outputs/COVIDSouthCoast.tiff", units="in", width=8, height=4, res=500)
#Heatmap of rolling average case rates per 100,000, one row per LA
ggplot(subset(temp1, date>"2020-05-01"), aes(x=date, y=name, fill=avgcaserates))+
geom_tile(colour="white")+
scale_fill_distiller(palette="Spectral", name="New cases\nper 100,000")+
scale_x_date(name="Date")+
scale_y_discrete(name="")+
theme_classic()+
labs(title="No clear signs of a rise in cases after the sunny May weather",
subtitle="7-day rolling average of new confirmed COVID-19 cases",
caption="Data from PHE | Plot by @VictimOfMaths")
dev.off()
#Inner London boroughs: look for a case rise after the Central London protests
#("Lewisham" was previously listed twice; %in% ignores duplicates so removing it is behaviour-neutral)
temp2 <- subset(heatmap, name %in% c("Islington", "Camden", "Hackney", "Southwark", "Tower Hamlets",
"Lambeth", "Lewisham", "Haringey", "Westminster", "Kensington and Chelsea",
"Hammersmith and Fulham", "Wandsworth", "Newham"))
tiff("Outputs/COVIDLondon.tiff", units="in", width=8, height=5, res=500)
#Heatmap of rolling average case rates per 100,000, one row per borough
ggplot(subset(temp2, date>"2020-05-01"), aes(x=date, y=name, fill=avgcaserates))+
geom_tile(colour="white")+
scale_fill_distiller(palette="Spectral", name="New cases\nper 100,000")+
scale_x_date(name="Date")+
scale_y_discrete(name="")+
theme_classic()+
labs(title="No clear evidence of a rise in cases after the protests in Central London",
subtitle="7-day rolling average of new confirmed COVID-19 cases",
caption="Data from PHE | Plot by @VictimOfMaths")
dev.off()
#LAs flagged as having high combined Pillar 1 & 2 testing in week 25
temp3 <- subset(heatmap, name %in% c("Leicester", "Bedford", "Barnsley", "Rotherham",
"Kirklees", "Bradford", "Rochdale", "Oldham",
"Tameside", "Blackburn with Darwen"))
tiff("Outputs/COVIDPillarsHeatmap.tiff", units="in", width=10, height=5, res=500)
#Heatmap of rolling average case rates per 100,000, one row per LA
ggplot(subset(temp3, date>"2020-05-01"), aes(x=date, y=name, fill=avgcaserates))+
geom_tile(colour="white")+
scale_fill_distiller(palette="Spectral", name="New cases\nper 100,000")+
scale_x_date(name="Date")+
scale_y_discrete(name="")+
theme_classic()+
labs(title="Mixed Pillar 1 trajectories in areas with high combined Pillar 1 and 2 tests in week 25",
subtitle="7-day rolling average of new confirmed COVID-19 cases",
caption="Data from PHE | Plot by @VictimOfMaths")
dev.off()
#Graph of pillar 1 tests in any LA you like
LA <- "Bradford"
tiff(paste0("Outputs/COVIDNewCases", LA, ".tiff"), units="in", width=8, height=6, res=500)
#Daily case counts as bars with the 7-day rolling average overlaid as a red line.
#Note: `max` here is the latest data date defined earlier in the script (it shadows base::max);
#the most recent day is dropped from the rolling-average line as it is incomplete.
ggplot()+
geom_col(data=subset(heatmap, name==LA), aes(x=date, y=cases), fill="skyblue2")+
geom_line(data=subset(heatmap, name==LA & date<max-1), aes(x=date, y=casesroll_avg), colour="red")+
scale_x_date(name="Date")+
scale_y_continuous("New COVID-19 cases")+
theme_classic()+
#element_markdown() renders the HTML <span> colouring in the subtitle
theme(plot.subtitle=element_markdown())+
#Fix: the <span> tag was previously unclosed; close it so the red colouring ends correctly
labs(title=paste0("Confirmed new COVID cases in ",LA),
subtitle="Confirmed new COVID-19 cases identified through combined pillar 1 & 2 testing and the <span style='color:Red;'>7-day rolling average</span>",
caption="Data from PHE | Plot by @VictimOfMaths")
dev.off()
#New lockdown areas
LA <- c("Calderdale", "Blackburn with Darwen", "Leicester", "Bury", "Oldham", "Manchester",
"Salford", "Rochdale", "Stockport", "Tameside", "Trafford", "Wigan", "Bolton",
"Kirklees", "Lancashire")
tiff("Outputs/COVIDNewLockdown.tiff", units="in", width=10, height=5, res=500)
#Heatmap of case rates with reference lines marking lockdown-easing (blue) and
#re-imposed restrictions (red) dates.
#Fix: the reference lines were previously drawn with geom_segment(aes(...)), which
#redraws the segment once per data row; annotate() draws each segment exactly once.
ggplot(subset(heatmap, name %in% LA), aes(x=date, y=name, fill=avgcaserates))+
geom_tile(colour="white")+
annotate("segment", x=as.Date("2020-06-29"), xend=as.Date("2020-06-29"), y=0, yend=16, colour="NavyBlue", linetype=2)+
annotate("segment", x=as.Date("2020-07-31"), xend=as.Date("2020-07-31"), y=0, yend=16, colour="Red", linetype=2)+
scale_fill_distiller(palette="Spectral", name="New cases\nper 100,000")+
scale_x_date(name="Date")+
scale_y_discrete(name="")+
theme_classic()+
labs(title="Trajectories of COVID cases in areas with second lockdown restrictions",
subtitle="7-day rolling average of new confirmed COVID-19 cases per 100,000 inhabitants",
caption="Data from PHE | Plot by @VictimOfMaths")
dev.off()
| /Heatmaps/English LA Heatmaps.R | no_license | cymack/COVID-20 | R | false | false | 31,405 | r | rm(list=ls())
library(tidyverse)
library(curl)
library(forcats)
library(RcppRoll)
library(data.table)
library(readxl)
library(cowplot)
library(sf)
library(rmapshaper)
library(gganimate)
library(paletteer)
library(ggtext)
options(scipen = 999)
#Read in COVID case data
#Download the latest UTLA-level case csv from the PHE coronavirus dashboard
temp <- tempfile()
source <- "https://coronavirus.data.gov.uk/downloads/csv/coronavirus-cases_latest.csv"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
#Keep only the first six columns and give them stable names
data <- fread(temp)[,c(1:6)]
colnames(data) <- c("name", "code", "type", "date", "cases", "cumul_cases")
data$date <- as.Date(data$date)
#Keep only Upper Tier Local Authorities
data <- subset(data, type=="utla")
#Set up skeleton dataframe with dates
#One row per LA code per day, so days with no reported cases still appear.
#NOTE(review): `min` and `max` shadow the base R functions of the same name; they are
#reused as the date range throughout the rest of the script, so they are left as-is here.
LAcodes <- unique(data$code)
min <- min(data$date)
max <- max(data$date)
skeleton <- data.frame(code=rep(LAcodes, each=(max-min+1), times=1), date=rep(seq.Date(from=min, to=max, by="day"), each=1, times=length(LAcodes)))
#Map data onto skeleton
#Outer join so every LA/day combination is retained even with no reported data
fulldata <- merge(skeleton, data[,-c(1,3)], by=c("code", "date"), all.x=TRUE, all.y=TRUE)
#Bring in LA names
#Take the first row per code to get a one-row code->name lookup
temp <- data %>%
group_by(code) %>%
slice(1L)
fulldata <- merge(fulldata, temp[,c(1,2)], by="code")
#Fill in blank days
#Days absent from the source data are treated as zero new cases
fulldata$cases <- ifelse(is.na(fulldata$cases), 0, fulldata$cases)
#Calculate cumulative sums so far
fulldata <- fulldata %>%
group_by(code) %>%
mutate(cumul_cases=cumsum(cases))
#this is the deaths for each NHS trust (only deaths in hospital) - we dont have daily deaths by LA yet
#so need to map this to LA. Approach to do this developed by and code adapted from @Benj_barr.
#Need to manually update the link to the latest total announced deaths file here:
#https://www.england.nhs.uk/statistics/statistical-work-areas/covid-19-daily-deaths/
#and extend the final number value in rows 78 & 80 by 1 to capture additional days (67=1st May announcement date)
temp <- tempfile()
source <- "https://www.england.nhs.uk/statistics/wp-content/uploads/sites/2/2020/11/COVID-19-total-announced-deaths-20-November-2020.xlsx"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
#Sheet 6 of the workbook holds deaths by trust; drop the header rows and keep
#columns through 270 (one column per announcement date) - must grow as new dates are added
deaths<-as.data.table(read_excel(temp, sheet=6, col_names = F))
deaths<-deaths[18:.N, c(1:270)]
#Reshape wide date columns to long format, one row per trust per date
deaths<- melt.data.table(deaths, id=1:4, measure.vars = 5:270)
deaths[, 2:=NULL]
names(deaths)<-c("region", "procode3","trust","variable","deaths")
deaths$procode3 <- substr(deaths$procode3, 1, 3)
deaths[order(variable), date:=1:.N, by=.(procode3)]
#Recover the calendar date from the melted column index (offset anchored to 2020-02-29)
deaths[, date:=as.Date("2020-02-29")+as.numeric(substr(variable, 4, 6))-5]
deaths[, variable:=NULL]
deaths$deaths <- as.numeric(deaths$deaths)
#deaths<-deaths[, list(deaths=sum(as.numeric(deaths),na.rm = T)), by=.(procode3,trust)]
#Sanity check: total deaths before LA allocation
sum(deaths$deaths)
# this is the number of all emergency admissions in 2018-2019 for each trust split by LA
dt1<-fread("Data/la_trust_lk.csv")
dt1<-dt1[is.na(areacode)==F]
length(unique(dt1$areacode)) #150
sum(deaths$deaths)
#Join the trust->LA lookup; each trust row fans out to every LA it admits from
deaths<-merge(deaths, dt1, by="procode3", all.x = T, allow.cartesian = TRUE)
#Share of each trust's admissions coming from each LA - used as allocation weights
deaths[, fraction:=CountAdm/sum(CountAdm,na.rm = T),by=.(procode3, date)]
# 66 deaths not allocated to LA, trust is not in look up
sum(deaths[is.na(fraction)==T]$deaths)
deaths<-deaths[is.na(fraction)==F]
#we then assume that deaths from each Trust were distributed between LAs based on the historical share of admissions from that LA
deaths[, deaths:=deaths*fraction]
# 2019 LA boundary changes
#Remap superseded LA codes onto their post-2019 successors
#deaths[areaname=="Bournemouth"|areaname=="Poole",`:=` (areacode="E10000009",areaname="Dorset CC")]
deaths[areaname=="Isles of Scilly",`:=` (areacode="E06000052",areaname="Cornwall")]
deaths[areacode=="E10000009",`:=` (areacode="E06000059")]
deaths[areacode=="E06000029",`:=` (areacode="E06000058")]
#Collapse allocated deaths to one row per LA per date
deaths <- deaths %>%
group_by(areacode, date) %>%
summarise(deaths=sum(deaths, na.rm=TRUE))
length(unique(deaths$areacode)) #147
sum(deaths$deaths)
colnames(deaths) <- c("code", "date", "deaths")
#Attach the estimated LA-level deaths to the case data
fulldata <- merge(fulldata, deaths, by=c("code", "date"), all.x=TRUE)
#Per-LA rolling averages plus peak dates/values used to order and colour the heatmaps.
#Note: the result remains grouped by code (no ungroup()), which later code relies on.
heatmap <- fulldata %>%
group_by(code) %>%
mutate(casesroll_avg=roll_mean(cases, 7, align="right", fill=0), deathsroll_avg=roll_mean(deaths, 7, align="right", fill=0)) %>%
mutate(totalcases=max(cumul_cases), maxcaserate=max(casesroll_avg), maxcaseday=date[which(casesroll_avg==maxcaserate)][1],
cumul_deaths=sum(deaths, na.rm=TRUE), totaldeaths=max(cumul_deaths, na.rm=TRUE), maxdeathrate=max(deathsroll_avg, na.rm=TRUE),
maxdeathsday=date[which(deathsroll_avg==maxdeathrate)][1])
#Each day's rolling average expressed as a proportion of that LA's own peak
heatmap$maxcaseprop <- heatmap$casesroll_avg/heatmap$maxcaserate
heatmap$maxdeathprop <- heatmap$deathsroll_avg/heatmap$maxdeathrate
#Enter dates to plot from and to
plotfrom <- "2020-03-03"
plotto <- max(heatmap$date)
#Plot case trajectories
#Heatmap of relative-to-peak case levels; LAs ordered by the date they peaked
casetiles <- ggplot(heatmap, aes(x=date, y=fct_reorder(name, maxcaseday), fill=maxcaseprop))+
geom_tile(colour="White", show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
#scale_fill_viridis_c()+
scale_y_discrete(name="", expand=c(0,0))+
scale_x_date(name="Date", limits=as.Date(c(plotfrom, plotto)), expand=c(0,0))+
labs(title="Timelines for COVID-19 cases in English Local Authorities",
subtitle=paste0("The heatmap represents the 7-day rolling average of the number of new confirmed cases, normalised to the maximum value within the Local Authority.\nLAs are ordered by the date at which they reached their peak number of new cases. Bars on the right represent the absolute number of cases in each LA.\nData updated to ", plotto, ". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from Public Health England | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(0.78)), plot.title.position="plot",
axis.text.y=element_text(colour="Black"), plot.title=element_text(size=rel(2.3)))
#Companion bar chart of total cases, in the same LA order (one row per LA via date==maxcaseday)
casebars <- ggplot(subset(heatmap, date==maxcaseday), aes(x=totalcases, y=fct_reorder(name, maxcaseday), fill=totalcases))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
#scale_fill_viridis_c()+
scale_x_continuous(name="Total confirmed cases", breaks=c(0,2000,4000,6000,8000,10000))+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
#Write combined tiles+bars figure to both tiff and png
tiff("Outputs/COVIDLACasesHeatmap.tiff", units="in", width=16, height=16, res=500)
plot_grid(casetiles, casebars, align="h", rel_widths=c(1,0.2))
dev.off()
png("Outputs/COVIDLACasesHeatmap.png", units="in", width=16, height=16, res=500)
plot_grid(casetiles, casebars, align="h", rel_widths=c(1,0.2))
dev.off()
#Plot death trajectories
#Heatmap of relative-to-peak estimated deaths; LAs ordered by the date their deaths peaked
deathtiles <- ggplot(heatmap, aes(x=date, y=fct_reorder(name, maxdeathsday), fill=maxdeathprop))+
geom_tile(colour="White", show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_y_discrete(name="")+
scale_x_date(name="Date", limits=as.Date(c("2020-03-06", plotto)), expand=c(0,0))+
labs(title="Timelines for COVID-19 deaths in hospital in English Local Authorities",
subtitle=paste0("The heatmap represents the 7-day rolling average of the number of estimated deaths, normalised to the maximum value within the Local Authority.\nLAs are ordered by the date at which they reached their peak number of deaths. Bars on the right represent the absolute number of deaths estimated\nin each LA. Deaths are estimated as COVID-19 mortality data is only available from NHS England at hospital level. LA-level deaths are modelled using\n@Benj_Barr's approach, using the proportion of HES emergency admissions to each hospital in 2018-19 originating from each LA.\nData updated to ", plotto, ". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from NHS England & Ben Barr | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(0.78)), plot.title.position="plot",
axis.text=element_text(colour="Black"), plot.title=element_text(size=rel(2.3)))
#Companion bar chart of total estimated deaths per LA, same ordering
deathbars <- ggplot(subset(heatmap, date==maxdeathsday), aes(x=totaldeaths, y=fct_reorder(name, maxdeathsday), fill=totaldeaths))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_x_continuous(name="Total confirmed deaths")+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
#Write combined figure to both tiff and png
tiff("Outputs/COVIDLADeathHeatmap.tiff", units="in", width=16, height=16, res=500)
plot_grid(deathtiles, deathbars, align="h", rel_widths=c(1,0.2))
dev.off()
png("Outputs/COVIDLADeathHeatmap.png", units="in", width=16, height=16, res=500)
plot_grid(deathtiles, deathbars, align="h", rel_widths=c(1,0.2))
dev.off()
##################################
#Absolute version of the heatmaps#
##################################
#Bring in population data
#ONS mid-2018 population estimates, used as denominators for per-100,000 rates
temp <- tempfile()
source <- "https://www.ons.gov.uk/file?uri=%2fpeoplepopulationandcommunity%2fpopulationandmigration%2fpopulationestimates%2fdatasets%2fpopulationestimatesforukenglandandwalesscotlandandnorthernireland%2fmid20182019laboundaries/ukmidyearestimates20182019ladcodes.xls"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
LApop <- read_excel(temp, sheet="MYE2-All", range="A5:D367", col_names=TRUE)
colnames(LApop) <- c("code", "name", "geography", "pop")
#Attach populations and derive per-100,000 rates
heatmap <- merge(heatmap, LApop[,c(1,4)], by="code")
heatmap$cumul_caserate <- heatmap$totalcases*100000/heatmap$pop
heatmap$cumul_deathrate <- heatmap$totaldeaths*100000/heatmap$pop
heatmap$avgcaserates <- heatmap$casesroll_avg*100000/heatmap$pop
#Plot absolute case trajectories
#As casetiles, but filled by the absolute rolling average rather than relative-to-peak
abscasetiles <- ggplot(heatmap, aes(x=date, y=fct_reorder(name, maxcaseday), fill=casesroll_avg))+
geom_tile(colour="White", show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_y_discrete(name="", expand=c(0,0))+
scale_x_date(name="Date", limits=as.Date(c(plotfrom, plotto)), expand=c(0,0))+
labs(title="Timelines for COVID-19 cases in English Local Authorities",
subtitle=paste0("The heatmap represents the 7-day rolling average of the number of new confirmed cases within each Local Authority.\nLAs are ordered by the date at which they reached their peak number of cases. Bars on the right represent the cumulative number of cases per 100,000 population in each LA.\nData updated to ", plotto, ". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from Public Health England | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(0.78)), plot.title.position="plot",
axis.text=element_text(colour="Black"))
#Companion bars of cumulative case rate per 100,000
abscasebars <- ggplot(subset(heatmap, date==maxcaseday), aes(x=cumul_caserate, y=fct_reorder(name, maxcaseday), fill=cumul_caserate))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_x_continuous(name="Total confirmed cases\nper 100,000 population", breaks=c(0,500,1000,1500))+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
#Write combined figure to both tiff and png
tiff("Outputs/COVIDLACasesHeatmapAbs.tiff", units="in", width=16, height=16, res=500)
plot_grid(abscasetiles, abscasebars, align="h", rel_widths=c(1,0.2))
dev.off()
png("Outputs/COVIDLACasesHeatmapAbs.png", units="in", width=16, height=16, res=500)
plot_grid(abscasetiles, abscasebars, align="h", rel_widths=c(1,0.2))
dev.off()
#Plot absolute case rate trajectories
#Filled by rolling average cases per 100,000 (avgcaserates), bars show LA population
ratetiles <- ggplot(heatmap, aes(x=date, y=fct_reorder(name, maxcaseday), fill=avgcaserates))+
geom_tile(colour="White", show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_y_discrete(name="", expand=c(0,0))+
scale_x_date(name="Date", limits=as.Date(c(plotfrom, plotto)), expand=c(0,0))+
labs(title="Timelines for COVID-19 case rates in English Local Authorities",
subtitle=paste0("The heatmap represents the 7-day rolling average of the number of new confirmed cases per 100,000 population within each Local Authority.\nLAs are ordered by the date at which they reached their peak number of cases. Bars on the right represent the total population of each LA.\nData updated to ", plotto, ". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from Public Health England | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(1.2)), plot.title.position="plot",
axis.text=element_text(colour="Black"), plot.title=element_text(size=rel(2.3)))
#Companion bars of LA population, same ordering
ratebars <- ggplot(subset(heatmap, date==maxcaseday), aes(x=pop, y=fct_reorder(name, maxcaseday), fill=pop))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_x_continuous(name="Population")+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
#Write combined figure to both tiff and png
tiff("Outputs/COVIDLARateHeatmap.tiff", units="in", width=16, height=16, res=500)
plot_grid(ratetiles, ratebars, align="h", rel_widths=c(1,0.2))
dev.off()
png("Outputs/COVIDLARateHeatmap.png", units="in", width=16, height=16, res=500)
plot_grid(ratetiles, ratebars, align="h", rel_widths=c(1,0.2))
dev.off()
#Plot absolute death trajectories
#Fixes vs original: the subtitle said "cases" where the bars actually plot deaths per
#100,000 (cumul_deathrate), was missing a sentence break, and the caption credited PHE
#although the deaths data comes from NHS England (consistent with the death-rate plot below).
death <- ggplot(heatmap, aes(x=date, y=fct_reorder(name, maxdeathsday), fill=deathsroll_avg))+
geom_tile(colour="White", show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_y_discrete(name="", expand=c(0,0))+
scale_x_date(name="Date", limits=as.Date(c("2020-03-06", plotto)), expand=c(0,0))+
labs(title="Timelines for COVID-19 deaths in hospital in English Local Authorities",
subtitle=paste0("The heatmap represents the 7-day rolling average of the number of daily confirmed deaths within each Local Authority.\nLAs are ordered by the date at which they reached their peak number of deaths. Bars on the right represent the cumulative number of deaths per 100,000 population in each LA.\nData updated to ", plotto, ". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from NHS England | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(0.78)), plot.title.position="plot",
axis.text=element_text(colour="Black"), plot.title=element_text(size=rel(2.3)))
#Companion bars of cumulative death rate per 100,000, same ordering
deathbars <- ggplot(subset(heatmap, date==maxdeathsday), aes(x=cumul_deathrate, y=fct_reorder(name, maxdeathsday), fill=cumul_deathrate))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_x_continuous(name="Total confirmed deaths\nper 100,000 population", breaks=c(0,50,100))+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
#Write combined figure to both tiff and png
tiff("Outputs/COVIDLADeathsHeatmapAbs.tiff", units="in", width=16, height=16, res=500)
plot_grid(death, deathbars, align="h", rel_widths=c(1,0.2))
dev.off()
png("Outputs/COVIDLADeathsHeatmapAbs.png", units="in", width=16, height=16, res=500)
plot_grid(death, deathbars, align="h", rel_widths=c(1,0.2))
dev.off()
#Plot death rate trajectories
#Fixes vs original: the subtitle claimed the bars show "the cumulative number of cases
#per 100,000", but deathratebars actually plots each LA's total population; also adds
#the missing sentence break after "deaths".
deathrate <- ggplot(heatmap, aes(x=date, y=fct_reorder(name, maxdeathsday), fill=deathsroll_avg*100000/pop))+
geom_tile(colour="White", show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_y_discrete(name="", expand=c(0,0))+
scale_x_date(name="Date", limits=as.Date(c("2020-03-06", plotto)), expand=c(0,0))+
labs(title="Timelines for COVID-19 death rates in hospitals in English Local Authorities",
subtitle=paste0("The heatmap represents the 7-day rolling average of the number of daily confirmed deaths per 100,000 within each Local Authority.\nLAs are ordered by the date at which they reached their peak number of deaths. Bars on the right represent the total population of each LA.\nData updated to ", plotto, ". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from NHS England | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(0.78)), plot.title.position="plot",
axis.text=element_text(colour="Black"), plot.title=element_text(size=rel(2.3)))
#Companion bars of LA population, same ordering
deathratebars <- ggplot(subset(heatmap, date==maxdeathsday), aes(x=pop, y=fct_reorder(name, maxdeathsday), fill=pop))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_x_continuous(name="Population")+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
#Write combined figure to both tiff and png
tiff("Outputs/COVIDLADeathsRateHeatmap.tiff", units="in", width=16, height=16, res=500)
plot_grid(deathrate, deathratebars, align="h", rel_widths=c(1,0.2))
dev.off()
png("Outputs/COVIDLADeathsRateHeatmap.png", units="in", width=16, height=16, res=500)
plot_grid(deathrate, deathratebars, align="h", rel_widths=c(1,0.2))
dev.off()
##########################
#Map of case trajectories#
##########################
#Download shapefile of LA boundaries
temp <- tempfile()
temp2 <- tempfile()
source <- "https://opendata.arcgis.com/datasets/6638c31a8e9842f98a037748f72258ed_0.zip?outSR=%7B%22latestWkid%22%3A27700%2C%22wkid%22%3A27700%7D"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
unzip(zipfile=temp, exdir=temp2)
#The actual shapefile has a different name each time you download it, so need to fish the name out of the unzipped file
name <- list.files(temp2, pattern=".shp")
shapefile <- st_read(file.path(temp2, name))
#Rename the LA code column to match the case data for joining
names(shapefile)[names(shapefile) == "ctyua17cd"] <- "code"
#Simplify the geometry to 20% of its vertices to keep plot/animation sizes manageable
simplemap <- ms_simplify(shapefile, keep=0.2, keep_shapes = TRUE)
#Duplicate data to account for shapefile using pre-2019 codes
#The 2017 shapefile still carries Bournemouth/Poole/Dorset CC codes, so copy the merged
#BCP authority's data onto each superseded code
int1 <- filter(heatmap, name=="Bournemouth, Christchurch and Poole")
int1$code <- "E06000028"
int2 <- filter(heatmap, name=="Bournemouth, Christchurch and Poole")
int2$code <- "E06000029"
int3 <- filter(heatmap, name=="Bournemouth, Christchurch and Poole")
int3$code <- "E10000009"
temp <- rbind(heatmap, int1, int2, int3)
#Calculate change in cases in the past week
#Fix: rbind() can drop dplyr grouping, so the 7-row lag could previously cross LA
#boundaries; group and order explicitly so the lag stays within each LA's own series
change <- temp %>%
group_by(code) %>%
arrange(date, .by_group=TRUE) %>%
mutate(change=casesroll_avg-lag(casesroll_avg,7)) %>%
ungroup()
#Exclude most recent day as reporting is usually very incomplete
change <- subset(change, date==max-3)
#Join onto the map (full_join keeps all rows from both sides; it takes no all.y argument)
map.change <- full_join(simplemap, change, by="code")
map.change <- map.change %>% drop_na("maxcaseprop")
#Map of past week changes
#Diverging palette: brown = cases rising, blue = falling; symmetric limits centre 0
changemap <- ggplot()+
geom_sf(data=map.change, aes(geometry=geometry, fill=change), colour=NA)+
#Overlay areas with zero current cases in green
geom_sf(data=subset(map.change, casesroll_avg==0), aes(geometry=geometry), fill="#41ab5d", colour=NA)+
xlim(10000,655644)+
ylim(5337,700000)+
theme_classic()+
scale_fill_paletteer_c("scico::roma", limit=c(-1,1)*max(abs(map.change$change)),
name="Change in case numbers\nin the past week", breaks=c(-60,-30, 0,30,60),
labels=c("-60", "-30", "0", "+30", "+60"),direction=-1)+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.subtitle=element_markdown())+
labs(title="Recent changes in COVID-19 case numbers across England",
subtitle="<span style='color:Grey50;'>Has the 7-day rolling average of case numbers <span style='color:#854B01FF;'>risen<span style='color:Grey50;'> or <span style='color:#014380FF;'>fallen<span style='color:Grey50;'> in the past week?<br>Areas with 0 cases shown in <span style='color:#41ab5d;'>green",
caption="Data from Public Health England | Plot by @VictimOfMaths")+
#Grey rectangles mark the regions blown up in the inset maps below (London, NW, NE)
geom_rect(aes(xmin=500000, xmax=560000, ymin=156000, ymax=200000), fill="transparent",
colour="gray50")+
geom_rect(aes(xmin=310000, xmax=405000, ymin=370000, ymax=430000), fill="transparent",
colour="gray50")+
geom_rect(aes(xmin=405000, xmax=490000, ymin=505000, ymax=580000), fill="transparent",
colour="gray50")
#Add zoomed in areas
#Each inset reuses the same data/scale as changemap but crops to one region
#London
London <- ggplot()+
geom_sf(data=map.change, aes(geometry=geometry, fill=change), colour=NA, show.legend=FALSE)+
geom_sf(data=subset(map.change, casesroll_avg==0), aes(geometry=geometry), fill="#41ab5d", colour=NA)+
scale_x_continuous(limits=c(500000,560000), expand=c(0,0))+
scale_y_continuous(limits=c(156000,200000), expand=c(0,0))+
theme_classic()+
#Same symmetric limits as the main map so inset colours match
scale_fill_paletteer_c("scico::roma", limit=c(-1,1)*max(abs(map.change$change)), direction=-1)+
labs(title="Greater London")+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.title=element_text(size=rel(0.9)))
#North-West England
NWEng <-ggplot()+
geom_sf(data=map.change, aes(geometry=geometry, fill=change), colour=NA, show.legend=FALSE)+
geom_sf(data=subset(map.change, casesroll_avg==0), aes(geometry=geometry), fill="#41ab5d", colour=NA)+
scale_x_continuous(limits=c(310000,405000), expand=c(0,0))+
scale_y_continuous(limits=c(370000,430000), expand=c(0,0))+
theme_classic()+
scale_fill_paletteer_c("scico::roma", limit=c(-1,1)*max(abs(map.change$change)), direction=-1)+
labs(title="The North West")+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.title=element_text(size=rel(0.9)))
#Tyne/Tees
NEEng <- ggplot()+
geom_sf(data=map.change, aes(geometry=geometry, fill=change), colour=NA, show.legend=FALSE)+
geom_sf(data=subset(map.change, casesroll_avg==0), aes(geometry=geometry), fill="#41ab5d", colour=NA)+
scale_x_continuous(limits=c(405000,490000), expand=c(0,0))+
scale_y_continuous(limits=c(505000,580000), expand=c(0,0))+
theme_classic()+
scale_fill_paletteer_c("scico::roma", limit=c(-1,1)*max(abs(map.change$change)), direction=-1)+
labs(title="The North East")+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.title=element_text(size=rel(0.9)))
#Compose the main map with the three insets (draw_plot args: x, y, width, height in [0,1])
tiff("Outputs/COVIDChangesmapEng.tiff", units="in", width=9, height=11, res=500)
ggdraw()+
draw_plot(changemap)+
draw_plot(London, 0.01,0.34,0.32,0.21)+
draw_plot(NWEng, 0.01,0.57, 0.32, 0.24)+
draw_plot(NEEng, 0.57, 0.62, 0.22, 0.22)
dev.off()
#For animation
#Join the full time series (including duplicated pre-2019 codes) onto the simplified map.
#Fix: full_join() takes no all.y argument - a full join already keeps all rows from both sides
map.data <- full_join(simplemap, temp, by="code")
#remove areas with no case data (i.e. Scotland, Wales & NI)
map.data <- map.data %>% drop_na("maxcaseprop")
#Animation of case trajectories
#Animated choropleth filled by each day's cases as a proportion of the LA's own peak
CaseAnim <- ggplot(subset(map.data, date>as.Date("2020-02-25")), aes(geometry=geometry, fill=maxcaseprop))+
geom_sf(colour=NA)+
xlim(10000,655644)+
ylim(5337,700000)+
theme_classic()+
scale_fill_distiller(palette="Spectral", name="Daily cases as a %\nof peak cases", breaks=c(0,0.25,0.5,0.75,1),
labels=c("0%", "25%", "50%", "75%", "100%"))+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.title=element_text(face="bold"))+
#One frame per date; {frame_time} is substituted by gganimate each frame
transition_time(date)+
labs(title="Visualising the spread of the pandemic across England",
subtitle="Rolling 7-day average number of new confirmed cases coloured relative to the\npeak in each Local Authority (i.e. dark red represents the peak of new cases).\nDate: {frame_time}",
caption="Data from Public Health England | Visualisation by @VictimOfMaths")
#Render to GIF (end_pause holds the final frame)
animate(CaseAnim, duration=25, fps=10, width=2000, height=3000, res=300, renderer=gifski_renderer("Outputs/CaseAnim.gif"), end_pause=60)
#Animation of death trajectories
#As CaseAnim, but for deaths relative to each LA's own peak
DeathAnim <- ggplot(subset(map.data, date>as.Date("2020-03-03")), aes(geometry=geometry, fill=maxdeathprop))+
geom_sf(colour=NA)+
xlim(10000,655644)+
ylim(5337,700000)+
theme_classic()+
scale_fill_distiller(palette="Spectral", name="Daily deaths as a %\nof peak deaths", breaks=c(0,0.25,0.5,0.75,1),
labels=c("0%", "25%", "50%", "75%", "100%"))+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.title=element_text(face="bold"))+
transition_time(date)+
labs(title="Visualising the spread of the pandemic across England",
subtitle="Rolling 7-day average number of new confirmed deaths coloured relative to the\npeak in each Local Authority (i.e. dark red represents the peak in deaths).\nDate: {frame_time}",
caption="Data from NHS England | Visualisation by @VictimOfMaths")
#Render to GIF
animate(DeathAnim, duration=18, fps=10, width=2000, height=3000, res=300, renderer=gifski_renderer("Outputs/DeathAnim.gif"), end_pause=60)
#Animation of absolute case numbers
#Filled by the absolute 7-day rolling average of new cases
CaseAnimAbs <- ggplot(subset(map.data, date>as.Date("2020-02-25")), aes(geometry=geometry, fill=casesroll_avg))+
geom_sf(colour=NA)+
xlim(10000,655644)+
ylim(5337,700000)+
theme_classic()+
scale_fill_distiller(palette="Spectral", name="Daily confirmed cases", na.value="white")+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.title=element_text(face="bold"))+
transition_time(date)+
labs(title="Visualising the spread of the pandemic across England",
subtitle="Rolling 7-day average number of new confirmed cases.\nDate: {frame_time}",
caption="Data from Public Health England | Visualisation by @VictimOfMaths")
#Render to GIF
animate(CaseAnimAbs, duration=25, fps=10, width=2000, height=3000, res=300, renderer=gifski_renderer("Outputs/CaseAnimAbs.gif"), end_pause=60)
#Animation of death rates
#Filled by deaths per 100,000 population
DeathRateAnim <- ggplot(subset(map.data, date>as.Date("2020-03-03")), aes(geometry=geometry, fill=deathsroll_avg*100000/pop))+
geom_sf(colour=NA)+
xlim(10000,655644)+
ylim(5337,700000)+
theme_classic()+
scale_fill_distiller(palette="Spectral", name="Daily deaths\nper 100,000")+
theme(axis.line=element_blank(), axis.ticks=element_blank(), axis.text=element_blank(),
axis.title=element_blank(), plot.title=element_text(face="bold"))+
transition_time(date)+
labs(title="Visualising the spread of the pandemic across England",
subtitle="Rolling 7-day average number of new confirmed COVID-19 deaths in hospitals per 100,000\nDate: {frame_time}",
caption="Data from NHS England | Visualisation by @VictimOfMaths")
#Render to GIF
animate(DeathRateAnim, duration=18, fps=10, width=2000, height=3000, res=300, renderer=gifski_renderer("Outputs/DeathRateAnim.gif"), end_pause=60)
#Quick analysis of potential COVID 'bumps'
# Three date-by-area tile heatmaps of `avgcaserates` (7-day rolling average
# of new cases per 100,000), each saved as a high-resolution TIFF.
# South-coast holiday destinations (checking for a post-May-weather bump).
temp1 <- subset(heatmap, name %in% c("Dorset", "Cornwall and Isles of Scilly", "Devon", "Bournemouth, Christchurch and Poole",
"West Sussex", "East Sussex", "Brighton and Hove"))
tiff("Outputs/COVIDSouthCoast.tiff", units="in", width=8, height=4, res=500)
# Note: date > "2020-05-01" compares a Date to a character string; R coerces
# the string to Date, so this works as intended.
ggplot(subset(temp1, date>"2020-05-01"), aes(x=date, y=name, fill=avgcaserates))+
geom_tile(colour="white")+
scale_fill_distiller(palette="Spectral", name="New cases\nper 100,000")+
scale_x_date(name="Date")+
scale_y_discrete(name="")+
theme_classic()+
labs(title="No clear signs of a rise in cases after the sunny May weather",
subtitle="7-day rolling average of new confirmed COVID-19 cases",
caption="Data from PHE | Plot by @VictimOfMaths")
dev.off()
# Inner-London boroughs (checking for a post-protest bump).
# NOTE(review): "Lewisham" is listed twice below; harmless with %in% but
# probably unintended.
temp2 <- subset(heatmap, name %in% c("Islington", "Camden", "Hackney", "Southwark", "Tower Hamlets",
"Lambeth", "Lewisham", "Haringey", "Westminster", "Kensington and Chelsea",
"Hammersmith and Fulham", "Wandsworth", "Lewisham", "Newham"))
tiff("Outputs/COVIDLondon.tiff", units="in", width=8, height=5, res=500)
ggplot(subset(temp2, date>"2020-05-01"), aes(x=date, y=name, fill=avgcaserates))+
geom_tile(colour="white")+
scale_fill_distiller(palette="Spectral", name="New cases\nper 100,000")+
scale_x_date(name="Date")+
scale_y_discrete(name="")+
theme_classic()+
labs(title="No clear evidence of a rise in cases after the protests in Central London",
subtitle="7-day rolling average of new confirmed COVID-19 cases",
caption="Data from PHE | Plot by @VictimOfMaths")
dev.off()
# Areas with high combined Pillar 1 + 2 testing in week 25.
temp3 <- subset(heatmap, name %in% c("Leicester", "Bedford", "Barnsley", "Rotherham",
"Kirklees", "Bradford", "Rochdale", "Oldham",
"Tameside", "Blackburn with Darwen"))
tiff("Outputs/COVIDPillarsHeatmap.tiff", units="in", width=10, height=5, res=500)
ggplot(subset(temp3, date>"2020-05-01"), aes(x=date, y=name, fill=avgcaserates))+
geom_tile(colour="white")+
scale_fill_distiller(palette="Spectral", name="New cases\nper 100,000")+
scale_x_date(name="Date")+
scale_y_discrete(name="")+
theme_classic()+
labs(title="Mixed Pillar 1 trajectories in areas with high combined Pillar 1 and 2 tests in week 25",
subtitle="7-day rolling average of new confirmed COVID-19 cases",
caption="Data from PHE | Plot by @VictimOfMaths")
dev.off()
#Graph of pillar 1 tests in any LA you like
# Daily bar chart of new cases for one Local Authority, overlaid with the
# 7-day rolling average as a red line; output file name includes the LA.
LA <- "Bradford"
tiff(paste0("Outputs/COVIDNewCases", LA, ".tiff"), units="in", width=8, height=6, res=500)
ggplot()+
geom_col(data=subset(heatmap, name==LA), aes(x=date, y=cases), fill="skyblue2")+
# NOTE(review): `max` below must be a Date-like variable defined earlier in
# the script (outside this excerpt); if it is not, `max - 1` hits the base
# function max() and errors -- TODO confirm. Likely intent: drop the last
# (incomplete) day from the rolling-average line.
geom_line(data=subset(heatmap, name==LA & date<max-1), aes(x=date, y=casesroll_avg), colour="red")+
scale_x_date(name="Date")+
scale_y_continuous("New COVID-19 cases")+
theme_classic()+
# element_markdown() (ggtext) renders the HTML span in the subtitle.
theme(plot.subtitle=element_markdown())+
labs(title=paste0("Confirmed new COVID cases in ",LA),
subtitle="Confirmed new COVID-19 cases identified through combined pillar 1 & 2 testing and the <span style='color:Red;'>7-day rolling average",
caption="Data from PHE | Plot by @VictimOfMaths")
dev.off()
#New lockdown areas
# Heatmap of case rates in the areas placed under renewed local restrictions.
# Note this reuses/overwrites the scalar `LA` defined in the section above.
LA <- c("Calderdale", "Blackburn with Darwen", "Leicester", "Bury", "Oldham", "Manchester",
"Salford", "Rochdale", "Stockport", "Tameside", "Trafford", "Wigan", "Bolton",
"Kirklees", "Lancashire")
tiff("Outputs/COVIDNewLockdown.tiff", units="in", width=10, height=5, res=500)
ggplot(subset(heatmap, name %in% LA), aes(x=date, y=name, fill=avgcaserates))+
geom_tile(colour="white")+
# Dashed reference lines: Leicester lockdown announcement (29 Jun, blue)
# and the northern-England restrictions (31 Jul, red). yend=16 spans the
# 15 listed areas plus margin.
geom_segment(aes(x=as.Date("2020-06-29"), xend=as.Date("2020-06-29"), y=0, yend=16), colour="NavyBlue", linetype=2)+
geom_segment(aes(x=as.Date("2020-07-31"), xend=as.Date("2020-07-31"), y=0, yend=16), colour="Red", linetype=2)+
scale_fill_distiller(palette="Spectral", name="New cases\nper 100,000")+
scale_x_date(name="Date")+
scale_y_discrete(name="")+
theme_classic()+
labs(title="Trajectories of COVID cases in areas with second lockdown restrictions",
subtitle="7-day rolling average of new confirmed COVID-19 cases per 100,000 inhabitants",
caption="Data from PHE | Plot by @VictimOfMaths")
dev.off()
|
##################################################################
##################################################################
## functional linear model
##################################################################
##################################################################
# SuperLearner wrapper: functional linear model fit with an adaptive spline
# smooth from mgcv. Gaussian outcomes only.
#
# Standard SuperLearner wrapper signature:
#   Y          - numeric outcome vector.
#   X, newX    - training / prediction matrices; each column is one
#                evaluation point of the functional covariate.
#   family     - gaussian() is supported; binomial() raises an error.
#   obsWeights, id - accepted for interface compatibility; unused here.
# Returns list(pred = predictions, fit = empty 'SL.template' stub).
SL.flm <- function(Y, X, newX, family, obsWeights, id, ...){
# mgcv must be attached (not just loaded) because gam() is called unqualified.
stopifnot(require(mgcv))
if(family$family == 'gaussian'){
X <- as.matrix(X)
newX <- as.matrix(newX)
ntrain <- nrow(X)
ntest <- nrow(newX)
# Domain index 1..ncol(X), replicated per training row as required by
# mgcv's s(..., by = X) construction for functional covariates.
freq <- matrix(1:ncol(X), ncol=ncol(X), nrow=ntrain, byrow=TRUE)
# Adaptive smooth ("ad" basis); basis dimension capped at min(n - 1, 40).
m <- try(gam(Y ~ s(freq, by=X, k=min(nrow(X)-1, 40), bs="ad")))
# NOTE(review): inherits(m, "try-error") would be the more robust check.
if(class(m)[1] != "try-error"){
# Rebuild the index matrix with one row per prediction observation.
freq <- matrix(1:ncol(X), ncol=ncol(X), nrow=ntest, byrow=TRUE)
pred <- predict(m, newdata=list(X=newX, freq=freq))
} else {
# Model fitting failed: return NA predictions rather than erroring.
pred = rep(NA, nrow(newX))
}
}
if(family$family == 'binomial'){
stop("Only gaussian outcomes allowed")
}
# No fit information is retained; predict.SL.template is a stub, so this
# learner cannot predict on new data after training.
fit = vector("list", length = 0)
class(fit) <- 'SL.template'
out <- list(pred = pred, fit = fit)
return(out)
} | /SL-fda/SL.flm.R | no_license | ecpolley/SuperLearnerExtra | R | false | false | 1,099 | r | 
##################################################################
## functional linear model
##################################################################
##################################################################
# SuperLearner wrapper: functional linear model fit with an adaptive spline
# smooth from mgcv. Gaussian outcomes only.
#
# Standard SuperLearner wrapper signature:
#   Y          - numeric outcome vector.
#   X, newX    - training / prediction matrices; each column is one
#                evaluation point of the functional covariate.
#   family     - gaussian() is supported; anything else raises an error.
#   obsWeights, id - accepted for interface compatibility; unused here.
# Returns list(pred = predictions, fit = empty 'SL.template' stub).
SL.flm <- function(Y, X, newX, family, obsWeights, id, ...){
  # mgcv must be attached (not just loaded) because gam() is called unqualified.
  stopifnot(require(mgcv))
  if (family$family == 'binomial') {
    stop("Only gaussian outcomes allowed")
  }
  if (family$family != 'gaussian') {
    # The original fell through and failed with "object 'pred' not found"
    # for other families; fail with an explicit message instead.
    stop("SL.flm only supports gaussian outcomes")
  }
  X <- as.matrix(X)
  newX <- as.matrix(newX)
  ntrain <- nrow(X)
  ntest <- nrow(newX)
  # Domain index 1..ncol(X), replicated per training row as required by
  # mgcv's s(..., by = X) construction for functional covariates.
  freq <- matrix(1:ncol(X), ncol = ncol(X), nrow = ntrain, byrow = TRUE)
  # Adaptive smooth ("ad" basis); basis dimension capped at min(n - 1, 40).
  m <- try(gam(Y ~ s(freq, by = X, k = min(nrow(X) - 1, 40), bs = "ad")))
  if (!inherits(m, "try-error")) {
    # Rebuild the index matrix with one row per prediction observation.
    freq <- matrix(1:ncol(X), ncol = ncol(X), nrow = ntest, byrow = TRUE)
    pred <- predict(m, newdata = list(X = newX, freq = freq))
  } else {
    # Model fitting failed: return NA predictions rather than erroring.
    pred <- rep(NA, nrow(newX))
  }
  # No fit information is retained; predict.SL.template is a stub, so this
  # learner cannot predict on new data after training.
  fit <- vector("list", length = 0)
  class(fit) <- 'SL.template'
  out <- list(pred = pred, fit = fit)
  return(out)
}
#Monte Carlo Integration 1-Dimension
#Goal: Write a Monte Carlo integrator for a 1D function and compare it with
# (1) Calculus (By Hand)
# (2) Numerical Integration (R Tools)
# (3) Monte Carlo (hit-or-miss) Integration
#Function Under Consideration:
#f(x) = sin(x)
#Interval: 0 to 1
integrand <- function(x) {sin(x)} #<-- Will change based on function

# Integration limits (x) and bounding-box height (y) for hit-or-miss sampling.
# NOTE: maxY should tightly bound f over [minX, maxX]; a loose bound (100
# here, vs max(sin) = 1 on [0, 1]) still converges but wastes most samples.
minX <- 0
maxX <- 1
minY <- 0
maxY <- 100

#####################
# Plot the Function #
#####################
par(mfrow = c(1, 2)) # set the plotting area into a 1*2 array
x <- seq(minX, maxX, 0.1)
# BUG FIX: the title previously read "f(x) = 1/x^2(x^2+25)", left over from
# a different exercise; it now matches the actual integrand.
plot(x, integrand(x), type = 'l',
     col = 'black', main = "f(x) = sin(x)", ylab = "f(x)")

################################
# (1) Calculus (i.e., By Hand) #
################################
antiDerivative <- function(x) {-cos(x)} #<--Will Need to Change
calcIntegral <- antiDerivative(maxX) - antiDerivative(minX)
cat("Calculus (By Hand) Integral", "\t= ", calcIntegral, "\n")

#############################
# (2) Numerical Integration #
#############################
soln <- integrate(integrand, lower = minX, upper = maxX)
numericalIntegral <- soln$value
cat("Numerical Integration", "\t\t= ", numericalIntegral, "\n")

##############################
# (3) Monte Carlo Integrator #
##############################
maxSamples <- 50000
# Uniform points in the bounding box; the fraction landing under the curve
# times the box area estimates the integral.
xVals <- runif(maxSamples, minX, maxX)
yVals <- runif(maxSamples, minY, maxY)
runningIntegral <- rep(0, maxSamples)
boxArea <- (maxX - minX) * (maxY - minY)
numPointsUnderCurve <- 0
for (j in seq_len(maxSamples))
{
  if (yVals[j] < integrand(xVals[j])) {
    numPointsUnderCurve <- numPointsUnderCurve + 1
    points(xVals[j], yVals[j], col = 'red')
  }
  else {
    points(xVals[j], yVals[j], col = 'blue')
  }
  # Running hit-or-miss estimate after j samples (for the convergence plot).
  runningIntegral[j] <- (numPointsUnderCurve / j) * boxArea
}
plot(seq(1, maxSamples, 1), abs(runningIntegral - numericalIntegral) / numericalIntegral,
     type = 'l', col = 'black',
     main = "Relative Error in Integral", ylab = "Error",
     xlab = "Num Samples")
monteCarloIntegral <- (numPointsUnderCurve / maxSamples) * boxArea
cat("Monte Carlo Integration", "\t= ", monteCarloIntegral, "\n")
| /Lab_07/monteCarlo1D.R | no_license | ifelsejet/MATH-032 | R | false | false | 2,063 | r | #Monte Carlo Integration 1-Dimension
#Goal: Write a Monte Carlo integrator for a 1D Function
# (1) Calculus (By Hand)
# (2) Numerical Integration (R Tools)
# (3) Monte Carlo Integration
#Function Under Consideration:
#f(x) = 3 x^2 + 1
#Interval: 1 to 3
integrand <- function(x) {sin(x)} #<-- Will change based on function
minX = 0;
maxX = 1;
minY = 0;
maxY = 100;
#####################
# Plot the Function #
#####################
par(mfrow=c(1,2)) # set the plotting area into a 1*2 array
x <- seq(minX,maxX,0.1)
plot(x, integrand(x),type = 'l',
col='black',main="f(x) = 1/x^2(x^2+25)",ylab="f(x)")
################################
# (1) Calculus (i.e., By Hand) #
################################
antiDerivative <- function(x) {-cos(x)} #<--Will Need to Change
calcIntegral = antiDerivative(maxX) - antiDerivative(minX);
cat("Calculus (By Hand) Integral", "\t= ",calcIntegral,"\n")
#############################
# (2) Numerical Integration #
#############################
soln = integrate(integrand, lower = minX, upper = maxX)
numericalIntegral = soln$value;
cat("Numerical Integration", "\t\t= ",numericalIntegral,"\n")
##############################
# (3) Monte Carlo Integrator #
##############################
maxSamples = 50000;
xVals=runif(maxSamples,minX,maxX);
yVals=runif(maxSamples,minY,maxY);
runningIntegral = rep(0,maxSamples);
boxArea = (maxX - minX)*(maxY - minY);
numPointsUnderCurve = 0;
for (j in seq(from=1,to=maxSamples,by=1))
{
if(yVals[j] < integrand(xVals[j])){
numPointsUnderCurve = numPointsUnderCurve +1;
points(xVals[j],yVals[j],col='red')
}
else{
points(xVals[j],yVals[j],col='blue')
}
runningIntegral[j] = (numPointsUnderCurve/j)*boxArea
}
plot(seq(1,maxSamples,1),abs(runningIntegral-numericalIntegral)/numericalIntegral,
type = 'l',col='black',
main="Relative Error in Integral",ylab="Error",
xlab="Num Samples")
monteCarloIntegral = (numPointsUnderCurve/maxSamples)*boxArea;
cat("Monte Carlo Integration", "\t= ",monteCarloIntegral,"\n")
|
#############################
##
# Author: Gina Nichols
#
# Date Created: Oct 22
#
# Date last modified: Oct 30 2018 - by Andrea
# Nov 1 2018 - Gina, added biomass separated by following crop graph
# Oct 29 2019 - updated code, new database
#
# Purpose: Create figures for ASA presentation
#
# Inputs: td_cc-database-clean
#
# Outputs: several figs
#
# NOtes:
#
##############################
rm(list=ls())
library(tidyverse)
library(usmap) # pre-loaded maps package
library(ggridges) # makes nice distribution plots
library(ggpubr) # has nice themes
# Read in data ------------------------------------------------------------
# Load the long-format cover-crop database and derive `tmeth_nice`, a
# human-readable label for the termination-method code in `ccterm_meth`
# (W = winterkill, M = mechanical, H = herbicide, D = combined mech/herb).
dat <- read_csv("_tidydata/td_cc-database-clean-long.csv") %>%
  mutate(tmeth_nice = ifelse(ccterm_meth == "W", "Winterkill", NA),
         tmeth_nice = ifelse(ccterm_meth == "M", "Mechanical", tmeth_nice),
         tmeth_nice = ifelse(ccterm_meth == "H", "Herbicide", tmeth_nice),
         # BUG FIX: was `ifelse(ccterm_meth) == "D", ...` -- the misplaced
         # parenthesis called ifelse() with one argument (a runtime error)
         # and passed the label and fallback as stray positional args.
         tmeth_nice = ifelse(ccterm_meth == "D", "Combo Mech/Herb", tmeth_nice))
# Fig 6 - WW/LL -----------------------------------------------------
# Export the paired yield / weed log response ratios for external use.
lunchdat <-
dat %>%
filter(!is.na(LRR)) %>%
select(obs_no, yieldLRR, LRR) %>%
rename(weedLRR = LRR)
lunchdat %>% write_csv("_tidydata/sd_yieldLRR-weedLRR.csv")
# NOTE: NAs indicate weed points without yields
# Count observations in each quadrant (More/Less weeds x More/Less yield);
# the outer parentheses auto-print the result.
(dat_lab6 <-
dat %>%
filter(!is.na(LRR)) %>%
mutate(ww_color = ifelse( (LRR >= 0 & yieldLRR >= 0 ), "Mw-My", NA),
ww_color = ifelse( (LRR >= 0 & yieldLRR < 0 ), "Mw-Ly", ww_color),
ww_color = ifelse( (LRR < 0 & yieldLRR < 0 ), "Lw-Ly", ww_color),
ww_color = ifelse( (LRR < 0 & yieldLRR >= 0 ), "Lw-My", ww_color)) %>%
group_by(ww_color) %>%
summarise(n = n()) )
# Reduced weeds # of points
lessWmoreY <- as.numeric(dat_lab6 %>% filter(ww_color == "Lw-My") %>% pull(n))
lessWlessY <- as.numeric(dat_lab6 %>% filter(ww_color == "Lw-Ly") %>% pull(n))
# Inc weeds
moreWlessY <- as.numeric(dat_lab6 %>% filter(ww_color == "Mw-Ly") %>% pull(n))
moreWmoreY <- as.numeric(dat_lab6 %>% filter(ww_color == "Mw-My") %>% pull(n))
# Quadrant scatter of yield vs weed response, annotated with the counts
# computed above. NOTE(review): this mutate uses strict > / < (unlike the
# >= used for the counts), so rows with a response exactly 0 become NA
# here -- confirm the counts and the colouring are meant to match.
dat %>%
mutate(ww_color = ifelse( (LRR > 0 & yieldLRR > 0 ), "LL", NA),
ww_color = ifelse( (LRR > 0 & yieldLRR < 0 ), "LW", ww_color),
ww_color = ifelse( (LRR < 0 & yieldLRR < 0 ), "WW", ww_color),
ww_color = ifelse( (LRR < 0 & yieldLRR > 0 ), "WL", ww_color)) %>%
group_by(ww_color) %>%
mutate(n = n()) %>%
# Remove that one stinker point, just for graphing
filter(yieldLRR > -4) %>%
ggplot(aes(yieldLRR, LRR)) +
geom_point(aes(color = ww_color), size = 5) +
geom_hline(yintercept = 0) +
geom_vline(xintercept = 0) +
coord_cartesian(xlim = c(-3, 3), ylim = c(-5, 5)) +
geom_text(x = -3, y = 4, label = paste0("(-) yield (+) weeds\n n = ", moreWlessY), hjust = 0, size = 6) +
geom_text(x = -3, y = -4, label = paste0("(-) yield (-) weeds\n n = ", lessWlessY), hjust = 0, size = 6) +
geom_text(x = 3, y = 4, label = paste0("(+) yield (+) weeds\n n = ", moreWmoreY), hjust = 1, size = 6) +
geom_text(x = 3, y = -4, label = paste0("(+) yield (-) weeds\n n = ", lessWmoreY), hjust = 1, size = 6) +
guides(color = F) +
labs(x = "Yield Response to Cover Crop", y = "Weed Response to Cover Crop") +
scale_color_manual(values = c("gray", "red", "green3", "gray")) +
theme_classic() +
theme(axis.text = element_text(size = 14),
axis.title = element_text(size = 16))
ggsave("../_figs/data-Win-Win-scenariosv2.png")
# Fig 7 - cc biomass vs RR -----------------------------------------------------
# Weed response (log response ratio) vs cover-crop biomass, overall.
# NOTE(review): gather() is superseded; pivot_longer() is the modern form.
dat %>%
gather(bioLRR:denLRR, key = "resp", value = "LRR") %>%
ggplot(aes(cc_bio_kgha, LRR)) +
geom_hline(yintercept = 0) +
geom_point(size = 5, pch = 21, fill = "green4") +
# geom_smooth(method = "lm", se = F, color = "red", size = 4) +
# geom_vline(xintercept = 0) +
labs(x = "Cover Crop Biomass [kg/ha]", y = "Weed Response to Cover Crop") +
theme_classic() +
theme(axis.text = element_text(size = 14),
axis.title = element_text(size = 16))
ggsave("../_figs/data-CCbiomass-vs-LRR.png")
# Separate by cc type
# Faceted by cover-crop type (grass/legume) and response measure.
dat %>%
gather(bioLRR:denLRR, key = "resp", value = "LRR") %>%
filter(cc_type %in% c("grass", "legume")) %>%
ggplot(aes(cc_bio_kgha, LRR)) +
geom_hline(yintercept = 0) +
geom_point(size = 5, pch = 21, aes(fill= cc_type)) +
geom_smooth(method = "lm", se = F, color = "red", size = 3) +
#geom_vline(xintercept = 0) +
labs(x = "Cover Crop Biomass [kg/ha]", y = "Weed Response to Cover Crop") +
theme_light() +
facet_grid(cc_type~resp) +
theme(axis.text = element_text(size = 14),
axis.title = element_text(size = 16))
#ggsave("../_figs/data-CCbiomass-vs-LRR-by-resp.png")
# Separate by crop_follow AND cc_type - All grasses are followed by soybeans
dat %>%
gather(bioLRR:denLRR, key = "resp", value = "LRR") %>%
filter(crop_follow %in% c("corn", "soybean")) %>%
filter(cc_type %in% c("legume", "grass")) %>%
mutate(crop_follow = recode(crop_follow,
corn = "Maize",
soybean = "Soybean")) %>%
ggplot(aes(cc_bio_kgha, LRR, fill = cc_type)) +
geom_hline(yintercept = 0) +
geom_point(size = 5, pch = 22) +
#geom_smooth(method = "lm", se = F, color = "red", size = 3) +
#geom_vline(xintercept = 0) +
labs(x = "Cover Crop Biomass [kg/ha]", y = "Weed Response to Cover Crop",
title = "All Grass Cover Crops are Followed By Soybeans") +
theme_bw() +
scale_fill_manual(values = c("green3", "blue3"),
name = "CC-Type") +
#scale_shape_manual(values = c(22, 21)) +
# labels = c("legume.Maize", "grass.Soybean", "legume.Soybean"),
# name = "CC-Type.Following-Crop") +
#
#
facet_grid(.~crop_follow) +
guides(shape = F) +
theme(axis.text = element_text(size = rel(1.2)),
axis.title = element_text(size = rel(1.4)),
strip.text = element_text(size = rel(1.5)),
legend.text = element_text(size = rel(1.4)),
legend.title = element_text(size = rel(1.5)))
ggsave("../_figs/data-CCbiomass-vs-LRR-by-cropfollow-and-cctype-v2.png")
# Same biomass-vs-response scatter, faceted by cover-crop type, with a
# pooled linear fit per facet.
dat %>%
gather(bioLRR:denLRR, key = "resp", value = "LRR") %>%
ggplot(aes(cc_bio_kgha, LRR)) +
geom_hline(yintercept = 0) +
geom_point(size = 5, pch = 21, fill = "green4") +
geom_smooth(method = "lm", se = F, color = "red", size = 4) +
#geom_vline(xintercept = 0) +
labs(x = "Cover Crop Biomass [kg/ha]", y = "Weed Response to Cover Crop") +
theme_classic() +
facet_grid(.~cc_type) +
theme(axis.text = element_text(size = 14),
axis.title = element_text(size = 16))
ggsave("../_figs/data-CCbiomass-vs-LRR-by-cctype.png")
##AB changes
##removing Fisk study with low biomass numbers
# NOTE(review): `!dat$study == 5` parses as `!(dat$study == 5)` because
# unary `!` binds more loosely than `==` in R, so this works as intended.
# Rows where `study` is NA yield NA in the index and become all-NA rows --
# the behaviour the comment below refers to; `%in%` would drop them instead,
# but that would change the result, so the original form is kept.
dat2 <- dat[!dat$study ==5, ]
##for crop following, removing NAs and corn/soy groups - studies 11, 12, 13
dat2 <- dat2[!dat2$study ==11, ]
dat2 <- dat2[!dat2$study ==12, ]
dat2 <- dat2[!dat2$study ==13, ]
#remove outliers
dat2 <- dat2[!dat2$obs_no ==137, ]
dat2 <- dat2[!dat2$obs_no ==49, ]
dat2 <- dat2[!dat2$obs_no ==51, ]
#this was the only way I could keep the dataframe from adding random NAs
# Biomass-vs-response scatter on the filtered data, faceted by the crop
# that follows the cover crop, with a linear fit per facet.
dat2 %>%
gather(bioLRR:denLRR, key = "resp", value = "LRR") %>%
ggplot(aes(cc_bio_kgha, LRR)) +
geom_hline(yintercept = 0) +
geom_point(size = 5, pch = 21, fill = "green4") +
geom_smooth(method = "lm", se = F, color = "red", size = 4) +
#geom_vline(xintercept = 0) +
labs(x = "Cover Crop Biomass [kg/ha]", y = "Weed Response to Cover Crop") +
theme_classic() +
facet_grid(.~crop_follow) +
theme(axis.text = element_text(size = 14),
axis.title = element_text(size = 16))
ggsave("../_figs/data-CCbiomass-vs-LRR-by-cropfollow.png")
| /code/old/code_CCmeta_figs-for-Lunchinators.R | no_license | spoicts/ccweedmeta-analysis | R | false | false | 7,809 | r | #############################
##
# Author: Gina Nichols
#
# Date Created: Oct 22
#
# Date last modified: Oct 30 2018 - by Andrea
# Nov 1 2018 - Gina, added biomass separated by following crop graph
# Oct 29 2019 - updated code, new database
#
# Purpose: Create figures for ASA presentation
#
# Inputs: td_cc-database-clean
#
# Outputs: several figs
#
# NOtes:
#
##############################
rm(list=ls())
library(tidyverse)
library(usmap) # pre-loaded maps package
library(ggridges) # makes nice distribution plots
library(ggpubr) # has nice themes
# Read in data ------------------------------------------------------------
# Load the long-format cover-crop database and derive `tmeth_nice`, a
# human-readable label for the termination-method code in `ccterm_meth`
# (W = winterkill, M = mechanical, H = herbicide, D = combined mech/herb).
dat <- read_csv("_tidydata/td_cc-database-clean-long.csv") %>%
  mutate(tmeth_nice = ifelse(ccterm_meth == "W", "Winterkill", NA),
         tmeth_nice = ifelse(ccterm_meth == "M", "Mechanical", tmeth_nice),
         tmeth_nice = ifelse(ccterm_meth == "H", "Herbicide", tmeth_nice),
         # BUG FIX: was `ifelse(ccterm_meth) == "D", ...` -- the misplaced
         # parenthesis called ifelse() with one argument (a runtime error)
         # and passed the label and fallback as stray positional args.
         tmeth_nice = ifelse(ccterm_meth == "D", "Combo Mech/Herb", tmeth_nice))
# Fig 6 - WW/LL -----------------------------------------------------
lunchdat <-
dat %>%
filter(!is.na(LRR)) %>%
select(obs_no, yieldLRR, LRR) %>%
rename(weedLRR = LRR)
lunchdat %>% write_csv("_tidydata/sd_yieldLRR-weedLRR.csv")
# NOTE: NAs indicate weed points without yields
(dat_lab6 <-
dat %>%
filter(!is.na(LRR)) %>%
mutate(ww_color = ifelse( (LRR >= 0 & yieldLRR >= 0 ), "Mw-My", NA),
ww_color = ifelse( (LRR >= 0 & yieldLRR < 0 ), "Mw-Ly", ww_color),
ww_color = ifelse( (LRR < 0 & yieldLRR < 0 ), "Lw-Ly", ww_color),
ww_color = ifelse( (LRR < 0 & yieldLRR >= 0 ), "Lw-My", ww_color)) %>%
group_by(ww_color) %>%
summarise(n = n()) )
# Reduced weeds # of points
lessWmoreY <- as.numeric(dat_lab6 %>% filter(ww_color == "Lw-My") %>% pull(n))
lessWlessY <- as.numeric(dat_lab6 %>% filter(ww_color == "Lw-Ly") %>% pull(n))
# Inc weeds
moreWlessY <- as.numeric(dat_lab6 %>% filter(ww_color == "Mw-Ly") %>% pull(n))
moreWmoreY <- as.numeric(dat_lab6 %>% filter(ww_color == "Mw-My") %>% pull(n))
dat %>%
mutate(ww_color = ifelse( (LRR > 0 & yieldLRR > 0 ), "LL", NA),
ww_color = ifelse( (LRR > 0 & yieldLRR < 0 ), "LW", ww_color),
ww_color = ifelse( (LRR < 0 & yieldLRR < 0 ), "WW", ww_color),
ww_color = ifelse( (LRR < 0 & yieldLRR > 0 ), "WL", ww_color)) %>%
group_by(ww_color) %>%
mutate(n = n()) %>%
# Remove that one stinker point, just for graphing
filter(yieldLRR > -4) %>%
ggplot(aes(yieldLRR, LRR)) +
geom_point(aes(color = ww_color), size = 5) +
geom_hline(yintercept = 0) +
geom_vline(xintercept = 0) +
coord_cartesian(xlim = c(-3, 3), ylim = c(-5, 5)) +
geom_text(x = -3, y = 4, label = paste0("(-) yield (+) weeds\n n = ", moreWlessY), hjust = 0, size = 6) +
geom_text(x = -3, y = -4, label = paste0("(-) yield (-) weeds\n n = ", lessWlessY), hjust = 0, size = 6) +
geom_text(x = 3, y = 4, label = paste0("(+) yield (+) weeds\n n = ", moreWmoreY), hjust = 1, size = 6) +
geom_text(x = 3, y = -4, label = paste0("(+) yield (-) weeds\n n = ", lessWmoreY), hjust = 1, size = 6) +
guides(color = F) +
labs(x = "Yield Response to Cover Crop", y = "Weed Response to Cover Crop") +
scale_color_manual(values = c("gray", "red", "green3", "gray")) +
theme_classic() +
theme(axis.text = element_text(size = 14),
axis.title = element_text(size = 16))
ggsave("../_figs/data-Win-Win-scenariosv2.png")
# Fig 7 - cc biomass vs RR -----------------------------------------------------
dat %>%
gather(bioLRR:denLRR, key = "resp", value = "LRR") %>%
ggplot(aes(cc_bio_kgha, LRR)) +
geom_hline(yintercept = 0) +
geom_point(size = 5, pch = 21, fill = "green4") +
# geom_smooth(method = "lm", se = F, color = "red", size = 4) +
# geom_vline(xintercept = 0) +
labs(x = "Cover Crop Biomass [kg/ha]", y = "Weed Response to Cover Crop") +
theme_classic() +
theme(axis.text = element_text(size = 14),
axis.title = element_text(size = 16))
ggsave("../_figs/data-CCbiomass-vs-LRR.png")
# Separate by cc type
dat %>%
gather(bioLRR:denLRR, key = "resp", value = "LRR") %>%
filter(cc_type %in% c("grass", "legume")) %>%
ggplot(aes(cc_bio_kgha, LRR)) +
geom_hline(yintercept = 0) +
geom_point(size = 5, pch = 21, aes(fill= cc_type)) +
geom_smooth(method = "lm", se = F, color = "red", size = 3) +
#geom_vline(xintercept = 0) +
labs(x = "Cover Crop Biomass [kg/ha]", y = "Weed Response to Cover Crop") +
theme_light() +
facet_grid(cc_type~resp) +
theme(axis.text = element_text(size = 14),
axis.title = element_text(size = 16))
#ggsave("../_figs/data-CCbiomass-vs-LRR-by-resp.png")
# Separate by crop_follow AND cc_type - All grasses are followed by soybeans
dat %>%
gather(bioLRR:denLRR, key = "resp", value = "LRR") %>%
filter(crop_follow %in% c("corn", "soybean")) %>%
filter(cc_type %in% c("legume", "grass")) %>%
mutate(crop_follow = recode(crop_follow,
corn = "Maize",
soybean = "Soybean")) %>%
ggplot(aes(cc_bio_kgha, LRR, fill = cc_type)) +
geom_hline(yintercept = 0) +
geom_point(size = 5, pch = 22) +
#geom_smooth(method = "lm", se = F, color = "red", size = 3) +
#geom_vline(xintercept = 0) +
labs(x = "Cover Crop Biomass [kg/ha]", y = "Weed Response to Cover Crop",
title = "All Grass Cover Crops are Followed By Soybeans") +
theme_bw() +
scale_fill_manual(values = c("green3", "blue3"),
name = "CC-Type") +
#scale_shape_manual(values = c(22, 21)) +
# labels = c("legume.Maize", "grass.Soybean", "legume.Soybean"),
# name = "CC-Type.Following-Crop") +
#
#
facet_grid(.~crop_follow) +
guides(shape = F) +
theme(axis.text = element_text(size = rel(1.2)),
axis.title = element_text(size = rel(1.4)),
strip.text = element_text(size = rel(1.5)),
legend.text = element_text(size = rel(1.4)),
legend.title = element_text(size = rel(1.5)))
ggsave("../_figs/data-CCbiomass-vs-LRR-by-cropfollow-and-cctype-v2.png")
dat %>%
gather(bioLRR:denLRR, key = "resp", value = "LRR") %>%
ggplot(aes(cc_bio_kgha, LRR)) +
geom_hline(yintercept = 0) +
geom_point(size = 5, pch = 21, fill = "green4") +
geom_smooth(method = "lm", se = F, color = "red", size = 4) +
#geom_vline(xintercept = 0) +
labs(x = "Cover Crop Biomass [kg/ha]", y = "Weed Response to Cover Crop") +
theme_classic() +
facet_grid(.~cc_type) +
theme(axis.text = element_text(size = 14),
axis.title = element_text(size = 16))
ggsave("../_figs/data-CCbiomass-vs-LRR-by-cctype.png")
##AB changes
##removing Fisk study with low biomass numbers
dat2 <- dat[!dat$study ==5, ]
##for crop following, removing NAs and corn/soy groups - studies 11, 12, 13
dat2 <- dat2[!dat2$study ==11, ]
dat2 <- dat2[!dat2$study ==12, ]
dat2 <- dat2[!dat2$study ==13, ]
#remove outliers
dat2 <- dat2[!dat2$obs_no ==137, ]
dat2 <- dat2[!dat2$obs_no ==49, ]
dat2 <- dat2[!dat2$obs_no ==51, ]
#this was the only way I could keep the dataframe from adding random NAs
dat2 %>%
gather(bioLRR:denLRR, key = "resp", value = "LRR") %>%
ggplot(aes(cc_bio_kgha, LRR)) +
geom_hline(yintercept = 0) +
geom_point(size = 5, pch = 21, fill = "green4") +
geom_smooth(method = "lm", se = F, color = "red", size = 4) +
#geom_vline(xintercept = 0) +
labs(x = "Cover Crop Biomass [kg/ha]", y = "Weed Response to Cover Crop") +
theme_classic() +
facet_grid(.~crop_follow) +
theme(axis.text = element_text(size = 14),
axis.title = element_text(size = 16))
ggsave("../_figs/data-CCbiomass-vs-LRR-by-cropfollow.png")
|
library(dyngen.manuscript)
library(tidyverse)
library(dtw)
library(ggbeeswarm)
library(viridis)
# NOTE(review): `exp` shadows base::exp() in this script's environment.
exp <- start_analysis("usecase_trajectory_alignment")
# For each pair, use 3 different kind of processing: smoothing, subsampling, original cells
# Build (and cache) the experiment design: every dataset pair crossed with
# each preprocessing label. The mutate assumes exactly 100 design rows
# (20 + 40 + 40); expand() then crosses each pair with all three labels.
design_smoothing <- exp$result("design_smoothing.rds") %cache% {
read_rds(exp$result("design_datasets.rds")) %>%
select("base_id1","base_id2", "id1", "id2", "noise") %>%
mutate("smooth" = c(rep("smoothed", 20), rep("subsampled", 40), rep("original cells", 40))) %>%
expand(nesting(base_id1, base_id2, id1, id2, noise), smooth)
}
# Align every dataset pair under its assigned preprocessing regime and
# score the alignment. Regimes:
#   * "smoothed"       - expression averaged over 100 trajectory waypoints
#   * "subsampled"     - raw cells thinned to every 10th cell
#   * "original cells" - raw cells, unmodified
# The two expression matrices are aligned with dynamic time warping; the
# score is the summed absolute pseudotime gap along the warping path
# (lower = better alignment).
alignment_results <- pmap(design_smoothing %>% mutate(rn = row_number()),
                          function(base_id1, base_id2, id1, id2, smooth, rn, noise) {
  dataset1 <- read_rds(exp$dataset_file(id1))
  dataset2 <- read_rds(exp$dataset_file(id2))
  if (smooth == "smoothed") {
    res1 <- get_waypoint_expression(dataset1, 100)
    res2 <- get_waypoint_expression(dataset2, 100)
  } else {
    res1 <- get_cell_expression(dataset1)
    res2 <- get_cell_expression(dataset2)
  }
  pt1 <- res1$pseudotime
  pt2 <- res2$pseudotime
  expr1 <- res1$expression
  expr2 <- res2$expression
  if (smooth == "subsampled") {
    # Keep every 10th cell; assumes exactly 1000 cells per dataset
    # (same assumption as the original code).
    keep <- seq(from = 1, to = 1000, by = 10)
    pt1 <- pt1[keep]
    pt2 <- pt2[keep]
    expr1 <- expr1[names(pt1), ]
    expr2 <- expr2[names(pt2), ]
  }
  # Single alignment/scoring path shared by all regimes (the original
  # duplicated this block for the subsampled case; T is also replaced
  # by TRUE, since T is reassignable).
  dtw_alignment <- dtw(expr2, expr1, step.pattern = symmetric2, keep.internals = TRUE)
  res <- sum(abs(pt1[dtw_alignment$index2] - pt2[dtw_alignment$index1]))
  cat(id1, "&", id2, "&", smooth, "=", res, "\n", sep = " ")
  res
})
# Attach the numeric alignment score to each design row; cached to disk so
# the expensive pmap above is not re-run on subsequent invocations.
result_smoothing <- exp$result("result_smoothing.rds") %cache% {
design_smoothing %>% mutate(result = as.numeric(alignment_results))
}
| /analysis/usecase_trajectory_alignment/2_smoothing.R | no_license | PeterZZQ/dyngen_manuscript | R | false | false | 2,412 | r | library(dyngen.manuscript)
library(tidyverse)
library(dtw)
library(ggbeeswarm)
library(viridis)
exp <- start_analysis("usecase_trajectory_alignment")
# For each pair, use 3 different kind of processing: smoothing, subsampling, original cells
design_smoothing <- exp$result("design_smoothing.rds") %cache% {
read_rds(exp$result("design_datasets.rds")) %>%
select("base_id1","base_id2", "id1", "id2", "noise") %>%
mutate("smooth" = c(rep("smoothed", 20), rep("subsampled", 40), rep("original cells", 40))) %>%
expand(nesting(base_id1, base_id2, id1, id2, noise), smooth)
}
alignment_results <- pmap(design_smoothing %>% mutate(rn = row_number()),
function(base_id1, base_id2, id1, id2, smooth, rn, noise) {
dataset1 <- read_rds(exp$dataset_file(id1))
dataset2 <- read_rds(exp$dataset_file(id2))
if(smooth == "smoothed"){
res1 <- get_waypoint_expression(dataset1, 100)
res2 <- get_waypoint_expression(dataset2, 100)
} else {
res1 <- get_cell_expression(dataset1)
res2 <- get_cell_expression(dataset2)
}
pt1 <- res1$pseudotime
pt2 <- res2$pseudotime
expr1 <- res1$expression
expr2 <- res2$expression
if(smooth == "subsampled"){
smp1 <- seq(from = 1, to = 1000, by = 10)
smp2 <- seq(from = 1, to = 1000, by = 10)
pt1_smp <- pt1[smp1]
pt2_smp <- pt2[smp2]
expr1_smp <- expr1[names(pt1_smp),]
expr2_smp <- expr2[names(pt2_smp),]
dtw_alignment <- dtw(expr2_smp, expr1_smp, step.pattern=symmetric2, keep.internals=T)
pt1_aligned_smp <- pt1_smp[dtw_alignment$index2]
pt2_aligned_smp <- pt2_smp[dtw_alignment$index1]
res <- sum(abs(pt1_aligned_smp - pt2_aligned_smp))
cat(id1, "&", id2, "&", smooth, "=", res, "\n", sep=" ")
return(res)
} else {
dtw_alignment <- dtw(expr2, expr1, step.pattern=symmetric2, keep.internals=T)
pt1_aligned <- pt1[dtw_alignment$index2]
pt2_aligned <- pt2[dtw_alignment$index1]
res <- sum(abs(pt1_aligned - pt2_aligned))
cat(id1, "&", id2, "&", smooth, "=", res, "\n", sep=" ")
return(res)
}
})
result_smoothing <- exp$result("result_smoothing.rds") %cache% {
design_smoothing %>% mutate(result = as.numeric(alignment_results))
}
|
library(shiny)
library(knitr)
# UI definition for the Old Faithful eruption-duration predictor.
# Classic sidebar layout: inputs (waiting time + unit) on the left, a
# tabbed main panel (prediction output / rendered documentation) on the right.
shinyUI(
pageWithSidebar(
# Application title
headerPanel("Yellowstone Old Faithful Geyser Eruption Duration Prediction"),
sidebarPanel(
# Waiting time until the next eruption; interpreted in the unit below.
numericInput('wait',
'Enter geyser waiting time till next eruption',
80 , min = 45, max = 5000, step = 1),
selectInput("waitingTimeUnit", "Units:",
c("Minutes" = "Minutes",
"Seconds" = "Seconds")),
# submitButton defers all reactivity until the user clicks Submit.
submitButton('Submit')
),
mainPanel(
tabsetPanel(
# Server-side prediction rendered as preformatted text (output$prediction).
tabPanel("Prediction Results",
verbatimTextOutput("prediction")
),
# Renders index.Rmd to an HTML fragment at app start-up.
# NOTE(review): "Documentaion" is a typo in a user-visible label --
# left unchanged here since it is runtime UI text.
tabPanel("Html Documentaion",
HTML(knitr::knit2html(text = readLines(normalizePath('index.Rmd')),
fragment.only = TRUE)))
)
)
)
)
| /ui.R | no_license | medmatix/ShinyApplication | R | false | false | 1,274 | r | library(shiny)
library(knitr)
shinyUI(
pageWithSidebar(
# Application title
headerPanel("Yellowstone Old Faithful Geyser Eruption Duration Prediction"),
sidebarPanel(
numericInput('wait',
'Enter geyser waiting time till next eruption',
80 , min = 45, max = 5000, step = 1),
selectInput("waitingTimeUnit", "Units:",
c("Minutes" = "Minutes",
"Seconds" = "Seconds")),
submitButton('Submit')
),
mainPanel(
tabsetPanel(
tabPanel("Prediction Results",
verbatimTextOutput("prediction")
),
tabPanel("Html Documentaion",
HTML(knitr::knit2html(text = readLines(normalizePath('index.Rmd')),
fragment.only = TRUE)))
)
)
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{plot_cells}
\alias{plot_cells}
\title{Plot cells in reduced dimensionality 2D space}
\usage{
plot_cells(fce, features, ...)
}
\arguments{
\item{fce}{fce object of class MultiAssayExperiment}
\item{features}{features to plot, either gene names or columns in colData}
\item{...}{additional arguments to pass to plot_feature}
}
\description{
Plot cells in 2D space. Cells can be colored by gene expression or by a feature in the meta.data dataframe.
}
| /man/plot_cells.Rd | permissive | mandylr/scrunchy | R | false | true | 516 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{plot_cells}
\alias{plot_cells}
\title{Plot cells in reduced dimensionality 2D space}
\usage{
plot_cells(fce, features, ...)
}
\arguments{
\item{fce}{fce object of class MultiAssayExperiment}
\item{features}{features to plot, either gene names or columns in colData}
\item{...}{additional arguments to pass to plot_feature}
}
\description{
Plot cells in 2D space. Cells can be colored by gene expression or by a feature in the meta.data dataframe.
}
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22371540218325e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615769915-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 362 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22371540218325e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DEG.R
\name{wilcoxDEG}
\alias{wilcoxDEG}
\title{Wilcoxon test for each row of a dataframe}
\usage{
wilcoxDEG(data, n1, n2)
}
\arguments{
\item{data}{dataframe of gene expression levels: Gene names in rows, samples in columns.}
\item{n1}{Number of samples for the first experimental condition}
\item{n2}{Number of samples for the second experimental condition}
}
\value{
dataframe with pvalues in one column and rownames of data as rownames.
}
\description{
Tests, one row at a time, whether the first series of values is different from, greater than, or less than the values
of the second condition, using \link[stats]{wilcox.test}.
}
\examples{
# Import the dataset
Data = matrix(runif(5000, 10, 100), ncol=20)
group = paste0(rep(c("control", "case"), each = 10),rep(c(1:10),each = 1))
genes <- paste0(rep(LETTERS[1:25], each=10), rep(c(1:10),each = 1))
colnames(Data) = group
row.names(Data) = genes
# Compute Pvalues
res.DEG = wilcoxDEG(Data,10,10)
}
| /man/wilcoxDEG.Rd | no_license | acolajanni/GENEXPRESSO | R | false | true | 1,031 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DEG.R
\name{wilcoxDEG}
\alias{wilcoxDEG}
\title{Wilcoxon test for each row of a dataframe}
\usage{
wilcoxDEG(data, n1, n2)
}
\arguments{
\item{data}{dataframe of gene expression levels: Gene names in rows, samples in columns.}
\item{n1}{Number of samples for the first experimental condition}
\item{n2}{Number of samples for the second experimental condition}
}
\value{
dataframe with pvalues in one column and rownames of data as rownames.
}
\description{
Tests, one row at a time, whether the first series of values is different from, greater than, or less than the values
of the second condition, using \link[stats]{wilcox.test}.
}
\examples{
# Import the dataset
Data = matrix(runif(5000, 10, 100), ncol=20)
group = paste0(rep(c("control", "case"), each = 10),rep(c(1:10),each = 1))
genes <- paste0(rep(LETTERS[1:25], each=10), rep(c(1:10),each = 1))
colnames(Data) = group
row.names(Data) = genes
# Compute Pvalues
res.DEG = wilcoxDEG(Data,10,10)
}
|
#Author: Chathura J Gunasekara
#Version: 1
#date: June 20, 2017
#
# TGMI driver script: ranks transcription factors (TFs) against a set of
# pathway genes using triple-gene mutual information (helper functions are
# sourced from TGMI.R), then writes three outputs: a ranked TF frequency
# table, a TF-pathway network edge list, and combinatorial TF groups.
#
# One-time dependency installation (kept for reference):
#install.packages("doMC")
#install.packages("infotheo")
#install.packages(stats)
#install.packages(MASS)
#install.packages(iterators)
#install.packages(parallel)
#install.packages(foreach)
library("parallel")
library(stats)
library(MASS)
library(iterators)
library(parallel)
library(foreach)
library(doMC)
library(infotheo)
#Change the following parameters for input datasets
###############################################################
# Number of available CPU cores (presumably consumed by the parallel backend
# inside TGMI.R -- TODO confirm, ncore is not used directly in this script).
ncore = detectCores()
#pathway gene file name, the file must be .csv file
#sample input files are shown
#Pathway FILE
#Arabidopsis Lignin pathway
#pathway_file = "./input_data/Arabidopsis/pathways/lignin_pathway_data.csv"
#Arabidopsis Lignin pathway
#pathway_file = "./input_data/Arabidopsis/pathways/lignin_pathway_data.csv
#Mouse Pluripotency pathway
pathway_file = "./input_data/Mouse_pluripotency_maintenance/mouse_pathway_data.csv"
#TF File
#TF file name, the file must be .csv file
#Arabidopsis
#tf_file = "./input_data/Arabidopsis/Athaliana_AllTFs.csv"
#Mouse
#tf_file = "input_data/Mouse_pluripotency_maintenance/Dataset1/dataset1_135TFs.csv"
#tf_file = "input_data/Mouse_pluripotency_maintenance/Dataset2/dataset2_235TFs.csv"
tf_file = "input_data/Mouse_pluripotency_maintenance/Dataset3/dataset3_335TFs.csv"
#the format of gene expression data "samples_in_row" or "samples_in_column"
format = "samples_in_row"
# Significance cutoffs: alpha1 filters triplets after BH correction; alpha2 is
# the FRACTION of top-ranked TF pairs kept per pathway gene (see loop below).
alpha1 = 0.05
alpha2 = 0.05
#output files
output_file1 = "./output_results/output_Ranked_TF_Frequency.csv"
output_file2 = "./output_results/output_network.csv"
output_file3 = "./output_results/output_combinatorial_TF.txt"
##############################################################
# Load expression data; when samples are in columns the matrices are
# transposed so downstream code can always assume samples in rows.
if (format == "samples_in_row") {
pathway.data = read.csv(pathway_file,header = TRUE)
tf.data = read.csv(tf_file,header = TRUE)
}else{
pathway.data = read.csv(
pathway_file,stringsAsFactors = FALSE,header = FALSE,row.names = 1
)
tf.data = read.csv(
tf_file,stringsAsFactors = FALSE,header = FALSE,row.names = 1
)
pathway.data = t(as.matrix(pathway.data))
tf.data = t(as.matrix(tf.data))
}
# Gene names, TF names and sample count used throughout the analysis.
gn <-colnames(pathway.data)
tfn<-colnames(tf.data)
nsample=dim(tf.data)[1]
# TGMI.R is expected to define parallel_approach1(), mi3(),
# create_empty_table() and the null distribution randomized_3GI_2TF_1PW
# used below -- TODO confirm against TGMI.R.
source("TGMI.R")
#X4CL1
#PAL1
#MYB85
#pw1<-as.numeric(pathway.data["X4CL1"]$X4CL1)
#pw2<-as.numeric(pathway.data["PAL1"]$PAL1)
#tf <-as.numeric(tf.data["MYB85"]$MYB85)
#func(pw1,pw2,tf)
# Score every (TF, pathway-gene, pathway-gene) triplet in parallel and cache
# the full result table on disk (removed again at the end of the script).
results<-parallel_approach1(pathway.data,tf.data,gn,tfn)
write.csv(results,"./temp_output_all_combinations.csv",row.names = F)
###Output combinations Done###
results <- read.csv("./temp_output_all_combinations.csv",header=T)
# Order by raw p-value, apply Benjamini-Hochberg correction, and keep only
# triplets significant at alpha1.
results<-results[with(results, order(S7_div_123_pval)), ]
corr.pval<-p.adjust(results$S7_div_123_pval, method = "BH", n = length(results$S7_div_123_pval))
results$corrected.pval<-corr.pval
selected_results <-results[as.numeric(results$corrected.pval) < alpha1,]
# Expand each significant triplet (TF, Y1, Y2) into two TF-gene pairs that
# share the triplet's interaction strength.
pair_list<-rbind(data.frame(tf=selected_results$X,pw=selected_results$Y1,str=as.numeric(as.character(selected_results$S7_div_123))),
data.frame(tf=selected_results$X,pw=selected_results$Y2,str=as.numeric(as.character(selected_results$S7_div_123))))
#aggregate pair_list df to get average str for each pair
pair_list <- aggregate(pair_list[,3], list(pair_list$tf,pair_list$pw), mean)
colnames(pair_list) <-c("tf","pw","str")
# Rank TFs by how many pathway genes they pair with; TFs that never appear
# are appended with frequency 0 so the output covers every input TF.
TF_rank_MI<-data.frame(sort(table(as.character(pair_list$tf)),decreasing = T))
discardTFs<-setdiff(as.character(colnames(tf.data)),as.character(TF_rank_MI$Var1))
Freq<-rep(0,length(discardTFs))
Var1<-discardTFs
TF_rank_MI<-rbind(TF_rank_MI,data.frame(Var1,Freq))
#output 1
write.csv(TF_rank_MI,output_file1)
#output2
write.csv(pair_list,output_file2,row.names = F)
#output3: remove any stale combinatorial-TF file because cat() appends below.
fn <- output_file3
if (file.exists(fn)) file.remove(fn)
p.vartable2TF1PW<-create_empty_table(0,4)
# For each pathway gene with at least two associated TFs, test every TF pair
# for joint (2TF + 1 pathway gene) mutual information against the randomized
# null distribution, then keep the top alpha2 fraction of pairs.
for (pw in as.vector(unique(pair_list$pw))){
#pw = "CCoAOMT1"
temp_df<-pair_list[pair_list$pw==pw,]
if(length(unique(temp_df$tf))<2){next}
TF2PW<-data.frame(t(combn(unique(as.character(temp_df$tf)),2)))
pw_df<-data.frame(rep(pw,dim(TF2PW)[1]))
colnames(pw_df)<-"pw"
TF2PW<- cbind(pw_df,TF2PW)
selected_2TF1PW_p.value<-c()
for(i in 1:dim(TF2PW)[1]){
I2<-mi3(tf.data[as.character(TF2PW[i,]$X1)],tf.data[as.character(TF2PW[i,]$X2)],
as.vector(pathway.data[as.character(TF2PW[i,]$pw)]))
# z-score of the observed MI relative to the permutation null; one-sided
# p-value under a normal approximation.
z<-(I2-mean(randomized_3GI_2TF_1PW))/sd(randomized_3GI_2TF_1PW)
selected_2TF1PW_p.value<-c(selected_2TF1PW_p.value,pnorm(z,lower.tail=FALSE))
#selected_2TF1PW_p.value<- generate_permut_pval(tf.data[as.character(TF2PW[i,]$X1)],tf.data[as.character(TF2PW[i,]$X2)],
#                         as.vector(pathway.data[as.character(TF2PW[i,]$pw)]),I2)
}
selected2TF1PW<-cbind(TF2PW,
data.frame(p.adjust(selected_2TF1PW_p.value, method = "bonferroni", n = length(selected_2TF1PW_p.value))))
colnames(selected2TF1PW)[4]<-"corr.p.val"
selected2TF1PW<-selected2TF1PW[with(selected2TF1PW, order(corr.p.val)), ]
# NOTE(review): when floor(nrow * alpha2) is 0 this still keeps row 1
# (1:0 indexes c(1, 0)); confirm whether keeping at least one pair per
# pathway gene is intended.
selected2TF1PW<-selected2TF1PW[1:floor(dim(selected2TF1PW)[1]*alpha2),]
p.vartable2TF1PW<-rbind(p.vartable2TF1PW,selected2TF1PW)
# Count how often each TF appears among the retained pairs for this gene
# and emit "TF-(count)" tokens, most frequent first.
allselectedTFs<-c(as.character(selected2TF1PW$X1),as.character(selected2TF1PW$X2))
allselectedTFs<-as.data.frame(table(allselectedTFs))
allselectedTFs<-allselectedTFs[with(allselectedTFs,order(-Freq)),]
catenate<-c()
for(i in 1:dim(allselectedTFs)[1]){
catenate<-c(catenate,paste(as.character(allselectedTFs[i,1]),"-(",allselectedTFs[i,2],")",sep = ""))
}
#catenate<-unique(c(as.character(selected2TF1PW$X1),as.character(selected2TF1PW$X2)))
catenate <- catenate[!is.na(catenate)]
# NOTE(review): the path below is hard-coded; it should presumably use
# output_file3 (they currently point at the same file).
cat(catenate,"=>",pw,"\n",
file="./output_results/output_combinatorial_TF.txt",
sep=" ",append=TRUE)
}
#write.csv(p.vartable2TF1PW,"TFcombinationp.values.csv")
# Clean up the intermediate all-combinations cache.
fn <- "./temp_output_all_combinations.csv"
if (file.exists(fn)) file.remove(fn)
| /run_TGMI.R | no_license | cjgunase/TGMI | R | false | false | 6,073 | r | #Author: Chathura J Gunasekara
#Version: 1
#data: June 20, 2017
#install.packages("doMC")
#install.packages("infotheo")
#install.packages(stats)
#install.packages(MASS)
#install.packages(iterators)
#install.packages(parallel)
#install.packages(foreach)
library("parallel")
library(stats)
library(MASS)
library(iterators)
library(parallel)
library(foreach)
library(doMC)
library(infotheo)
#Change following paramenters for input datasets
###############################################################
ncore = detectCores()
#pathway gene file name, the file must be .csv file
#sample input files are shown
#Pathway FILE
#Arabidopsis Lignin pathway
#pathway_file = "./input_data/Arabidopsis/pathways/lignin_pathway_data.csv"
#Arabidopsis Lignin pathway
#pathway_file = "./input_data/Arabidopsis/pathways/lignin_pathway_data.csv
#Mouse Pluripotency pathway
pathway_file = "./input_data/Mouse_pluripotency_maintenance/mouse_pathway_data.csv"
#TF File
#TF file name, the file must be .csv file
#Arabidopsis
#tf_file = "./input_data/Arabidopsis/Athaliana_AllTFs.csv"
#Mouse
#tf_file = "input_data/Mouse_pluripotency_maintenance/Dataset1/dataset1_135TFs.csv"
#tf_file = "input_data/Mouse_pluripotency_maintenance/Dataset2/dataset2_235TFs.csv"
tf_file = "input_data/Mouse_pluripotency_maintenance/Dataset3/dataset3_335TFs.csv"
#the format of gene expression data "samples_in_row" or "samples_in_column"
format = "samples_in_row"
#cutoff_pvalues
alpha1 = 0.05
alpha2 = 0.05
#output files
output_file1 = "./output_results/output_Ranked_TF_Frequency.csv"
output_file2 = "./output_results/output_network.csv"
output_file3 = "./output_results/output_combinatorial_TF.txt"
##############################################################
if (format == "samples_in_row") {
pathway.data = read.csv(pathway_file,header = TRUE)
tf.data = read.csv(tf_file,header = TRUE)
}else{
pathway.data = read.csv(
pathway_file,stringsAsFactors = FALSE,header = FALSE,row.names = 1
)
tf.data = read.csv(
tf_file,stringsAsFactors = FALSE,header = FALSE,row.names = 1
)
pathway.data = t(as.matrix(pathway.data))
tf.data = t(as.matrix(tf.data))
}
gn <-colnames(pathway.data)
tfn<-colnames(tf.data)
nsample=dim(tf.data)[1]
source("TGMI.R")
#X4CL1
#PAL1
#MYB85
#pw1<-as.numeric(pathway.data["X4CL1"]$X4CL1)
#pw2<-as.numeric(pathway.data["PAL1"]$PAL1)
#tf <-as.numeric(tf.data["MYB85"]$MYB85)
#func(pw1,pw2,tf)
results<-parallel_approach1(pathway.data,tf.data,gn,tfn)
write.csv(results,"./temp_output_all_combinations.csv",row.names = F)
###Output combinations Done###
results <- read.csv("./temp_output_all_combinations.csv",header=T)
results<-results[with(results, order(S7_div_123_pval)), ]
corr.pval<-p.adjust(results$S7_div_123_pval, method = "BH", n = length(results$S7_div_123_pval))
results$corrected.pval<-corr.pval
selected_results <-results[as.numeric(results$corrected.pval) < alpha1,]
pair_list<-rbind(data.frame(tf=selected_results$X,pw=selected_results$Y1,str=as.numeric(as.character(selected_results$S7_div_123))),
data.frame(tf=selected_results$X,pw=selected_results$Y2,str=as.numeric(as.character(selected_results$S7_div_123))))
#aggregate pair_list df to get average str for each pair
pair_list <- aggregate(pair_list[,3], list(pair_list$tf,pair_list$pw), mean)
colnames(pair_list) <-c("tf","pw","str")
TF_rank_MI<-data.frame(sort(table(as.character(pair_list$tf)),decreasing = T))
discardTFs<-setdiff(as.character(colnames(tf.data)),as.character(TF_rank_MI$Var1))
Freq<-rep(0,length(discardTFs))
Var1<-discardTFs
TF_rank_MI<-rbind(TF_rank_MI,data.frame(Var1,Freq))
#output 1
write.csv(TF_rank_MI,output_file1)
#output2
write.csv(pair_list,output_file2,row.names = F)
#output3
fn <- output_file3
if (file.exists(fn)) file.remove(fn)
p.vartable2TF1PW<-create_empty_table(0,4)
for (pw in as.vector(unique(pair_list$pw))){
#pw = "CCoAOMT1"
temp_df<-pair_list[pair_list$pw==pw,]
if(length(unique(temp_df$tf))<2){next}
TF2PW<-data.frame(t(combn(unique(as.character(temp_df$tf)),2)))
pw_df<-data.frame(rep(pw,dim(TF2PW)[1]))
colnames(pw_df)<-"pw"
TF2PW<- cbind(pw_df,TF2PW)
selected_2TF1PW_p.value<-c()
for(i in 1:dim(TF2PW)[1]){
I2<-mi3(tf.data[as.character(TF2PW[i,]$X1)],tf.data[as.character(TF2PW[i,]$X2)],
as.vector(pathway.data[as.character(TF2PW[i,]$pw)]))
z<-(I2-mean(randomized_3GI_2TF_1PW))/sd(randomized_3GI_2TF_1PW)
selected_2TF1PW_p.value<-c(selected_2TF1PW_p.value,pnorm(z,lower.tail=FALSE))
#selected_2TF1PW_p.value<- generate_permut_pval(tf.data[as.character(TF2PW[i,]$X1)],tf.data[as.character(TF2PW[i,]$X2)],
# as.vector(pathway.data[as.character(TF2PW[i,]$pw)]),I2)
}
selected2TF1PW<-cbind(TF2PW,
data.frame(p.adjust(selected_2TF1PW_p.value, method = "bonferroni", n = length(selected_2TF1PW_p.value))))
colnames(selected2TF1PW)[4]<-"corr.p.val"
selected2TF1PW<-selected2TF1PW[with(selected2TF1PW, order(corr.p.val)), ]
selected2TF1PW<-selected2TF1PW[1:floor(dim(selected2TF1PW)[1]*alpha2),]
p.vartable2TF1PW<-rbind(p.vartable2TF1PW,selected2TF1PW)
allselectedTFs<-c(as.character(selected2TF1PW$X1),as.character(selected2TF1PW$X2))
allselectedTFs<-as.data.frame(table(allselectedTFs))
allselectedTFs<-allselectedTFs[with(allselectedTFs,order(-Freq)),]
catenate<-c()
for(i in 1:dim(allselectedTFs)[1]){
catenate<-c(catenate,paste(as.character(allselectedTFs[i,1]),"-(",allselectedTFs[i,2],")",sep = ""))
}
#catenate<-unique(c(as.character(selected2TF1PW$X1),as.character(selected2TF1PW$X2)))
catenate <- catenate[!is.na(catenate)]
cat(catenate,"=>",pw,"\n",
file="./output_results/output_combinatorial_TF.txt",
sep=" ",append=TRUE)
}
#write.csv(p.vartable2TF1PW,"TFcombinationp.values.csv")
fn <- "./temp_output_all_combinations.csv"
if (file.exists(fn)) file.remove(fn)
|
# Feature-engineering submodule: aggregates crawled call records (callInfo)
# into per-customer features keyed by 'custorm_id' [sic], then persists them.
# Relies on helpers defined elsewhere in the project: dateMap(), loadOBJ(),
# dateNumIntFE(), dateVarFE(), cateVarFE(), batchSaveOBJ(), and the globals
# HP_SHARE_DIR, HP_RESULT_DIR, HP_DATE_GAPS, HP_SUBMUDULE_TIME.
# callInfo appears to be a data.table (bracket filtering on bare column
# names) -- TODO confirm.
#record time
start_time<-Sys.time()
#-----------------------------------------------------------------------------------------------------
# Drop rows with missing customer ids (stored either as the string 'NULL' or
# as NA), then normalise the contact timestamp and contact-count columns.
callInfo<-callInfo[custorm_id!='NULL'&!is.na(custorm_id)]
callInfo[,'time_contact']<-dateMap(callInfo$time_contact)
callInfo[,'num_contact'] <-callInfo$num_contact%>%as.character%>%as.numeric
#-----------------------------------------------------------------------------------------------------
#orgMapping: left-join organisation info onto each contacted phone number.
PHONE_ORG_INFO <-loadOBJ(PHONE_ORG_INFO,HP_SHARE_DIR)
callInfo<-merge(callInfo,PHONE_ORG_INFO,by.x='phone_contact',by.y='phone',all.x = T)
#-----------------------------------------------------------------------------------------------------
#custormMapping: left-join known-customer info onto each contacted phone number.
PHONE_CUST_INFO<-loadOBJ(PHONE_CUST_INFO,HP_SHARE_DIR)
callInfo<-merge(callInfo,PHONE_CUST_INFO,by.x='phone_contact',by.y='phone',all.x = T)
#-----------------------------------------------------------------------------------------------------
# Date/count interval features per customer: all contacts, then call-in and
# call-out subsets (type_call presumably 1 = in, 2 = out -- TODO confirm).
CUSTORMER_CRAWLER_CALL <- dateNumIntFE(callInfo,NULL,'time_contact','num_contact','custorm_id',HP_DATE_GAPS,'call_customerContactTime_')
CUSTORMER_CRAWLER_CALL <- dateVarFE(callInfo,CUSTORMER_CRAWLER_CALL,'time_contact','custorm_id','call_customerContactTime_')
# NOTE(review): the CallIn pair below filters type_call==2 for dateNumIntFE
# but type_call==1 for dateVarFE -- one of the two looks like a copy-paste
# bug; confirm against the type_call encoding before changing.
CUSTORMER_CRAWLER_CALL <- dateNumIntFE(callInfo[type_call ==2],CUSTORMER_CRAWLER_CALL,'time_contact','num_contact','custorm_id',HP_DATE_GAPS,'call_customerCallInTime_')
CUSTORMER_CRAWLER_CALL <- dateVarFE(callInfo[type_call ==1],CUSTORMER_CRAWLER_CALL,'time_contact','custorm_id','call_customerCallInTime_')
CUSTORMER_CRAWLER_CALL <- dateNumIntFE(callInfo[type_call ==2],CUSTORMER_CRAWLER_CALL,'time_contact','num_contact','custorm_id',HP_DATE_GAPS,'call_customerCallOutTime_')
CUSTORMER_CRAWLER_CALL <- dateVarFE(callInfo[type_call ==2],CUSTORMER_CRAWLER_CALL,'time_contact','custorm_id','call_customerCallOutTime_')
#-----------------------------------------------------------------------------------------------------
#cateinfo: most-frequent contacted organisation, overall and per direction.
CUSTORMER_CRAWLER_CALL <-cateVarFE(callInfo[!org%>%is.na],CUSTORMER_CRAWLER_CALL,'org','custorm_id','call_contactMostOrg' )
CUSTORMER_CRAWLER_CALL <-cateVarFE(callInfo[!org%>%is.na&type_call ==1],CUSTORMER_CRAWLER_CALL,'org','custorm_id','call_callInMostOrg' )
CUSTORMER_CRAWLER_CALL <-cateVarFE(callInfo[!org%>%is.na&type_call ==2],CUSTORMER_CRAWLER_CALL,'org','custorm_id','call_callOutMostOrg' )
#-----------------------------------------------------------------------------------------------------
#cateinfo: most-frequent contacted known customer, overall and per direction.
# NOTE(review): these filter on custorm_no but still aggregate the 'org'
# column; 'custorm_no' was likely intended -- confirm before changing.
CUSTORMER_CRAWLER_CALL <-cateVarFE(callInfo[!custorm_no%>%is.na],CUSTORMER_CRAWLER_CALL,'org','custorm_id','call_contactMostCust' )
CUSTORMER_CRAWLER_CALL <-cateVarFE(callInfo[!custorm_no%>%is.na&type_call ==1],CUSTORMER_CRAWLER_CALL,'org','custorm_id','call_callInMostCust' )
CUSTORMER_CRAWLER_CALL <-cateVarFE(callInfo[!custorm_no%>%is.na&type_call ==2],CUSTORMER_CRAWLER_CALL,'org','custorm_id','call_callOutMostCust' )
#-----------------------------------------------------------------------------------------------------
#save datas
batchSaveOBJ(CUSTORMER_CRAWLER_CALL,HP_RESULT_DIR,'custorm_id')
#-----------------------------------------------------------------------------------------------------
# Report elapsed wall-clock time for this submodule when enabled.
end_time<-Sys.time()
time_diff<-(end_time%>%as.numeric-start_time%>%as.numeric)%>%round
if(HP_SUBMUDULE_TIME)paste("have finished submodule: call info [",time_diff,"secs ]")%>%print
| /LabelAutoComputeScripts/SOCIAL_MODULE/CODE/callFeatAggregate.R | no_license | miission/RProj | R | false | false | 3,356 | r | #record time
start_time<-Sys.time()
#-----------------------------------------------------------------------------------------------------
callInfo<-callInfo[custorm_id!='NULL'&!is.na(custorm_id)]
callInfo[,'time_contact']<-dateMap(callInfo$time_contact)
callInfo[,'num_contact'] <-callInfo$num_contact%>%as.character%>%as.numeric
#-----------------------------------------------------------------------------------------------------
#orgMapping
PHONE_ORG_INFO <-loadOBJ(PHONE_ORG_INFO,HP_SHARE_DIR)
callInfo<-merge(callInfo,PHONE_ORG_INFO,by.x='phone_contact',by.y='phone',all.x = T)
#-----------------------------------------------------------------------------------------------------
#custormMapping
PHONE_CUST_INFO<-loadOBJ(PHONE_CUST_INFO,HP_SHARE_DIR)
callInfo<-merge(callInfo,PHONE_CUST_INFO,by.x='phone_contact',by.y='phone',all.x = T)
#-----------------------------------------------------------------------------------------------------
CUSTORMER_CRAWLER_CALL <- dateNumIntFE(callInfo,NULL,'time_contact','num_contact','custorm_id',HP_DATE_GAPS,'call_customerContactTime_')
CUSTORMER_CRAWLER_CALL <- dateVarFE(callInfo,CUSTORMER_CRAWLER_CALL,'time_contact','custorm_id','call_customerContactTime_')
CUSTORMER_CRAWLER_CALL <- dateNumIntFE(callInfo[type_call ==2],CUSTORMER_CRAWLER_CALL,'time_contact','num_contact','custorm_id',HP_DATE_GAPS,'call_customerCallInTime_')
CUSTORMER_CRAWLER_CALL <- dateVarFE(callInfo[type_call ==1],CUSTORMER_CRAWLER_CALL,'time_contact','custorm_id','call_customerCallInTime_')
CUSTORMER_CRAWLER_CALL <- dateNumIntFE(callInfo[type_call ==2],CUSTORMER_CRAWLER_CALL,'time_contact','num_contact','custorm_id',HP_DATE_GAPS,'call_customerCallOutTime_')
CUSTORMER_CRAWLER_CALL <- dateVarFE(callInfo[type_call ==2],CUSTORMER_CRAWLER_CALL,'time_contact','custorm_id','call_customerCallOutTime_')
#-----------------------------------------------------------------------------------------------------
#cateinfo
CUSTORMER_CRAWLER_CALL <-cateVarFE(callInfo[!org%>%is.na],CUSTORMER_CRAWLER_CALL,'org','custorm_id','call_contactMostOrg' )
CUSTORMER_CRAWLER_CALL <-cateVarFE(callInfo[!org%>%is.na&type_call ==1],CUSTORMER_CRAWLER_CALL,'org','custorm_id','call_callInMostOrg' )
CUSTORMER_CRAWLER_CALL <-cateVarFE(callInfo[!org%>%is.na&type_call ==2],CUSTORMER_CRAWLER_CALL,'org','custorm_id','call_callOutMostOrg' )
#-----------------------------------------------------------------------------------------------------
#cateinfo
CUSTORMER_CRAWLER_CALL <-cateVarFE(callInfo[!custorm_no%>%is.na],CUSTORMER_CRAWLER_CALL,'org','custorm_id','call_contactMostCust' )
CUSTORMER_CRAWLER_CALL <-cateVarFE(callInfo[!custorm_no%>%is.na&type_call ==1],CUSTORMER_CRAWLER_CALL,'org','custorm_id','call_callInMostCust' )
CUSTORMER_CRAWLER_CALL <-cateVarFE(callInfo[!custorm_no%>%is.na&type_call ==2],CUSTORMER_CRAWLER_CALL,'org','custorm_id','call_callOutMostCust' )
#-----------------------------------------------------------------------------------------------------
#save datas
batchSaveOBJ(CUSTORMER_CRAWLER_CALL,HP_RESULT_DIR,'custorm_id')
#-----------------------------------------------------------------------------------------------------
end_time<-Sys.time()
time_diff<-(end_time%>%as.numeric-start_time%>%as.numeric)%>%round
if(HP_SUBMUDULE_TIME)paste("have finished submodule: call info [",time_diff,"secs ]")%>%print
|
\name{expSBM_ELBO}
\alias{expSBM_ELBO}
\title{expSBM_ELBO}
\description{
Evaluates the evidence lower bound for a given dynamic network.
}
\usage{
expSBM_ELBO(N, edgelist, Z, lambda, mu, nu, directed = F, trunc = T, verbose = F)
}
\arguments{
\item{N}{Number of nodes.}
\item{edgelist}{A matrix with 4 columns: on the first column the sender node, on the second the receiver, on the third either a one or zero to indicate whether it is an interaction or a non-interaction respectively, on the fourth the corresponding exponential length.}
\item{Z}{A \code{NxK} matrix indicating a soft clustering of the nodes into the \code{K} latent groups. The generic entry in position \code{[i,k]} represents the posterior probability that node \code{i} belongs to group \code{k}.}
\item{lambda}{Mixing proportions of the latent groups.}
\item{mu}{A matrix of size \code{KxK} indicating the exponential rates for the interaction lengths, for each pair of groups. Must be a symmetric matrix if \code{directed} is false.}
\item{nu}{A matrix of size \code{KxK} indicating the exponential rates for the non-interaction lengths, for each pair of groups. Must be a symmetric matrix if \code{directed} is false.}
\item{directed}{\code{TRUE} or \code{FALSE} indicating whether interactions have an orientation or not.}
\item{trunc}{\code{TRUE} or \code{FALSE} indicating whether the first and last interactions or non-interactions for every edge are assumed to be truncated or not.}
\item{verbose}{\code{TRUE} or \code{FALSE} indicating whether a lengthy output should be printed out.}
}
\value{
\item{computing_time}{Number of seconds required for the evaluation.}
\item{elbo_value}{Value of the evidence lower bound for the given variational parameters.}
}
| /fuzzedpackages/expSBM/man/expSBM_ELBO.Rd | no_license | akhikolla/testpackages | R | false | false | 1,763 | rd | \name{expSBM_ELBO}
\alias{expSBM_ELBO}
\title{expSBM_ELBO}
\description{
Evaluates the evidence lower bound for a given dynamic network.
}
\usage{
expSBM_ELBO(N, edgelist, Z, lambda, mu, nu, directed = F, trunc = T, verbose = F)
}
\arguments{
\item{N}{Number of nodes.}
\item{edgelist}{A matrix with 4 columns: on the first column the sender node, on the second the receiver, on the third either a one or zero to indicate whether it is an interaction or a non-interaction respectively, on the fourth the corresponding exponential length.}
\item{Z}{A \code{NxK} matrix indicating a soft clustering of the nodes into the \code{K} latent groups. The generic entry in position \code{[i,k]} represents the posterior probability that node \code{i} belongs to group \code{k}.}
\item{lambda}{Mixing proportions of the latent groups.}
\item{mu}{A matrix of size \code{KxK} indicating the exponential rates for the interaction lengths, for each pair of groups. Must be a symmetric matrix if \code{directed} is false.}
\item{nu}{A matrix of size \code{KxK} indicating the exponential rates for the non-interaction lengths, for each pair of groups. Must be a symmetric matrix if \code{directed} is false.}
\item{directed}{\code{TRUE} or \code{FALSE} indicating whether interactions have an orientation or not.}
\item{trunc}{\code{TRUE} or \code{FALSE} indicating whether the first and last interactions or non-interactions for every edge are assumed to be truncated or not.}
\item{verbose}{\code{TRUE} or \code{FALSE} indicating whether a lengthy output should be printed out.}
}
\value{
\item{computing_time}{Number of seconds required for the evaluation.}
\item{elbo_value}{Value of the evidence lower bound for the given variational parameters.}
}
|
\name{sequentialSearch_dir}
\alias{sequentialSearch_dir}
\title{Compute PeakSeg model with given number of peaks}
\description{Compute the most likely peak model with at most the number of
peaks given by \code{peaks.int}. This function repeatedly calls
\code{\link{PeakSegFPOP_dir}} with different penalty values, until either
(1) it finds the \code{peaks.int} model, or (2) it concludes that there
is no \code{peaks.int} model, in which case it returns the next simplest
model (with fewer peaks than \code{peaks.int}).
The first pair of penalty values (0, Inf) is run in parallel
via the user-specified future plan,
if the future.apply package is available.}
\usage{sequentialSearch_dir(problem.dir,
peaks.int, verbose = 0)}
\arguments{
\item{problem.dir}{problemID directory in which coverage.bedGraph has already been
computed. If there is a labels.bed file then the number of
incorrect labels will be computed in order to find the target
interval of minimal error penalty values.}
\item{peaks.int}{int: target number of peaks.}
\item{verbose}{numeric verbosity: if >0 then \code{\link[base]{cat}} is used to print a message
for each penalty.}
}
\value{Same result list from \code{\link{PeakSegFPOP_dir}}, with an additional
component "others" describing the other models that were computed
before finding the optimal model with \code{peaks.int} (or fewer)
peaks. Additional loss columns are as follows: under=number of
peaks in smaller model during binary search; over=number of peaks
in larger model during binary search; iteration=number of times
PeakSegFPOP has been run.}
\author{Toby Dylan Hocking}
\examples{
## Create simple 6 point data set discussed in supplementary
## materials. GFPOP/GPDPA computes up-down model with 2 peaks, but
## neither CDPA (PeakSegDP::cDPA) nor PDPA (jointseg)
r <- function(chrom, chromStart, chromEnd, coverage){
data.frame(chrom, chromStart, chromEnd, coverage)
}
supp <- rbind(
r("chr1", 0, 1, 3),
r("chr1", 1, 2, 9),
r("chr1", 2, 3, 18),
r("chr1", 3, 4, 15),
r("chr1", 4, 5, 20),
r("chr1", 5, 6, 2)
)
data.dir <- file.path(tempfile(), "chr1-0-6")
dir.create(data.dir, recursive=TRUE)
write.table(
supp, file.path(data.dir, "coverage.bedGraph"),
sep="\\t", row.names=FALSE, col.names=FALSE)
## register a parallel future plan to compute the first two
## penalties in parallel during the sequential search.
if(interactive() && requireNamespace("future"))future::plan("multiprocess")
## Compute optimal up-down model with 2 peaks via sequential search.
fit <- PeakSegDisk::sequentialSearch_dir(data.dir, 2L)
library(ggplot2)
ggplot()+
theme_bw()+
geom_point(aes(
chromEnd, coverage),
data=supp)+
geom_segment(aes(
chromStart+0.5, mean,
xend=chromEnd+0.5, yend=mean),
data=fit$segments,
color="green")
}
| /man/sequentialSearch_dir.Rd | no_license | tdhock/PeakSegDisk | R | false | false | 2,814 | rd | \name{sequentialSearch_dir}
\alias{sequentialSearch_dir}
\title{Compute PeakSeg model with given number of peaks}
\description{Compute the most likely peak model with at most the number of
peaks given by \code{peaks.int}. This function repeated calls
\code{\link{PeakSegFPOP_dir}} with different penalty values, until either
(1) it finds the \code{peaks.int} model, or (2) it concludes that there
is no \code{peaks.int} model, in which case it returns the next simplest
model (with fewer peaks than \code{peaks.int}).
The first pair of penalty values (0, Inf) is run in parallel
via the user-specified future plan,
if the future.apply package is available.}
\usage{sequentialSearch_dir(problem.dir,
peaks.int, verbose = 0)}
\arguments{
\item{problem.dir}{problemID directory in which coverage.bedGraph has already been
computed. If there is a labels.bed file then the number of
incorrect labels will be computed in order to find the target
interval of minimal error penalty values.}
\item{peaks.int}{int: target number of peaks.}
\item{verbose}{numeric verbosity: if >0 then \code{\link[base]{cat}} is used to print a message
for each penalty.}
}
\value{Same result list from \code{\link{PeakSegFPOP_dir}}, with an additional
component "others" describing the other models that were computed
before finding the optimal model with \code{peaks.int} (or fewer)
peaks. Additional loss columns are as follows: under=number of
peaks in smaller model during binary search; over=number of peaks
in larger model during binary search; iteration=number of times
PeakSegFPOP has been run.}
\author{Toby Dylan Hocking}
\examples{
## Create simple 6 point data set discussed in supplementary
## materials. GFPOP/GPDPA computes up-down model with 2 peaks, but
## neither CDPA (PeakSegDP::cDPA) nor PDPA (jointseg)
r <- function(chrom, chromStart, chromEnd, coverage){
data.frame(chrom, chromStart, chromEnd, coverage)
}
supp <- rbind(
r("chr1", 0, 1, 3),
r("chr1", 1, 2, 9),
r("chr1", 2, 3, 18),
r("chr1", 3, 4, 15),
r("chr1", 4, 5, 20),
r("chr1", 5, 6, 2)
)
data.dir <- file.path(tempfile(), "chr1-0-6")
dir.create(data.dir, recursive=TRUE)
write.table(
supp, file.path(data.dir, "coverage.bedGraph"),
sep="\\t", row.names=FALSE, col.names=FALSE)
## register a parallel future plan to compute the first two
## penalties in parallel during the sequential search.
if(interactive() && requireNamespace("future"))future::plan("multiprocess")
## Compute optimal up-down model with 2 peaks via sequential search.
fit <- PeakSegDisk::sequentialSearch_dir(data.dir, 2L)
library(ggplot2)
ggplot()+
theme_bw()+
geom_point(aes(
chromEnd, coverage),
data=supp)+
geom_segment(aes(
chromStart+0.5, mean,
xend=chromEnd+0.5, yend=mean),
data=fit$segments,
color="green")
}
|
tsb <- function(data,h=10,w=NULL,init=c("mean","naive"),
                cost=c("mar","msr","mae","mse"),
                init.opt=c(TRUE,FALSE),outplot=c(FALSE,TRUE),
                opt.on=c(FALSE,TRUE),na.rm=c(FALSE,TRUE)){
  # TSB (Teunter-Syntetos-Babai) method for intermittent demand forecasting.
  #
  # Inputs:
  #   data     Intermittent demand time series.
  #   h        Forecast horizon.
  #   w        Smoothing parameters. If w == NULL then parameters are
  #            optimised. Otherwise the first parameter is for demand and the
  #            second for demand probability.
  #   init     Initial values for demand and demand probability. This can be:
  #              c(z,p)  - vector of two scalars: initial demand and initial
  #                        demand probability;
  #              "naive" - initial demand is the first non-zero demand and the
  #                        initial probability is the first one;
  #              "mean"  - as "naive", but the initial probability is the mean
  #                        of all in-sample probabilities.
  #   cost     Cost function used for optimisation:
  #              "mar" - mean absolute rate; "msr" - mean squared rate;
  #              "mae" - mean absolute error; "mse" - mean squared error.
  #   init.opt If TRUE then initial values are optimised.
  #   outplot  If TRUE a plot of the forecast is provided.
  #   opt.on   Used internally by the optimiser (tsb.opt/tsb.cost); when TRUE
  #            input checks are skipped.
  #   na.rm    If TRUE, NA values are removed before fitting.
  #
  # Outputs (list):
  #   model    Type of model fitted ("tsb").
  #   frc.in   In-sample (one-step-ahead) demand rate.
  #   frc.out  Out-of-sample demand rate (NULL when h == 0).
  #   weights  Smoothing parameters for demand and demand probability.
  #   initial  Initialisation values for demand and demand probability.
  #
  # Reference:
  #   N. Kourentzes, 2014, On intermittent demand model optimisation and
  #   selection, International Journal of Production Economics, 156: 180-190.
  #   http://dx.doi.org/10.1016/j.ijpe.2014.06.007
  #
  # Nikolaos Kourentzes, 2014 <nikolaos@kourentzes.com>

  # Defaults
  cost <- cost[1]
  init.opt <- init.opt[1]
  outplot <- outplot[1]
  opt.on <- opt.on[1]
  na.rm <- na.rm[1]
  if (!is.numeric(init)){
    init <- init[1]
  } else {
    # BUG FIX: this previously tested `length(init>=2)`, i.e. the length of a
    # logical vector, which is truthy for ANY non-empty numeric init. A
    # length-1 numeric init was silently padded to c(init, NA), making every
    # fitted value NA. A numeric init is now only accepted when it supplies
    # both starting values; otherwise the "mean" initialisation is used.
    if (length(init) >= 2){
      init <- init[1:2]
    } else {
      init <- "mean"
    }
  }

  # Prepare data: collapse a (single-column) data frame to a vector.
  # inherits() is equivalent to isa() here and also works on R < 4.1.
  if (inherits(data,"data.frame")){
    if (ncol(data)>1){
      warning("Data frame with more than one columns. Using only first one.")
    }
    data <- data[[1]]
  }
  if (na.rm == TRUE){
    data <- data[!is.na(data)]
  }
  n <- length(data)

  # TSB decomposition
  p <- as.numeric(data!=0)  # Demand occurrence indicator (probability series)
  z <- data[data!=0]        # Non-zero demand sizes

  # Initialise (unless a numeric c(z, p) pair was supplied)
  if (!(is.numeric(init) && length(init)==2)){
    if (init=="mean"){
      init <- c(z[1],mean(p))
    } else {
      init <- c(z[1],p[1])
    }
  }

  # Optimise parameters and/or initial values if requested
  if (opt.on == FALSE){
    if (is.null(w) || init.opt == TRUE){
      wopt <- tsb.opt(data,cost,w,init,init.opt)
      w <- wopt$w
      init <- wopt$init
    } else {
      if (length(w)!=2){
        stop(paste("w must be a vector of 2 elements: the smoothing parameter",
                   " for the non-zero demand and the parameter for the ",
                   "probability of demand.",sep=""))
      }
    }
  }

  # Pre-allocate memory
  zfit <- vector("numeric",n)
  pfit <- vector("numeric",n)

  # Validate and assign initial values (checks are skipped inside optimisation)
  if (opt.on == FALSE){
    if (init[1]<0){
      stop("Initial demand cannot be a negative number.")
    }
    if (init[2]<0){
      stop("Initial demand probability cannot be a negative number.")
    }
  }
  zfit[1] <- init[1]
  pfit[1] <- init[2]

  # Fit model: probability is smoothed every period; demand size is only
  # updated in periods where demand occurs. Guard against n == 1, where
  # 2:n would iterate over c(2, 1).
  if (n > 1){
    for (i in 2:n){
      pfit[i] <- pfit[i-1] + w[2]*(p[i]-pfit[i-1])       # Demand probability
      if (p[i]==0){
        zfit[i] <- zfit[i-1]
      } else {
        zfit[i] <- zfit[i-1] + w[1]*(data[i]-zfit[i-1])  # Demand size
      }
    }
  }
  yfit <- pfit*zfit

  # One-step-ahead in-sample forecasts and a flat h-step-ahead forecast
  frc.in <- c(NA,yfit[1:(n-1)])
  if (h>0){
    frc.out <- rep(yfit[n],h)
  } else {
    frc.out <- NULL
  }

  # Plot actuals, in-sample fit and (when h > 0) the out-of-sample forecast
  if (outplot==TRUE){
    plot(1:n,data,type="l",xlim=c(1,(n+h)),xlab="Period",ylab="",
         xaxs="i",yaxs="i",ylim=c(0,max(data)*1.1))
    lines(which(data>0),data[data>0],type="p",pch=20)
    lines(1:n,frc.in,col="red")
    if (h>0){  # guard: with h == 0 there is no out-of-sample forecast to draw
      lines((n+1):(n+h),frc.out,col="red",lwd=2)
    }
  }

  return(list(model="tsb",frc.in=frc.in,frc.out=frc.out,
              weights=w,initial=c(zfit[1],pfit[1])))
}
#-------------------------------------------------
tsb.opt <- function(data,cost=c("mar","msr","mae","mse"),w=NULL,
                    init,init.opt=c(TRUE,FALSE)){
  # Optimisation wrapper for the TSB method.
  #
  # Depending on which inputs are fixed, optimises the smoothing parameters
  # (w), the initial values (init), or both, by minimising the chosen cost
  # function (evaluated by tsb.cost) via Nelder-Mead.
  #
  # Inputs:
  #   data     Intermittent demand time series.
  #   cost     Cost function passed through to tsb.cost.
  #   w        Fixed smoothing parameters, or NULL to optimise them.
  #   init     Starting initial values c(demand, demand probability).
  #   init.opt If TRUE, initial values are optimised as well.
  #
  # Output:
  #   List with elements w (length 2) and init (length 2).
  cost <- cost[1]
  init.opt <- init.opt[1]
  if (is.null(w) == TRUE && init.opt == TRUE){
    # Optimise w and init jointly: par = c(w1, w2, init.z, init.p)
    p0 <- c(rep(0.05,2),init[1],init[2])
    lbound <- c(0,0,0,0)
    ubound <- c(1,1,max(data),1)
    wopt <- optim(par=p0,tsb.cost,method="Nelder-Mead",data=data,cost=cost,
                  w=w,w.opt=is.null(w),init=init,init.opt=init.opt,
                  lbound=lbound,ubound=ubound,control=list(maxit = 2000))$par
  } else if (is.null(w) == TRUE && init.opt == FALSE){
    # Optimise only w; append the fixed initial values afterwards
    p0 <- c(rep(0.05,2))
    lbound <- c(0,0)
    ubound <- c(1,1)
    wopt <- optim(par=p0,tsb.cost,method="Nelder-Mead",data=data,cost=cost,
                  w=w,w.opt=is.null(w),init=init,init.opt=init.opt,
                  lbound=lbound,ubound=ubound,control=list(maxit = 2000))$par
    wopt <- c(wopt,init)
  } else if (is.null(w) == FALSE && init.opt == TRUE){
    # Optimise only init; prepend the fixed smoothing parameters afterwards
    p0 <- c(init[1],init[2])
    lbound <- c(0,0)
    ubound <- c(max(data),1)
    wopt <- optim(par=p0,tsb.cost,method="Nelder-Mead",data=data,cost=cost,
                  w=w,w.opt=is.null(w),init=init,init.opt=init.opt,
                  lbound=lbound,ubound=ubound,control=list(maxit = 2000))$par
    wopt <- c(w,wopt)
  } else {
    # Nothing to optimise. Previously this case left wopt undefined and the
    # function errored; return the fixed inputs unchanged instead. (tsb()
    # never reaches this branch, but the function is now safe to call
    # directly with both w supplied and init.opt = FALSE.)
    wopt <- c(w,init)
  }
  return(list(w=wopt[1:2],init=wopt[3:4]))
}
#-------------------------------------------------
# Cost function for TSB optimisation: rebuilds the in-sample forecast with the
# candidate parameters and scores it, applying penalty values for bound and
# ordering violations.
#
# Args:
#   p0:       candidate parameter vector being optimised; layout depends on
#             w.opt/init.opt (w first, then init when both are free).
#   data:     intermittent demand series.
#   cost:     one of "mse", "mae", "mar", "msr".
#   w:        fixed smoothing parameters (used when w.opt is FALSE).
#   w.opt:    TRUE when the smoothing parameters are part of p0.
#   init:     fixed initial values (used when init.opt is FALSE).
#   init.opt: TRUE when the initial values are part of p0.
#   lbound, ubound: box constraints on p0.
#
# Returns: scalar cost (a huge penalty value when constraints are violated).
tsb.cost <- function(p0,data,cost,w,w.opt,init,init.opt,lbound,ubound){
# Cost functions for TSB
# Reconstruct the in-sample forecast with opt.on=TRUE so tsb() skips its
# input checks and re-optimisation.
if (w.opt == TRUE && init.opt == TRUE){
frc.in <- tsb(data=data,w=p0[1:2],h=0,init=p0[3:4],opt.on=TRUE)$frc.in
} else if (w.opt == TRUE && init.opt == FALSE){
frc.in <- tsb(data=data,w=p0[1:2],h=0,init=init,opt.on=TRUE)$frc.in
} else if (w.opt == FALSE && init.opt == TRUE){
frc.in <- tsb(data=data,w=w,h=0,init=p0[1:2],opt.on=TRUE)$frc.in
}
# NOTE(review): when both w.opt and init.opt are FALSE, frc.in is never set
# and this function fails; tsb.opt never requests that combination.
if (cost == "mse"){
E <- data - frc.in
E <- E[!is.na(E)]
E <- mean(E^2)
} else if(cost == "mae"){
E <- data - frc.in
E <- E[!is.na(E)]
E <- mean(abs(E))
} else if(cost == "mar"){
# Rate-based costs compare the forecast to the running mean demand rate;
# the first 30% of the series is clamped to its value at the 30% mark.
n <- length(data)
temp <- cumsum(data)/(1:n)
n <- ceiling(0.3*n)
temp[1:n] <- temp[n]
E <- abs(frc.in - temp)
E <- E[!is.na(E)]
E <- sum(E)
} else if(cost == "msr"){
n <- length(data)
temp <- cumsum(data)/(1:n)
n <- ceiling(0.3*n)
temp[1:n] <- temp[n]
E <- (frc.in - temp)^2
E <- E[!is.na(E)]
E <- sum(E)
}
# Bounds
# Penalise any optimised parameter outside its box constraints; the loop
# length relies on logical TRUE coercing to 1 (two parameters per group).
for (i in 1:(2*w.opt+2*init.opt)){
if (!p0[i]>=lbound[i] | !p0[i]<=ubound[i]){
E <- 9*10^99
}
}
# Parameter of demand probability must be smaller than parameter of demand
if (w.opt == TRUE){
if (p0[1] < p0[2]){
E <- 9*10^99
}
}
return(E)
} | /R/tsb.R | no_license | trnnick/tsintermittent | R | false | false | 7,726 | r | tsb <- function(data,h=10,w=NULL,init=c("mean","naive"),
cost=c("mar","msr","mae","mse"),
init.opt=c(TRUE,FALSE),outplot=c(FALSE,TRUE),
opt.on=c(FALSE,TRUE),na.rm=c(FALSE,TRUE)){
# TSB method
#
# Inputs:
# data Intermittent demand time series.
# h Forecast horizon.
# w Smoothing parameters. If w == NULL then parameters are optimised.
# Otherwise first parameter is for demand and second for demand probability.
# init Initial values for demand and intervals. This can be:
# c(z,x) - Vector of two scalars, where first is initial demand and
# second is initial interval;
# "naive" - Initial demand is first non-zero demand and initial demand
# probability is again the first one;
# "mean" - Same as "naive", but initial demand probability is the mean
# of all in sample probabilities.
# cost Cost function used for optimisation
# "mar" - Mean absolute rate
# "msr" - Mean squared rate
# "mae" - Mean absolute error
# "mse" - Mean squared error
# init.opt If init.opt==TRUE then initial values are optimised.
# outplot If TRUE a plot of the forecast is provided.
# opt.on This is meant to use only by the optimisation function. When opt.on is
# TRUE then no checks on inputs are performed.
# na.rm A logical value indicating whether NA values should be remove using the method.
#
# Outputs:
# model Type of model fitted.
# frc.in In-sample demand rate.
# frc.out Out-of-sample demand rate.
# weights Smoothing parameters for demand and demand probability.
# initial Initialisation values for demand and demand probability smoothing.
#
# Example:
# tsb(ts.data1,outplot=TRUE)
#
# Notes:
# Optimisation of the method described in:
# N. Kourentzes, 2014, On intermittent demand model optimisation and selection,
# International Journal of Production Economics, 156: 180-190.
# http://dx.doi.org/10.1016/j.ijpe.2014.06.007
# http://kourentzes.com/forecasting/2014/06/11/on-intermittent-demand-model-optimisation-and-selection/
#
# Nikolaos Kourentzes, 2014 <nikolaos@kourentzes.com>
# Defaults
cost <- cost[1]
init.opt <- init.opt[1]
outplot <- outplot[1]
opt.on <- opt.on[1]
na.rm <- na.rm[1]
if (!is.numeric(init)){
init <- init[1]
} else {
if (length(init>=2)){
init <- init[1:2]
} else {
init <- "mean"
}
}
# Prepare data
if (isa(data,"data.frame")){
if (ncol(data)>1){
warning("Data frame with more than one columns. Using only first one.")
}
data <- data[[1]]
}
if (na.rm == TRUE){
data <- data[!is.na(data)]
}
n <- length(data)
# TSB decomposition
p <- as.numeric(data!=0) # Demand probability
z <- data[data!=0] # Non-zero demand
# Initialise
if (!(is.numeric(init) && length(init)==2)){
if (init=="mean"){
init <- c(z[1],mean(p))
} else {
init <- c(z[1],p[1])
}
}
# Optimise parameters if requested
if (opt.on == FALSE){
if (is.null(w) || init.opt == TRUE){
wopt <- tsb.opt(data,cost,w,init,init.opt)
w <- wopt$w
init <- wopt$init
} else {
if (length(w)!=2){
stop(paste("w must be a vector of 2 elements: the smoothing parameter",
" for the non-zero demand and the parameter for the ",
"probability of demand.",sep=""))
}
}
}
# Pre-allocate memory
zfit <- vector("numeric",n)
pfit <- vector("numeric",n)
# Assign initial values and parameters
if (opt.on == FALSE){
if (init[1]<0){
stop("Initial demand cannot be a negative number.")
}
if (init[2]<0){
stop("Initial demand probability cannot be a negative number.")
}
}
zfit[1] <- init[1]
pfit[1] <- init[2]
# Fit model
for (i in 2:n){
pfit[i] <- pfit[i-1] + w[2]*(p[i]-pfit[i-1]) # Demand probability
if (p[i]==0){
zfit[i] <- zfit[i-1]
} else {
zfit[i] <- zfit[i-1] + w[1]*(data[i]-zfit[i-1]) # Demand
}
}
yfit <- pfit*zfit
frc.in <- c(NA,yfit[1:(n-1)])
if (h>0){
frc.out <- rep(yfit[n],h)
} else {
frc.out = NULL
}
# Plot
if (outplot==TRUE){
plot(1:n,data,type="l",xlim=c(1,(n+h)),xlab="Period",ylab="",
xaxs="i",yaxs="i",ylim=c(0,max(data)*1.1))
lines(which(data>0),data[data>0],type="p",pch=20)
lines(1:n,frc.in,col="red")
lines((n+1):(n+h),frc.out,col="red",lwd=2)
}
return(list(model="tsb",frc.in=frc.in,frc.out=frc.out,
weights=w,initial=c(zfit[1],pfit[1])))
}
#-------------------------------------------------
tsb.opt <- function(data, cost = c("mar", "msr", "mae", "mse"), w = NULL,
                    init, init.opt = c(TRUE, FALSE)) {
  # Optimisation routine for TSB: minimises the selected in-sample cost over
  # the smoothing parameters, the initial values, or both, via Nelder-Mead.
  # Returns list(w = smoothing parameters, init = initial values).
  cost <- cost[1]
  init.opt <- init.opt[1]
  w.free <- is.null(w)
  # All three cases call optim() identically except for the start vector and
  # the box constraints, so share a single wrapper.
  run.optim <- function(par0, lo, hi) {
    optim(par = par0, tsb.cost, method = "Nelder-Mead", data = data,
          cost = cost, w = w, w.opt = w.free, init = init,
          init.opt = init.opt, lbound = lo, ubound = hi,
          control = list(maxit = 2000))$par
  }
  if (w.free && init.opt == TRUE) {
    # Both smoothing parameters and initial values are free
    par.hat <- run.optim(c(0.05, 0.05, init[1], init[2]),
                         c(0, 0, 0, 0), c(1, 1, max(data), 1))
  } else if (w.free && init.opt == FALSE) {
    # Only the smoothing parameters are free
    par.hat <- run.optim(c(0.05, 0.05), c(0, 0), c(1, 1))
    par.hat <- c(par.hat, init)
  } else if (!w.free && init.opt == TRUE) {
    # Only the initial values are free
    par.hat <- run.optim(c(init[1], init[2]), c(0, 0), c(max(data), 1))
    par.hat <- c(w, par.hat)
  }
  return(list(w = par.hat[1:2], init = par.hat[3:4]))
}
#-------------------------------------------------
# Cost function for TSB optimisation: rebuilds the in-sample forecast with the
# candidate parameters and scores it, applying penalty values for bound and
# ordering violations.
#
# Args:
#   p0:       candidate parameter vector being optimised; layout depends on
#             w.opt/init.opt (w first, then init when both are free).
#   data:     intermittent demand series.
#   cost:     one of "mse", "mae", "mar", "msr".
#   w:        fixed smoothing parameters (used when w.opt is FALSE).
#   w.opt:    TRUE when the smoothing parameters are part of p0.
#   init:     fixed initial values (used when init.opt is FALSE).
#   init.opt: TRUE when the initial values are part of p0.
#   lbound, ubound: box constraints on p0.
#
# Returns: scalar cost (a huge penalty value when constraints are violated).
tsb.cost <- function(p0,data,cost,w,w.opt,init,init.opt,lbound,ubound){
# Cost functions for TSB
# Reconstruct the in-sample forecast with opt.on=TRUE so tsb() skips its
# input checks and re-optimisation.
if (w.opt == TRUE && init.opt == TRUE){
frc.in <- tsb(data=data,w=p0[1:2],h=0,init=p0[3:4],opt.on=TRUE)$frc.in
} else if (w.opt == TRUE && init.opt == FALSE){
frc.in <- tsb(data=data,w=p0[1:2],h=0,init=init,opt.on=TRUE)$frc.in
} else if (w.opt == FALSE && init.opt == TRUE){
frc.in <- tsb(data=data,w=w,h=0,init=p0[1:2],opt.on=TRUE)$frc.in
}
# NOTE(review): when both w.opt and init.opt are FALSE, frc.in is never set
# and this function fails; tsb.opt never requests that combination.
if (cost == "mse"){
E <- data - frc.in
E <- E[!is.na(E)]
E <- mean(E^2)
} else if(cost == "mae"){
E <- data - frc.in
E <- E[!is.na(E)]
E <- mean(abs(E))
} else if(cost == "mar"){
# Rate-based costs compare the forecast to the running mean demand rate;
# the first 30% of the series is clamped to its value at the 30% mark.
n <- length(data)
temp <- cumsum(data)/(1:n)
n <- ceiling(0.3*n)
temp[1:n] <- temp[n]
E <- abs(frc.in - temp)
E <- E[!is.na(E)]
E <- sum(E)
} else if(cost == "msr"){
n <- length(data)
temp <- cumsum(data)/(1:n)
n <- ceiling(0.3*n)
temp[1:n] <- temp[n]
E <- (frc.in - temp)^2
E <- E[!is.na(E)]
E <- sum(E)
}
# Bounds
# Penalise any optimised parameter outside its box constraints; the loop
# length relies on logical TRUE coercing to 1 (two parameters per group).
for (i in 1:(2*w.opt+2*init.opt)){
if (!p0[i]>=lbound[i] | !p0[i]<=ubound[i]){
E <- 9*10^99
}
}
# Parameter of demand probability must be smaller than parameter of demand
if (w.opt == TRUE){
if (p0[1] < p0[2]){
E <- 9*10^99
}
}
return(E)
} |
library(distrEx)
### Name: var
### Title: Generic Functions for the Computation of Functionals
### Aliases: var var-methods var,ANY-method
### var,UnivariateDistribution-method var,AffLinDistribution-method
### var,AffLinAbscontDistribution-method
### var,AffLinDiscreteDistribution-method
### var,AffLinLatticeDistribution-method var,CompoundDistribution-method
### var,Arcsine-method var,Beta-method var,Binom-method var,Cauchy-method
### var,Chisq-method var,Dirac-method var,DExp-method var,Exp-method
### var,Fd-method var,Gammad-method var,Geom-method var,Hyper-method
### var,Logis-method var,Lnorm-method var,Nbinom-method var,Norm-method
### var,Pois-method var,Unif-method var,Weibull-method var,Td-method sd
### sd-methods sd,UnivariateDistribution-method sd,Norm-method median
### median,ANY-method median-methods median,UnivariateDistribution-method
### median,UnivariateCondDistribution-method
### median,AffLinDistribution-method
### median,AffLinAbscontDistribution-method
### median,AffLinDiscreteDistribution-method
### median,AffLinLatticeDistribution-method median,Arcsine-method
### median,Cauchy-method median,Dirac-method median,DExp-method
### median,Exp-method median,Geom-method median,Logis-method
### median,Lnorm-method median,Norm-method median,Unif-method IQR
### IQR-methods IQR,ANY-method IQR,UnivariateDistribution-method
### IQR,UnivariateCondDistribution-method IQR,AffLinDistribution-method
### IQR,AffLinAbscontDistribution-method
### IQR,AffLinDiscreteDistribution-method
### IQR,AffLinLatticeDistribution-method IQR,DiscreteDistribution-method
### IQR,Arcsine-method IQR,Cauchy-method IQR,Dirac-method IQR,DExp-method
### IQR,Exp-method IQR,Geom-method IQR,Logis-method IQR,Norm-method
### IQR,Unif-method mad mad,ANY-method mad-methods
### mad,UnivariateDistribution-method mad,AffLinDistribution-method
### mad,AffLinAbscontDistribution-method
### mad,AffLinDiscreteDistribution-method
### mad,AffLinLatticeDistribution-method mad,Cauchy-method
### mad,Dirac-method mad,DExp-method mad,Exp-method mad,Geom-method
### mad,Logis-method mad,Norm-method mad,Unif-method mad,Arcsine-method
### skewness skewness-methods skewness,ANY-method
### skewness,UnivariateDistribution-method
### skewness,AffLinDistribution-method
### skewness,AffLinAbscontDistribution-method
### skewness,AffLinDiscreteDistribution-method
### skewness,AffLinLatticeDistribution-method skewness,Arcsine-method
### skewness,Beta-method skewness,Binom-method skewness,Cauchy-method
### skewness,Chisq-method skewness,Dirac-method skewness,DExp-method
### skewness,Exp-method skewness,Fd-method skewness,Gammad-method
### skewness,Geom-method skewness,Hyper-method skewness,Logis-method
### skewness,Lnorm-method skewness,Nbinom-method skewness,Norm-method
### skewness,Pois-method skewness,Unif-method skewness,Weibull-method
### skewness,Td-method kurtosis kurtosis-methods kurtosis,ANY-method
### kurtosis,UnivariateDistribution-method
### kurtosis,AffLinDistribution-method
### kurtosis,AffLinAbscontDistribution-method
### kurtosis,AffLinDiscreteDistribution-method
### kurtosis,AffLinLatticeDistribution-method kurtosis,Arcsine-method
### kurtosis,Beta-method kurtosis,Binom-method kurtosis,Cauchy-method
### kurtosis,Chisq-method kurtosis,Dirac-method kurtosis,DExp-method
### kurtosis,Exp-method kurtosis,Fd-method kurtosis,Gammad-method
### kurtosis,Geom-method kurtosis,Hyper-method kurtosis,Logis-method
### kurtosis,Lnorm-method kurtosis,Nbinom-method kurtosis,Norm-method
### kurtosis,Pois-method kurtosis,Unif-method kurtosis,Weibull-method
### kurtosis,Td-method
### Keywords: methods distribution
### ** Examples
# Variance of Exp(1) distribution
var(Exp())
#median(Exp())
IQR(Exp())
mad(Exp())
# Variance of N(1,4)^2
var(Norm(mean=1, sd=2), fun = function(x){x^2})
var(Norm(mean=1, sd=2), fun = function(x){x^2}, useApply = FALSE)
## sd -- may equivalently be replaced by var
sd(Pois()) ## uses explicit terms
sd(as(Pois(),"DiscreteDistribution")) ## uses sums
sd(as(Pois(),"UnivariateDistribution")) ## uses simulations
sd(Norm(mean=2), fun = function(x){2*x^2}) ## uses simulations
#
mad(sin(exp(Norm()+2*Pois()))) ## weird
| /data/genthat_extracted_code/distrEx/examples/Var.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 4,278 | r | library(distrEx)
### Name: var
### Title: Generic Functions for the Computation of Functionals
### Aliases: var var-methods var,ANY-method
### var,UnivariateDistribution-method var,AffLinDistribution-method
### var,AffLinAbscontDistribution-method
### var,AffLinDiscreteDistribution-method
### var,AffLinLatticeDistribution-method var,CompoundDistribution-method
### var,Arcsine-method var,Beta-method var,Binom-method var,Cauchy-method
### var,Chisq-method var,Dirac-method var,DExp-method var,Exp-method
### var,Fd-method var,Gammad-method var,Geom-method var,Hyper-method
### var,Logis-method var,Lnorm-method var,Nbinom-method var,Norm-method
### var,Pois-method var,Unif-method var,Weibull-method var,Td-method sd
### sd-methods sd,UnivariateDistribution-method sd,Norm-method median
### median,ANY-method median-methods median,UnivariateDistribution-method
### median,UnivariateCondDistribution-method
### median,AffLinDistribution-method
### median,AffLinAbscontDistribution-method
### median,AffLinDiscreteDistribution-method
### median,AffLinLatticeDistribution-method median,Arcsine-method
### median,Cauchy-method median,Dirac-method median,DExp-method
### median,Exp-method median,Geom-method median,Logis-method
### median,Lnorm-method median,Norm-method median,Unif-method IQR
### IQR-methods IQR,ANY-method IQR,UnivariateDistribution-method
### IQR,UnivariateCondDistribution-method IQR,AffLinDistribution-method
### IQR,AffLinAbscontDistribution-method
### IQR,AffLinDiscreteDistribution-method
### IQR,AffLinLatticeDistribution-method IQR,DiscreteDistribution-method
### IQR,Arcsine-method IQR,Cauchy-method IQR,Dirac-method IQR,DExp-method
### IQR,Exp-method IQR,Geom-method IQR,Logis-method IQR,Norm-method
### IQR,Unif-method mad mad,ANY-method mad-methods
### mad,UnivariateDistribution-method mad,AffLinDistribution-method
### mad,AffLinAbscontDistribution-method
### mad,AffLinDiscreteDistribution-method
### mad,AffLinLatticeDistribution-method mad,Cauchy-method
### mad,Dirac-method mad,DExp-method mad,Exp-method mad,Geom-method
### mad,Logis-method mad,Norm-method mad,Unif-method mad,Arcsine-method
### skewness skewness-methods skewness,ANY-method
### skewness,UnivariateDistribution-method
### skewness,AffLinDistribution-method
### skewness,AffLinAbscontDistribution-method
### skewness,AffLinDiscreteDistribution-method
### skewness,AffLinLatticeDistribution-method skewness,Arcsine-method
### skewness,Beta-method skewness,Binom-method skewness,Cauchy-method
### skewness,Chisq-method skewness,Dirac-method skewness,DExp-method
### skewness,Exp-method skewness,Fd-method skewness,Gammad-method
### skewness,Geom-method skewness,Hyper-method skewness,Logis-method
### skewness,Lnorm-method skewness,Nbinom-method skewness,Norm-method
### skewness,Pois-method skewness,Unif-method skewness,Weibull-method
### skewness,Td-method kurtosis kurtosis-methods kurtosis,ANY-method
### kurtosis,UnivariateDistribution-method
### kurtosis,AffLinDistribution-method
### kurtosis,AffLinAbscontDistribution-method
### kurtosis,AffLinDiscreteDistribution-method
### kurtosis,AffLinLatticeDistribution-method kurtosis,Arcsine-method
### kurtosis,Beta-method kurtosis,Binom-method kurtosis,Cauchy-method
### kurtosis,Chisq-method kurtosis,Dirac-method kurtosis,DExp-method
### kurtosis,Exp-method kurtosis,Fd-method kurtosis,Gammad-method
### kurtosis,Geom-method kurtosis,Hyper-method kurtosis,Logis-method
### kurtosis,Lnorm-method kurtosis,Nbinom-method kurtosis,Norm-method
### kurtosis,Pois-method kurtosis,Unif-method kurtosis,Weibull-method
### kurtosis,Td-method
### Keywords: methods distribution
### ** Examples
# Variance of Exp(1) distribution
var(Exp())
#median(Exp())
IQR(Exp())
mad(Exp())
# Variance of N(1,4)^2
var(Norm(mean=1, sd=2), fun = function(x){x^2})
var(Norm(mean=1, sd=2), fun = function(x){x^2}, useApply = FALSE)
## sd -- may equivalently be replaced by var
sd(Pois()) ## uses explicit terms
sd(as(Pois(),"DiscreteDistribution")) ## uses sums
sd(as(Pois(),"UnivariateDistribution")) ## uses simulations
sd(Norm(mean=2), fun = function(x){2*x^2}) ## uses simulations
#
mad(sin(exp(Norm()+2*Pois()))) ## weird
|
# testthat unit tests for the src_* database-source constructors.
context("src")
# The constructors should all be exported as functions.
test_that("src structure is as expected", {
expect_is(src_itis, "function")
expect_is(src_gbif, "function")
expect_is(src_tpl, "function")
expect_is(src_col, "function")
})
# Calling the constructors without required credentials/paths should fail
# with informative errors; skipped on CRAN/Travis because some of these
# checks reach out to local or remote services.
test_that("src fails well", {
skip_on_cran()
skip_on_travis()
expect_error(src_itis(), "argument \"user\" is missing")
expect_error(src_col(), "Failed to connect")
expect_error(src_tpl(), "argument \"user\" is missing")
expect_error(src_gbif(), "argument \"path\" is missing")
expect_error(src_gbif("Asdf"), "is not TRUE")
# tempfile() only generates a name; the file does not exist on disk, so the
# path validation should reject it too.
f <- tempfile()
expect_error(src_gbif(f), "is not TRUE")
})
| /data/genthat_extracted_code/taxizedb/tests/test-src.R | no_license | surayaaramli/typeRrh | R | false | false | 602 | r | context("src")
test_that("src structure is as expected", {
expect_is(src_itis, "function")
expect_is(src_gbif, "function")
expect_is(src_tpl, "function")
expect_is(src_col, "function")
})
test_that("src fails well", {
skip_on_cran()
skip_on_travis()
expect_error(src_itis(), "argument \"user\" is missing")
expect_error(src_col(), "Failed to connect")
expect_error(src_tpl(), "argument \"user\" is missing")
expect_error(src_gbif(), "argument \"path\" is missing")
expect_error(src_gbif("Asdf"), "is not TRUE")
f <- tempfile()
expect_error(src_gbif(f), "is not TRUE")
})
|
library(tidyverse)
library(rgeos)
library(sf)
library(ggplot2)
library(sp)
library(geosphere)
library(rgdal)
library(tmaptools)
library(dplyr)
# Script: download NSW school datasets, locate each school's ABS SA2 region
# via a point-in-polygon overlay, and write the combined result to CSV.
#getwd()
# NOTE(review): rstudioapi-based setwd() only works when run inside RStudio;
# confirm before running non-interactively.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Set projection
wgs.84 <- get_proj4("WGS84", output = "character")
# Create the output directory on first run
if (!dir.exists("../Data Files/NSWDeptOfEd")) {
dir.create("../Data Files/NSWDeptOfEd")
}
# NSW Government Schools ONLY (requires network access)
download.file("https://data.cese.nsw.gov.au/data/dataset/027493b2-33ad-3f5b-8ed9-37cdca2b8650/resource/2ac19870-44f6-443d-a0c3-4c867f04c305/download/master_dataset.csv"
,destfile = "../Data Files/NSWDeptOfEd/Master_Dataset.csv")
# NSW Non-Government Schools ONLY
download.file("https://data.cese.nsw.gov.au/data/dataset/1d019767-d953-426c-8151-1a6503d5a08a/resource/a5871783-7dd8-4b25-be9e-7d8b9b85422f/download/datahub_nongov_locations-2017.csv"
,destfile = "../Data Files/NSWDeptOfEd/Non_Govt_Schools_Dataset.csv")
# Read in Master Schools data and turn it into a Spatial Points DF
govt_schools <- read_csv("../Data Files/NSWDeptOfEd/Master_Dataset.csv")
non_govt_schools <- read_csv("../Data Files/NSWDeptOfEd/Non_Govt_Schools_Dataset.csv")
# Keep only the columns shared by both datasets
govt_schools <- govt_schools %>%
select(School_name
,Town_suburb
,Postcode
,School_gender
,Level_of_schooling
,Latitude
,Longitude)
# The non-government file uses lower-case names; rename while selecting so
# the two frames can be row-bound.
non_govt_schools <- non_govt_schools %>%
select(School_name= school_name
,Town_suburb = town_suburb
,Postcode = postcode
,School_gender = school_gender
,Level_of_schooling = level_of_schooling
,Latitude = latitude
,Longitude = longitude)
# Create a master data frame of all schools
all_schools <- rbind(govt_schools, non_govt_schools)
# Turn master list into a Spatial Data Frame
coordinates(all_schools) <- c("Longitude", "Latitude")
# Read the ABS SA2 shape file and convert to WGS94 projection
sa2_shp <- readOGR("../Data Files/ABS/SA2_Shapefile/SA2_2016_AUST.shp", p4s = wgs.84)
sa2_nsw_shp <- subset(sa2_shp, STE_NAME16 == "New South Wales")
# Make sure the projections are the same
proj4string(all_schools) <- proj4string(sa2_nsw_shp)
#proj4string(sa2_shp)
#proj4string(data_schools)
# Get the SA2 in which the school resides (spatial point-in-polygon join)
all_schools$SA2_Code <- over(all_schools, sa2_nsw_shp)$SA2_MAIN16
# Write All Schools data frame to file.
write_csv(as.data.frame(all_schools), "../Data Files/NSWDeptOfEd/All_NSW_Schools_With_SA2.csv")
# And plot to make sure it looks legit
ggplot() +
geom_path(data = sa2_nsw_shp, aes(x = long, y = lat, group = group)) +
geom_point(data = as.data.frame(all_schools), aes(x = Longitude, y = Latitude), size = 0.1, colour = "red")
| /Raw Data/R Code/NSWGovt_SchoolsMasterDataset_Data_Extract.R | no_license | william-kent-stds/stds_edu | R | false | false | 2,712 | r | library(tidyverse)
library(rgeos)
library(sf)
library(ggplot2)
library(sp)
library(geosphere)
library(rgdal)
library(tmaptools)
library(dplyr)
#getwd()
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Set projection
wgs.84 <- get_proj4("WGS84", output = "character")
if (!dir.exists("../Data Files/NSWDeptOfEd")) {
dir.create("../Data Files/NSWDeptOfEd")
}
# NSW Government Schools ONLY
download.file("https://data.cese.nsw.gov.au/data/dataset/027493b2-33ad-3f5b-8ed9-37cdca2b8650/resource/2ac19870-44f6-443d-a0c3-4c867f04c305/download/master_dataset.csv"
,destfile = "../Data Files/NSWDeptOfEd/Master_Dataset.csv")
# NSW Non-Government Schools ONLY
download.file("https://data.cese.nsw.gov.au/data/dataset/1d019767-d953-426c-8151-1a6503d5a08a/resource/a5871783-7dd8-4b25-be9e-7d8b9b85422f/download/datahub_nongov_locations-2017.csv"
,destfile = "../Data Files/NSWDeptOfEd/Non_Govt_Schools_Dataset.csv")
# Read in Master Schools data and turn in into a Spatial Points DF
govt_schools <- read_csv("../Data Files/NSWDeptOfEd/Master_Dataset.csv")
non_govt_schools <- read_csv("../Data Files/NSWDeptOfEd/Non_Govt_Schools_Dataset.csv")
govt_schools <- govt_schools %>%
select(School_name
,Town_suburb
,Postcode
,School_gender
,Level_of_schooling
,Latitude
,Longitude)
non_govt_schools <- non_govt_schools %>%
select(School_name= school_name
,Town_suburb = town_suburb
,Postcode = postcode
,School_gender = school_gender
,Level_of_schooling = level_of_schooling
,Latitude = latitude
,Longitude = longitude)
# Create a master data frame of all schools
all_schools <- rbind(govt_schools, non_govt_schools)
# Turn master list into a Spatial Data Frame
coordinates(all_schools) <- c("Longitude", "Latitude")
# Read the ABS SA2 shape file and convert to WGS94 projection
sa2_shp <- readOGR("../Data Files/ABS/SA2_Shapefile/SA2_2016_AUST.shp", p4s = wgs.84)
sa2_nsw_shp <- subset(sa2_shp, STE_NAME16 == "New South Wales")
# Make sure the projections are the same
proj4string(all_schools) <- proj4string(sa2_nsw_shp)
#proj4string(sa2_shp)
#proj4string(data_schools)
# Get the SA2 in which the school resides
all_schools$SA2_Code <- over(all_schools, sa2_nsw_shp)$SA2_MAIN16
# Write All Schools data frame to file.
write_csv(as.data.frame(all_schools), "../Data Files/NSWDeptOfEd/All_NSW_Schools_With_SA2.csv")
# And for the plotsto make sure it looks legit
ggplot() +
geom_path(data = sa2_nsw_shp, aes(x = long, y = lat, group = group)) +
geom_point(data = as.data.frame(all_schools), aes(x = Longitude, y = Latitude), size = 0.1, colour = "red")
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Zagg.R
\name{Zagg}
\alias{Zagg}
\title{Processing output of label switching function}
\usage{
Zagg(USout, .Y = Y)
}
\arguments{
\item{output}{of UNIVARIATE label switching function}
}
\description{
Post-processes the output of the univariate label switching function.
}
\examples{
#nope
}
\keyword{postprocessing}
| /man/Zagg.Rd | no_license | zoevanhavre/Zmix_devVersion2 | R | false | false | 344 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Zagg.R
\name{Zagg}
\alias{Zagg}
\title{Processing output of label switching function}
\usage{
Zagg(USout, .Y = Y)
}
\arguments{
\item{output}{of UNIVARIATE label switching function}
}
\description{
explain here
}
\examples{
#nope
}
\keyword{postprocessing}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rocCI.R
\name{ciROC}
\alias{ciROC}
\title{Confidence Interval of ROC curve}
\usage{
ciROC(object, ...)
}
\arguments{
\item{object}{An object of class \code{"rocit"}, returned by
\code{\link{rocit}}. Supports \code{"empirical"} and \code{"binormal"}
ROC curve.}
\item{...}{Arguments to be passed to methods.
See \code{\link{ciROC.rocit}}.}
}
\description{
See \code{\link{ciROC.rocit}}.
}
| /man/ciROC.Rd | no_license | cran/ROCit | R | false | true | 486 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rocCI.R
\name{ciROC}
\alias{ciROC}
\title{Confidence Interval of ROC curve}
\usage{
ciROC(object, ...)
}
\arguments{
\item{object}{An object of class \code{"rocit"}, returned by
\code{\link{rocit}}. Supports \code{"empirical"} and \code{"binormal"}
ROC curve.}
\item{...}{Arguments to be passed to methods.
See \code{\link{ciROC.rocit}}.}
}
\description{
See \code{\link{ciROC.rocit}}.
}
|
##SAMPLING FUNCTIONS FROM PRIOR DISTRIBUTIONS
samp_int_vec<-function(x=1,y=1:10){
  # Draw one uniform integer between x and each element of y.
  #
  # Args:
  #   x: a single integer shared by all draws.
  #   y: an integer vector; each element gives the other end of the range.
  #
  # Returns: a numeric vector, same length as y, with out[i] sampled
  # uniformly from x:y[i] (descending ranges are allowed).
  #
  # The x == y[i] case must be handled separately: sample(x:x, 1) collapses
  # to sample(x, 1), which draws from 1:x instead of returning x (documented
  # sample() behaviour for a single positive number).
  out <- vector("numeric", length(y))  # preallocate instead of growing
  for (i in seq_along(y)) {  # seq_along() also makes empty y return
    if (x != y[i]) {         # numeric(0) instead of erroring via 1:0
      out[i] <- sample(x:y[i], 1, replace = TRUE)
    } else {
      out[i] <- x
    }
  }
  return(out)
}
samp_vec_int<-function(x=1:10,y=1){
  # Draw one uniform integer between each element of x and the scalar y.
  #
  # Args:
  #   x: an integer vector; each element gives one end of the range.
  #   y: a single integer shared by all draws.
  #
  # Returns: a numeric vector, same length as x, with out[i] sampled
  # uniformly from x[i]:y (descending ranges are allowed).
  #
  # The x[i] == y case must be handled separately: sample(y:y, 1) collapses
  # to sample(y, 1), which draws from 1:y instead of returning y (documented
  # sample() behaviour for a single positive number).
  out <- vector("numeric", length(x))  # preallocate instead of growing
  for (i in seq_along(x)) {  # seq_along() also makes empty x return
    if (x[i] != y) {         # numeric(0) instead of erroring via 1:0
      out[i] <- sample(x[i]:y, 1, replace = TRUE)
    } else {
      out[i] <- x[i]
    }
  }
  return(out)
}
samp_vec_vec<-function(x=1:10,y=1:10){
  # Draw one uniform integer between paired elements of x and y.
  #
  # Args:
  #   x, y: integer vectors defining the per-element ranges x[i]:y[i].
  #         Iteration follows length(y); assumes length(x) == length(y) —
  #         TODO confirm with callers.
  #
  # Returns: a numeric vector, same length as y, with out[i] sampled
  # uniformly from x[i]:y[i] (descending ranges are allowed).
  #
  # The x[i] == y[i] case must be handled separately: sample(n:n, 1)
  # collapses to sample(n, 1), which draws from 1:n instead of returning n
  # (documented sample() behaviour for a single positive number).
  out <- vector("numeric", length(y))  # preallocate instead of growing
  for (i in seq_along(y)) {  # seq_along() also makes empty y return
    if (x[i] != y[i]) {      # numeric(0) instead of erroring via 1:0
      out[i] <- sample(x[i]:y[i], 1, replace = TRUE)
    } else {
      out[i] <- x[i]
    }
  }
  return(out)
}
# Command-line configuration for the ABC simulation pipeline.
# Expected positional arguments:
#   args[1] = number of chromosomes sampled per population (nchr)
#   args[2] = locus length in bp (ll)
#   args[3] = number of loci per simulation (nloci)
#   args[4] = recombination rate (recomb)
#   args[5] = number of ABC simulations (nsims)
args<-commandArgs(trailingOnly=TRUE)
ms<-"/opt/software/genetics/ms/ms" # path to the ms coalescent simulator binary
cpd<-"./" # directory containing compute_ss.py
mod<-"mod_1a" # model label used in output file names
nchr<-as.character(args[1])
tgen<-25 # divisor applied to event times below; presumably years per generation
mu<-1.5e-8 # per-bp mutation rate used to build theta
recomb<-as.numeric(args[4])#recombination rate
ll<-as.numeric(args[2])#locus length
nsims<-as.numeric(args[5])#number of ABC simulations
nloci<-as.numeric(args[3])#loci to simulate in each sim
# Output file stem encodes the run configuration
out<-paste(mod,"_ll",as.character(ll),"_nl",as.character(nloci),"_r",as.character(recomb),"_nc",nchr,sep="")
##Prior distributions parameters as in Nater et al. 2017
#Ne Present Time
Ne1BO<-sample(300:32000,nsims,replace=T)
Ne2BO<-sample(300:32000,nsims,replace=T)
Ne3BO<-sample(300:32000,nsims,replace=T)
Ne4BO<-sample(300:32000,nsims,replace=T)
NeST<-sample(300:32000,nsims,replace=T)
Ne1NT<-sample(300:32000,nsims,replace=T)
Ne2NT<-sample(300:32000,nsims,replace=T)
#Migrations
#Mig SUBPOP BO; SUBPOP NT
MigBO<-runif(nsims,min=exp(log(10^-4)), max=exp(log(0.1))) #loguniform
MigNT<-runif(nsims,min=exp(log(10^-4)), max=exp(log(0.1)))
#Mig ST-subPop NT
Mig56<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
Mig65<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
Mig57<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
Mig75<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
#Mig ST popAnc NT
MigSTNT<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
MigNTST<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
#Mig BO-ST
MigBOST<-runif(nsims,min=exp(log(10^-6)), max=exp(log(10^-2)))
MigSTBO<-runif(nsims,min=exp(log(10^-6)), max=exp(log(10^-2)))
#Bottleneck Intensity Borneo
NeancBO<-samp_vec_int(Ne1BO,320000)
rBO<-Ne1BO/NeancBO
#Ne Ancient
NeancST<-samp_vec_int(NeST,100000)
Neanc1NT<-samp_vec_int(Ne1NT,320000)
Neanc2NT<-samp_vec_int(Ne2NT,320000)
NeancNT<-sample(1000:100000,nsims,replace=T)
#Events Times
tsep4BO<-sample(8750:400000,nsims,replace=T)
BottDur<-sample(250:100000,nsims,replace=T)
tbottend<-tsep4BO+BottDur
tbotBO<-sample(100:1000000,nsims,replace=T)
tsepBOST<-tbottend+tbotBO
tsepBOST[tsepBOST<400000]<-sample(400000:1500000,sum(tsepBOST<400000),replace=T)
tStopMig<-samp_vec_vec(tbottend,tsepBOST)
tBotNT<-sample(250:100000,nsims,replace=T)
tBotST<-tBotNT
tstrNT<-sample(100000:1500000,nsims,replace=T)
tsepNTST<-sample(1500000:4000000,nsims,replace=T)
##SCALED PARAMETERS
theta<-4*Ne1BO*mu*ll
srec<-4*Ne1BO*(recomb*(ll-1))
sNe1BO<-Ne1BO*4*mu*ll/theta
sNe2BO<-Ne2BO*4*mu*ll/theta
sNe3BO<-Ne3BO*4*mu*ll/theta
sNe4BO<-Ne4BO*4*mu*ll/theta
sNeST<-NeST*4*mu*ll/theta
sNe1NT<-Ne1NT*4*mu*ll/theta
sNe2NT<-Ne2NT*4*mu*ll/theta
sMigBO<-MigBO*4*Ne1BO
sMigNT<-MigNT*4*Ne1BO
sMig56<-Mig56*4*Ne1BO
sMig65<-Mig65*4*Ne1BO
sMig57<-Mig57*4*Ne1BO
sMig75<-Mig75*4*Ne1BO
sMigSTNT<-MigSTNT*4*Ne1BO
sMigNTST<-MigNTST*4*Ne1BO
sMigBOST<-MigBOST*4*Ne1BO
sMigSTBO<-MigSTBO*4*Ne1BO
sNeancBO<-NeancBO*4*mu*ll/theta
sNeancST<-NeancST*4*mu*ll/theta
sNeancNT<-NeancNT*4*mu*ll/theta
sNeanc1NT<-Neanc1NT*4*mu*ll/theta
sNeanc2NT<-Neanc2NT*4*mu*ll/theta
stsep4BO<-(tsep4BO/tgen)/(4*Ne1BO)
stbottend<-(tbottend/tgen)/(4*Ne1BO)
stsepBOST<-(tsepBOST/tgen)/(4*Ne1BO)
stStopMig<-(tStopMig/tgen)/(4*Ne1BO)
stBotNT<-(tBotNT/tgen)/(4*Ne1BO)
stBotST<-stBotNT
ststrNT<-(tstrNT/tgen)/(4*Ne1BO)
stsepNTST<-(tsepNTST/tgen)/(4*Ne1BO)
#Output: parameters files
partable<-cbind(Ne1BO,Ne2BO,Ne3BO,Ne4BO,NeST,Ne1NT,Ne2NT,MigBO,MigNT,Mig56,Mig65,Mig57,Mig75,MigSTNT,MigNTST,MigBOST,MigSTBO,NeancBO,rBO,NeancST,Neanc1NT,Neanc2NT,tsep4BO,BottDur,tbottend,tbotBO,tsepBOST,tStopMig,tBotNT,tBotST,tstrNT,tsepNTST)
colnames(partable)<-c("Ne1BO","Ne2BO","Ne3BO","Ne4BO","NeST","Ne1NT","Ne2NT","MigBO","MigNT","Mig56","Mig65","Mig57","Mig75","MigSTNT","MigNTST","MigBOST","MigSTBO","NeancBO","rBO","NeancST","Neanc1NT","Neanc2NT","tsep4BO","BottDur","tbottend","tbotBO","tsepBOST","tStopMig","tBotNT","tBotST","tstrNT","tsepNTST")
partablescaled<-cbind(sNe1BO,sNe2BO,sNe3BO,sNe4BO,sNeST,sNe1NT,sNe2NT,sMigBO,sMigNT,sMig56,sMig65,sMig57,sMig75,sMigSTNT,sMigNTST,sMigBOST,sMigSTBO,sNeancBO,rBO,sNeanc1NT,sNeanc2NT,sNeancST,stsep4BO,stbottend,stsepBOST,stStopMig,stBotNT,stBotST,ststrNT,stsepNTST)
write.table(partable,paste(out,".1.param",sep=""),row.names=F,quote=F,sep="\t")
write.table(partablescaled,paste(out,".1.paramscaled",sep=""),row.names=F,col.names=T,quote=F,sep="\t")
#Output summary statistics: FDSS
i<-1
for (i in 1:nsims){
s<-c()
s[1]<-paste(" -ej ",as.character(stsep4BO[i])," 4 1 ",sep="")
s[2]<-paste(" -ej ",as.character(stsep4BO[i])," 3 1 ",sep="")
s[3]<-paste(" -ej ",as.character(stsep4BO[i])," 2 1 ",sep="")
s[4]<-paste(" -en ",as.character(stsep4BO[i])," 1 ", as.character(rBO[i]),sep="")
s[5]<-paste(" -em ",as.character(stStopMig[i])," 1 5 ", as.character(sMigBOST[i]),sep="")
s[6]<-paste(" -em ",as.character(stStopMig[i])," 5 1 ", as.character(sMigSTBO[i]),sep="")
s[7]<-paste(" -en ",as.character(stBotST[i])," 5 ", as.character(sNeancST[i]),sep="")
s[8]<-paste(" -en ",as.character(stBotNT[i])," 6 ", as.character(sNeanc1NT[i]),sep="")
s[9]<-paste(" -en ",as.character(stBotNT[i])," 7 ", as.character(sNeanc2NT[i]),sep="")
s[10]<-paste(" -en ",as.character(stbottend[i])," 1 ", as.character(sNeancBO[i]),sep="")
s[11]<-paste(" -ej ",as.character(stsepBOST[i])," 1 5 ",sep="")
s[12]<-paste(" -ej ",as.character(ststrNT[i])," 7 6 ",sep="")
s[13]<-paste(" -en ",as.character(ststrNT[i])," 6 ", as.character(sNeancNT[i]),sep="")
s[14]<-paste(" -em ",as.character(ststrNT[i])," 5 6 ", as.character(sMigSTNT[i]),sep="")
s[15]<-paste(" -em ",as.character(ststrNT[i])," 6 5 ", as.character(sMigNTST[i]),sep="")
s[16]<-paste(" -ej ",as.character(stsepNTST[i])," 6 5 ",sep="")
s1<-c()
s1[1]<-stsep4BO[i]
s1[2]<-stsep4BO[i]
s1[3]<-stsep4BO[i]
s1[4]<-stsep4BO[i]
s1[5]<-stStopMig[i]
s1[6]<-stStopMig[i]
s1[7]<-stBotST[i]
s1[8]<-stBotNT[i]
s1[9]<-stBotNT[i]
s1[10]<-stbottend[i]
s1[11]<-stsepBOST[i]
s1[12]<-ststrNT[i]
s1[13]<-ststrNT[i]
s1[14]<-ststrNT[i]
s1[15]<-ststrNT[i]
s1[16]<-stsepNTST[i]
sid<-sort(s1,index.return=T)
s_sort<-s[sid$ix]
part1<-paste(s_sort,collapse="")
li1<-paste(ms," ",as.character(7*as.numeric(nchr))," ",as.character(nloci)," -t ",as.character(theta[i])," -r ",as.character(srec[i])," ",as.character(ll)," -I 7 ",nchr," ",nchr," ",nchr," ",nchr," ",nchr," ",nchr," ",nchr," -n 1 ",as.character(sNe1BO[i])," -n 2 ",as.character(sNe2BO[i])," -n 3 ",as.character(sNe3BO[i])," -n 4 ",as.character(sNe4BO[i])," -n 5 ",as.character(sNeST[i])," -n 6 ",as.character(sNe1NT[i])," -n 7 ",as.character(sNe2NT[i])," -m 1 2 ",as.character(sMigBO[i])," -m 2 1 ",as.character(sMigBO[i])," -m 1 3 ",as.character(sMigBO[i])," -m 3 1 ",as.character(sMigBO[i])," -m 1 4 ",as.character(sMigBO[i])," -m 4 1 ",as.character(sMigBO[i])," -m 2 3 ",as.character(sMigBO[i])," -m 3 2 ",as.character(sMigBO[i])," -m 2 4 ",as.character(sMigBO[i])," -m 4 2 ",as.character(sMigBO[i])," -m 3 4 ",as.character(sMigBO[i])," -m 4 3 ",as.character(sMigBO[i])," -m 5 6 ",as.character(sMig56[i])," -m 6 5 ",as.character(sMig65[i])," -m 5 7 ",as.character(sMig57[i])," -m 7 5 ",as.character(sMig75[i])," -m 6 7 ",as.character(sMigNT[i])," -m 7 6 ",as.character(sMigNT[i]), part1, sep="")
print(i)
#print(li1)
if (i==1){
system(paste(li1," | ",cpd,"compute_ss.py -np 7 -nc ",nchr," -w 30 -b 100 -s > ",out,".tab",sep=""))
}else{
system(paste(li1," | ",cpd,"compute_ss.py -np 7 -nc ",nchr," -w 30 -b 100 -s >> ",out,".tab",sep=""))
}
}
| /Orangos/mod_1a.r | no_license | anbena/ABC-FDSS | R | false | false | 8,035 | r | ##SAMPLING FUNCTIONS FROM PRIOR DISTRIBUTIONS
samp_int_vec<-function(x=1,y=1:10){
  # Draw one integer uniformly from x:y[i] for every element of y.
  #
  # x: a single integer (lower end of every range).
  # y: an integer vector of per-draw upper ends.
  # Returns a numeric vector of length(y).
  #
  # The x == y[i] case is handled explicitly because sample(n, 1) on a
  # single number samples from 1:n, not from {n} (classic sample() footgun).
  out <- numeric(length(y))          # preallocate instead of growing with c();
                                     # also returns numeric(0) (not NULL) for empty y
  for (i in seq_along(y)) {          # seq_along() is safe when y is empty
    if (x != y[i]) {
      # x:y[i] counts down when x > y[i]; either direction is a valid range.
      out[i] <- sample(x:y[i], 1, replace = TRUE)
    } else {
      out[i] <- x
    }
  }
  return(out)
}
samp_vec_int<-function(x=1:10,y=1){
  # Draw one integer uniformly from x[i]:y for every element of x.
  #
  # x: an integer vector of per-draw endpoints.
  # y: a single integer (shared other endpoint).
  # Returns a numeric vector of length(x).
  #
  # x[i]:y counts down when x[i] > y, so the range is valid in either
  # direction; the x[i] == y case is handled explicitly to avoid the
  # sample(n, 1) == sample(1:n, 1) footgun.
  out <- numeric(length(x))          # preallocate instead of growing with c()
  for (i in seq_along(x)) {          # safe for empty x (returns numeric(0))
    if (x[i] != y) {
      out[i] <- sample(x[i]:y, 1, replace = TRUE)
    } else {
      out[i] <- x[i]
    }
  }
  return(out)
}
samp_vec_vec<-function(x=1:10,y=1:10){
  # Draw one integer uniformly from x[i]:y[i] for each i.
  #
  # x, y: integer vectors; the loop runs over y, so x must be at least as
  #       long as y (the callers in this script pass equal-length vectors).
  # Returns a numeric vector of length(y).
  #
  # The x[i] == y[i] case is handled explicitly to avoid the
  # sample(n, 1) == sample(1:n, 1) footgun.
  out <- numeric(length(y))          # preallocate instead of growing with c()
  for (i in seq_along(y)) {          # safe for empty y (returns numeric(0))
    if (x[i] != y[i]) {
      out[i] <- sample(x[i]:y[i], 1, replace = TRUE)
    } else {
      out[i] <- x[i]
    }
  }
  return(out)
}
## Command-line setup.  Expected call:
##   Rscript mod_1a.r <nchr> <locus_length> <nloci> <recomb_rate> <nsims>
args<-commandArgs(trailingOnly=TRUE)
ms<-"/opt/software/genetics/ms/ms"   # path to Hudson's ms coalescent simulator
cpd<-"./"                            # directory containing compute_ss.py
mod<-"mod_1a"                        # model label used in output file names
nchr<-as.character(args[1])          # chromosomes per population (kept as character for paste())
tgen<-25                             # years per generation used to rescale event times (see Nater et al. 2017 -- confirm units)
mu<-1.5e-8                           # per-site, per-generation mutation rate
recomb<-as.numeric(args[4])#recombination rate
ll<-as.numeric(args[2])#locus length
nsims<-as.numeric(args[5])#number of ABC simulations
nloci<-as.numeric(args[3])#loci to simulate in each sim
# Basename shared by the .param/.paramscaled/.tab output files.
out<-paste(mod,"_ll",as.character(ll),"_nl",as.character(nloci),"_r",as.character(recomb),"_nc",nchr,sep="")
##Prior distributions parameters as in Nater et al. 2017
## Population indices used in the ms command below: 1-4 = Borneo (BO)
## subpopulations, 5 = ST, 6-7 = NT (see the -I 7 flag and the Mig* labels).
#Ne Present Time
Ne1BO<-sample(300:32000,nsims,replace=T)
Ne2BO<-sample(300:32000,nsims,replace=T)
Ne3BO<-sample(300:32000,nsims,replace=T)
Ne4BO<-sample(300:32000,nsims,replace=T)
NeST<-sample(300:32000,nsims,replace=T)
Ne1NT<-sample(300:32000,nsims,replace=T)
Ne2NT<-sample(300:32000,nsims,replace=T)
#Migrations
#Mig SUBPOP BO; SUBPOP NT
# NOTE(review): exp(log(x)) is just x, so despite the "loguniform" comment
# these are plain uniform draws on the stated interval -- confirm whether a
# log-uniform prior (runif on log(x), then exp()) was intended.
MigBO<-runif(nsims,min=exp(log(10^-4)), max=exp(log(0.1))) #loguniform
MigNT<-runif(nsims,min=exp(log(10^-4)), max=exp(log(0.1)))
#Mig ST-subPop NT
Mig56<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
Mig65<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
Mig57<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
Mig75<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
#Mig ST popAnc NT
MigSTNT<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
MigNTST<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
#Mig BO-ST
MigBOST<-runif(nsims,min=exp(log(10^-6)), max=exp(log(10^-2)))
MigSTBO<-runif(nsims,min=exp(log(10^-6)), max=exp(log(10^-2)))
#Bottleneck Intensity Borneo
# Ancestral BO size drawn between the current Ne1BO and 320000;
# rBO = Ne1BO/NeancBO is the size ratio applied at the -en event below.
NeancBO<-samp_vec_int(Ne1BO,320000)
rBO<-Ne1BO/NeancBO
#Ne Ancient
NeancST<-samp_vec_int(NeST,100000)
Neanc1NT<-samp_vec_int(Ne1NT,320000)
Neanc2NT<-samp_vec_int(Ne2NT,320000)
NeancNT<-sample(1000:100000,nsims,replace=T)
#Events Times
# Times below are drawn in years; they are converted to coalescent units
# (generations / 4*Ne1BO) in the "SCALED PARAMETERS" section.
tsep4BO<-sample(8750:400000,nsims,replace=T)
BottDur<-sample(250:100000,nsims,replace=T)
tbottend<-tsep4BO+BottDur
tbotBO<-sample(100:1000000,nsims,replace=T)
tsepBOST<-tbottend+tbotBO
# Any BO/ST split time that lands below 400 kyr is redrawn from 400 kyr-1.5 Myr.
tsepBOST[tsepBOST<400000]<-sample(400000:1500000,sum(tsepBOST<400000),replace=T)
# BO<->ST migration stops at a time drawn element-wise between the end of
# the bottleneck and the BO/ST split.
tStopMig<-samp_vec_vec(tbottend,tsepBOST)
tBotNT<-sample(250:100000,nsims,replace=T)
tBotST<-tBotNT
tstrNT<-sample(100000:1500000,nsims,replace=T)
tsepNTST<-sample(1500000:4000000,nsims,replace=T)
##SCALED PARAMETERS
## Conversion to ms coalescent units, with Ne1BO as the reference population:
##   theta = 4*N*mu*L, recombination rho = 4*N*r*(L-1),
##   population sizes as fractions of Ne1BO (the */theta forms simplify to
##   Ne_x/Ne1BO), migration as 4*N*m, times in units of 4*Ne1BO generations.
theta<-4*Ne1BO*mu*ll
srec<-4*Ne1BO*(recomb*(ll-1))
sNe1BO<-Ne1BO*4*mu*ll/theta
sNe2BO<-Ne2BO*4*mu*ll/theta
sNe3BO<-Ne3BO*4*mu*ll/theta
sNe4BO<-Ne4BO*4*mu*ll/theta
sNeST<-NeST*4*mu*ll/theta
sNe1NT<-Ne1NT*4*mu*ll/theta
sNe2NT<-Ne2NT*4*mu*ll/theta
sMigBO<-MigBO*4*Ne1BO
sMigNT<-MigNT*4*Ne1BO
sMig56<-Mig56*4*Ne1BO
sMig65<-Mig65*4*Ne1BO
sMig57<-Mig57*4*Ne1BO
sMig75<-Mig75*4*Ne1BO
sMigSTNT<-MigSTNT*4*Ne1BO
sMigNTST<-MigNTST*4*Ne1BO
sMigBOST<-MigBOST*4*Ne1BO
sMigSTBO<-MigSTBO*4*Ne1BO
sNeancBO<-NeancBO*4*mu*ll/theta
sNeancST<-NeancST*4*mu*ll/theta
sNeancNT<-NeancNT*4*mu*ll/theta
sNeanc1NT<-Neanc1NT*4*mu*ll/theta
sNeanc2NT<-Neanc2NT*4*mu*ll/theta
# Years -> generations (tgen) -> coalescent time (4*Ne1BO generations).
stsep4BO<-(tsep4BO/tgen)/(4*Ne1BO)
stbottend<-(tbottend/tgen)/(4*Ne1BO)
stsepBOST<-(tsepBOST/tgen)/(4*Ne1BO)
stStopMig<-(tStopMig/tgen)/(4*Ne1BO)
stBotNT<-(tBotNT/tgen)/(4*Ne1BO)
stBotST<-stBotNT
ststrNT<-(tstrNT/tgen)/(4*Ne1BO)
stsepNTST<-(tsepNTST/tgen)/(4*Ne1BO)
#Output: parameters files
partable<-cbind(Ne1BO,Ne2BO,Ne3BO,Ne4BO,NeST,Ne1NT,Ne2NT,MigBO,MigNT,Mig56,Mig65,Mig57,Mig75,MigSTNT,MigNTST,MigBOST,MigSTBO,NeancBO,rBO,NeancST,Neanc1NT,Neanc2NT,tsep4BO,BottDur,tbottend,tbotBO,tsepBOST,tStopMig,tBotNT,tBotST,tstrNT,tsepNTST)
colnames(partable)<-c("Ne1BO","Ne2BO","Ne3BO","Ne4BO","NeST","Ne1NT","Ne2NT","MigBO","MigNT","Mig56","Mig65","Mig57","Mig75","MigSTNT","MigNTST","MigBOST","MigSTBO","NeancBO","rBO","NeancST","Neanc1NT","Neanc2NT","tsep4BO","BottDur","tbottend","tbotBO","tsepBOST","tStopMig","tBotNT","tBotST","tstrNT","tsepNTST")
# NOTE(review): the scaled table lists sNeanc1NT/sNeanc2NT BEFORE sNeancST,
# the opposite order of the unscaled table above, and omits BottDur/tbotBO
# -- verify this is intentional before pairing columns across the two files.
partablescaled<-cbind(sNe1BO,sNe2BO,sNe3BO,sNe4BO,sNeST,sNe1NT,sNe2NT,sMigBO,sMigNT,sMig56,sMig65,sMig57,sMig75,sMigSTNT,sMigNTST,sMigBOST,sMigSTBO,sNeancBO,rBO,sNeanc1NT,sNeanc2NT,sNeancST,stsep4BO,stbottend,stsepBOST,stStopMig,stBotNT,stBotST,ststrNT,stsepNTST)
write.table(partable,paste(out,".1.param",sep=""),row.names=F,quote=F,sep="\t")
write.table(partablescaled,paste(out,".1.paramscaled",sep=""),row.names=F,col.names=T,quote=F,sep="\t")
#Output summary statistics: FDSS
# NOTE(review): this assignment is redundant -- the for loop below sets i.
i<-1
# One ms run per ABC simulation.  The 16 demographic events are built as
# command-line fragments (s) paired with their event times (s1) so they can
# be sorted chronologically before being appended to the ms call.
# Flags: -ej t i j = merge pop i into j at time t, -en t i x = set pop i's
# size to x*Nref at t, -em t i j m = set migration i<-j to m at t.
for (i in 1:nsims){
s<-c()
s[1]<-paste(" -ej ",as.character(stsep4BO[i])," 4 1 ",sep="")
s[2]<-paste(" -ej ",as.character(stsep4BO[i])," 3 1 ",sep="")
s[3]<-paste(" -ej ",as.character(stsep4BO[i])," 2 1 ",sep="")
s[4]<-paste(" -en ",as.character(stsep4BO[i])," 1 ", as.character(rBO[i]),sep="")
s[5]<-paste(" -em ",as.character(stStopMig[i])," 1 5 ", as.character(sMigBOST[i]),sep="")
s[6]<-paste(" -em ",as.character(stStopMig[i])," 5 1 ", as.character(sMigSTBO[i]),sep="")
s[7]<-paste(" -en ",as.character(stBotST[i])," 5 ", as.character(sNeancST[i]),sep="")
s[8]<-paste(" -en ",as.character(stBotNT[i])," 6 ", as.character(sNeanc1NT[i]),sep="")
s[9]<-paste(" -en ",as.character(stBotNT[i])," 7 ", as.character(sNeanc2NT[i]),sep="")
s[10]<-paste(" -en ",as.character(stbottend[i])," 1 ", as.character(sNeancBO[i]),sep="")
s[11]<-paste(" -ej ",as.character(stsepBOST[i])," 1 5 ",sep="")
s[12]<-paste(" -ej ",as.character(ststrNT[i])," 7 6 ",sep="")
s[13]<-paste(" -en ",as.character(ststrNT[i])," 6 ", as.character(sNeancNT[i]),sep="")
s[14]<-paste(" -em ",as.character(ststrNT[i])," 5 6 ", as.character(sMigSTNT[i]),sep="")
s[15]<-paste(" -em ",as.character(ststrNT[i])," 6 5 ", as.character(sMigNTST[i]),sep="")
s[16]<-paste(" -ej ",as.character(stsepNTST[i])," 6 5 ",sep="")
# s1[j] holds the (scaled) time of event s[j]; same order as s above.
s1<-c()
s1[1]<-stsep4BO[i]
s1[2]<-stsep4BO[i]
s1[3]<-stsep4BO[i]
s1[4]<-stsep4BO[i]
s1[5]<-stStopMig[i]
s1[6]<-stStopMig[i]
s1[7]<-stBotST[i]
s1[8]<-stBotNT[i]
s1[9]<-stBotNT[i]
s1[10]<-stbottend[i]
s1[11]<-stsepBOST[i]
s1[12]<-ststrNT[i]
s1[13]<-ststrNT[i]
s1[14]<-ststrNT[i]
s1[15]<-ststrNT[i]
s1[16]<-stsepNTST[i]
# Sort the event strings by event time via the permutation index.
sid<-sort(s1,index.return=T)
s_sort<-s[sid$ix]
part1<-paste(s_sort,collapse="")
# Full ms call: 7 populations of nchr chromosomes each, per-locus theta/rho,
# island migration among the four BO subpops plus the ST/NT rates, followed
# by the time-sorted demographic events.
li1<-paste(ms," ",as.character(7*as.numeric(nchr))," ",as.character(nloci)," -t ",as.character(theta[i])," -r ",as.character(srec[i])," ",as.character(ll)," -I 7 ",nchr," ",nchr," ",nchr," ",nchr," ",nchr," ",nchr," ",nchr," -n 1 ",as.character(sNe1BO[i])," -n 2 ",as.character(sNe2BO[i])," -n 3 ",as.character(sNe3BO[i])," -n 4 ",as.character(sNe4BO[i])," -n 5 ",as.character(sNeST[i])," -n 6 ",as.character(sNe1NT[i])," -n 7 ",as.character(sNe2NT[i])," -m 1 2 ",as.character(sMigBO[i])," -m 2 1 ",as.character(sMigBO[i])," -m 1 3 ",as.character(sMigBO[i])," -m 3 1 ",as.character(sMigBO[i])," -m 1 4 ",as.character(sMigBO[i])," -m 4 1 ",as.character(sMigBO[i])," -m 2 3 ",as.character(sMigBO[i])," -m 3 2 ",as.character(sMigBO[i])," -m 2 4 ",as.character(sMigBO[i])," -m 4 2 ",as.character(sMigBO[i])," -m 3 4 ",as.character(sMigBO[i])," -m 4 3 ",as.character(sMigBO[i])," -m 5 6 ",as.character(sMig56[i])," -m 6 5 ",as.character(sMig65[i])," -m 5 7 ",as.character(sMig57[i])," -m 7 5 ",as.character(sMig75[i])," -m 6 7 ",as.character(sMigNT[i])," -m 7 6 ",as.character(sMigNT[i]), part1, sep="")
print(i)
#print(li1)
# Pipe ms output into compute_ss.py; the first simulation truncates the
# .tab output file (>), every later one appends (>>).
if (i==1){
system(paste(li1," | ",cpd,"compute_ss.py -np 7 -nc ",nchr," -w 30 -b 100 -s > ",out,".tab",sep=""))
}else{
system(paste(li1," | ",cpd,"compute_ss.py -np 7 -nc ",nchr," -w 30 -b 100 -s >> ",out,".tab",sep=""))
}
}
|
# Coursera Course - Exploratory Data Analysis
# Week 1 - Project 1
# plot3.R
# Author: J Szijjarto
# nrows=2076000
library(data.table)
# Plot the three energy sub-metering series for 2007-02-01/02 and save the
# result as plot3.png (480x480).
plot3 <- function() {
data_file = "./data/household_power_consumption.txt"
# NOTE(review): fread()'s colClasses supports the base atomic classes;
# "Date" may be read back as character here -- the dates are compared as
# d/m/Y strings and re-parsed below anyway, but verify.
col_classes = c("Date", "character", "numeric","numeric",
"numeric","numeric","numeric","numeric","numeric")
hpc_df <- fread(data_file, na.strings="?", sep=";",
header=TRUE, colClasses=col_classes)
# Keep only the two target days.
hpc_df_feb <- hpc_df[(hpc_df$Date=="1/2/2007" | hpc_df$Date=="2/2/2007"),]
# Combine Date + Time into one POSIXct timestamp for the x axis.
hpc_df_feb$Datetime <- as.POSIXct(
strptime(paste(hpc_df_feb$Date,hpc_df_feb$Time), "%d/%m/%Y %H:%M:%S"))
plot_colors <- c("black","red","blue")
# First series via plot(), the other two overlaid with lines().
plot(hpc_df_feb$Datetime,hpc_df_feb$Sub_metering_1,type="l",
col=plot_colors[1], xlab="", ylab="Energy sub metering")
lines(hpc_df_feb$Datetime,hpc_df_feb$Sub_metering_2,type="l",
col=plot_colors[2])
lines(hpc_df_feb$Datetime,hpc_df_feb$Sub_metering_3,type="l",
col=plot_colors[3])
col_names <- c("Sub metering 1","Sub metering 2", "Sub metering 3")
# col_names is matched positionally; legend() accepts a character vector in
# that slot as the legend labels.
legend("topright", col=plot_colors, col_names, lty=1, cex=0.7)
# Copy the on-screen device to a PNG; output can differ slightly from
# drawing directly into png() (device size/fonts).
dev.copy(png, file = "plot3.png", width=480, height=480)
dev.off ()
}
| /plot3.R | no_license | ljszijjarto/ExData_Plotting1 | R | false | false | 1,209 | r | # Coursera Course - Exploratory Data Analysis
# Week 1 - Project 1
# plot3.R
# Author: J Szijjarto
# nrows=2076000
library(data.table)
# Plot the three energy sub-metering series for 2007-02-01/02 and save the
# result as plot3.png (480x480).
plot3 <- function() {
data_file = "./data/household_power_consumption.txt"
# NOTE(review): fread()'s colClasses supports the base atomic classes;
# "Date" may be read back as character here -- the dates are compared as
# d/m/Y strings and re-parsed below anyway, but verify.
col_classes = c("Date", "character", "numeric","numeric",
"numeric","numeric","numeric","numeric","numeric")
hpc_df <- fread(data_file, na.strings="?", sep=";",
header=TRUE, colClasses=col_classes)
# Keep only the two target days.
hpc_df_feb <- hpc_df[(hpc_df$Date=="1/2/2007" | hpc_df$Date=="2/2/2007"),]
# Combine Date + Time into one POSIXct timestamp for the x axis.
hpc_df_feb$Datetime <- as.POSIXct(
strptime(paste(hpc_df_feb$Date,hpc_df_feb$Time), "%d/%m/%Y %H:%M:%S"))
plot_colors <- c("black","red","blue")
# First series via plot(), the other two overlaid with lines().
plot(hpc_df_feb$Datetime,hpc_df_feb$Sub_metering_1,type="l",
col=plot_colors[1], xlab="", ylab="Energy sub metering")
lines(hpc_df_feb$Datetime,hpc_df_feb$Sub_metering_2,type="l",
col=plot_colors[2])
lines(hpc_df_feb$Datetime,hpc_df_feb$Sub_metering_3,type="l",
col=plot_colors[3])
col_names <- c("Sub metering 1","Sub metering 2", "Sub metering 3")
# col_names is matched positionally; legend() accepts a character vector in
# that slot as the legend labels.
legend("topright", col=plot_colors, col_names, lty=1, cex=0.7)
# Copy the on-screen device to a PNG; output can differ slightly from
# drawing directly into png() (device size/fonts).
dev.copy(png, file = "plot3.png", width=480, height=480)
dev.off ()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/harvard-botanists.R
\name{bot_search}
\alias{bot_search}
\title{Harvard botanist index functions}
\usage{
bot_search(name = NULL, individual = FALSE, start = NULL, fuzzy = FALSE,
remarks = NULL, speciality = NULL, country = NULL,
is_collector = FALSE, is_author = FALSE, team = FALSE, error = stop,
...)
}
\description{
Harvard botanist index functions
}
\examples{
\dontrun{
# bot_search(name = "Asa Gray")
# bot_search(name = "A. Gray")
# bot_search(remarks = "harvard")
# bot_search(name = "Gray", fuzzy = TRUE)
## FIXME - this leads to a JSON parsing error because they give
## bad JSON in some results, including this example
# bot_search(country = "China")
}
}
\keyword{internal}
| /man/bot_search.Rd | permissive | AshwinAgrawal16/scrubr | R | false | true | 774 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/harvard-botanists.R
\name{bot_search}
\alias{bot_search}
\title{Harvard botanist index functions}
\usage{
bot_search(name = NULL, individual = FALSE, start = NULL, fuzzy = FALSE,
remarks = NULL, speciality = NULL, country = NULL,
is_collector = FALSE, is_author = FALSE, team = FALSE, error = stop,
...)
}
\description{
Harvard botanist index functions
}
\examples{
\dontrun{
# bot_search(name = "Asa Gray")
# bot_search(name = "A. Gray")
# bot_search(remarks = "harvard")
# bot_search(name = "Gray", fuzzy = TRUE)
## FIXME - this leads to a JSON parsing error because they give
## bad JSON in some results, including this example
# bot_search(country = "China")
}
}
\keyword{internal}
|
library(tidyverse)
library(broom)
library(modelr)
library(nls.multstart)
# NOTE(review): confint2() used below comes from the nlstools package,
# which is not loaded here -- presumably attached interactively; verify.
# --- Clean the T. tetrathele ("TT") time series for logistic fitting -----
sea <- read_csv("data-processed/sea_processed2.csv")
TT_fit <- sea %>%
filter(species == "TT") %>%
filter(temperature < 34) %>%
# filter(cell_density > 1000) %>%
# mutate(cell_density = ifelse(cell_density < 2000, 1000, cell_density)) %>%
mutate(cell_density = ifelse(cell_density == 2200, 1200, cell_density)) %>%
# mutate(cell_density = ifelse(cell_density < 2200 & temperature == 5, 500, cell_density)) %>%
# Hand-curated outlier counts removed before fitting.
filter(cell_density != 19767, cell_density != 30185, cell_density != 23949, cell_density != 5638, cell_density != 6505,
cell_density != 14164, cell_density != 13597, cell_density != 14438, cell_density != 14650,
cell_density != 15049,cell_density != 14530, cell_density != 5993) %>%
select(temperature, rep, cell_density, cell_volume, time_since_innoc_hours, start_time) %>%
mutate(time_since_innoc_hours = ifelse(is.na(time_since_innoc_hours), 12.18056, time_since_innoc_hours)) %>%
mutate(days = time_since_innoc_hours/24) %>%
# One id per temperature x replicate series, e.g. "25_3".
unite(unique_id, temperature, rep, remove = FALSE, sep = "_")
write_csv(TT_fit, "data-processed/TT_fit_edit.csv")
write_csv(TT_fit, "data-processed/TT_fit_edit-final.csv") ## without 38C
# Quick visual check of the 25C replicates over the first 17 days.
TT_25 <- TT_fit %>%
filter(temperature == 25, days < 17)
TT_25 %>%
ggplot(aes(x = days, y = cell_density)) + geom_point() +
facet_wrap( ~ rep)
# --- Fit K/(1 + (K/1200 - 1)exp(-r t)) to every series (N0 = 1200) -------
fits_many <- TT_fit %>%
group_by(unique_id) %>%
nest() %>%
mutate(fit = purrr::map(data, ~ nls_multstart(cell_density ~ K/(1 + (K/1200 - 1)*exp(-r*days)),
data = .x,
iter = 1000,
start_lower = c(K = 1000, r = 0),
start_upper = c(K = 20000, r = 1),
supp_errors = 'N',
na.action = na.omit,
lower = c(K = 100, r = 0),
upper = c(K = 50000, r = 2),
control = nls.control(maxiter=1000, minFactor=1/204800000))))
# get summary info
info <- fits_many %>%
unnest(fit %>% map(glance))
# get params
params <- fits_many %>%
unnest(fit %>% map(tidy))
# get confidence intervals
CI <- fits_many %>%
unnest(fit %>% map(~ confint2(.x) %>%
data.frame() %>%
rename(., conf.low = X2.5.., conf.high = X97.5..))) %>%
group_by(., unique_id) %>%
# Relies on confint2() returning rows in K, r order (the formula order).
mutate(., term = c('K', 'r')) %>%
ungroup()
params <- merge(params, CI, by = intersect(names(params), names(CI)))
write_csv(params, "data-processed/params-edit.csv")
params <- read_csv("data-processed/params-edit.csv")
# --- Parameter estimates vs temperature ----------------------------------
params %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
# filter(estimate < 50000) %>%
mutate(temperature = as.numeric(temperature)) %>%
filter(temperature < 32) %>%
ggplot(aes(x = temperature, y = estimate)) + geom_point() +
geom_errorbar(aes(ymin = conf.low, ymax = conf.high)) +
facet_wrap( ~ term, scales = "free")
# Arrhenius regression: inverse_temp = 1/kT with Boltzmann k = 8.617e-5 eV/K.
params %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
# filter(estimate < 50000) %>%
mutate(temperature = as.numeric(temperature)) %>%
filter(temperature < 32) %>%
mutate(inverse_temp = (1/(.00008617*(temperature+273.15)))) %>%
filter(term == "K") %>%
ungroup() %>%
lm(log(estimate) ~ inverse_temp, data = .) %>% summary
# NOTE(review): leftover line -- `.` is undefined here (the pipe ended on
# the previous line), so this errors if the script is run top-to-bottom.
tidy(., conf.int = TRUE)
### now try with mass^1/4
# NOTE(review): `masses` is not created anywhere in this script --
# presumably loaded interactively; verify before running.
all_p <- left_join(params, masses)
write_csv(all_p, "data-processed/K-params-masses.csv")
all_p %>%
# separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
# filter(estimate < 50000) %>%
mutate(temperature = as.numeric(temperature)) %>%
filter(temperature < 32) %>%
mutate(inverse_temp = (1/(.00008617*(temperature+273.15)))) %>%
filter(term == "K") %>%
ungroup() %>%
lm(log(estimate*(mean_size^(3/4))) ~ inverse_temp, data = .) %>%
tidy(., conf.int = TRUE)
# Same regression with a fixed mean mass (68.51255, from the summary below).
all_p %>%
# separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
# filter(estimate < 50000) %>%
mutate(temperature = as.numeric(temperature)) %>%
filter(temperature < 32) %>%
mutate(inverse_temp = (1/(.00008617*(temperature+273.15)))) %>%
filter(term == "K") %>%
ungroup() %>%
lm(log(estimate*(68.51255^(3/4))) ~ inverse_temp, data = .) %>%
tidy(., conf.int = TRUE)
all_p %>%
summarise(mean_all_size = mean(mean_size))
# --- Mass-corrected and uncorrected K-vs-temperature figures -------------
all_p %>%
mutate(temperature = as.numeric(temperature)) %>%
filter(temperature < 32) %>%
mutate(inverse_temp = (1/(.00008617*(temperature+273.15)))) %>%
filter(term == "K") %>%
ggplot(aes(x = inverse_temp, y = log(estimate*(mean_size^(3/4))))) +
geom_smooth(method = "lm", color = "black") +
geom_point(size = 4, alpha = 0.5) +
geom_point(size = 4, shape = 1) +
scale_x_reverse(sec.axis = sec_axis(~((1/(.*8.62 * 10^(-5)))-273.15))) + xlab("Temperature (1/kT)") +
ylab("Ln (carrying capacity (cells/ml) * M^3/4)") +
theme(text = element_text(size=12, family = "Arial")) +
theme(panel.border = element_rect(colour = "black", fill=NA, size=0.5)) +
theme(plot.title = element_text(hjust = 0.5, size = 14)) +
theme_bw() +
theme(text = element_text(size=12, family = "Arial"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_rect(colour = "black", size=0.5),
plot.title = element_text(hjust = 0.5, size = 12)) +
ggtitle("Temperature (°C)")
ggsave("figures/figure2_mass_exponent.pdf", width = 4, height = 3.5)
params %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
mutate(temperature = as.numeric(temperature)) %>%
filter(temperature < 32) %>%
mutate(inverse_temp = (1/(.00008617*(temperature+273.15)))) %>%
filter(term == "K") %>%
ggplot(aes(x = inverse_temp, y = log(estimate))) +
geom_smooth(method = "lm", color = "black") +
geom_point(size = 4, alpha = 0.5) +
geom_point(size = 4, shape = 1) +
scale_x_reverse(sec.axis = sec_axis(~((1/(.*8.62 * 10^(-5)))-273.15))) + xlab("Temperature (1/kT)") +
ylab("Ln carrying capacity (cells/ml)") +
theme(text = element_text(size=12, family = "Arial")) +
theme(panel.border = element_rect(colour = "black", fill=NA, size=0.5)) +
theme(plot.title = element_text(hjust = 0.5, size = 14)) +
theme_bw() +
theme(text = element_text(size=12, family = "Arial"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_rect(colour = "black", size=0.5),
plot.title = element_text(hjust = 0.5, size = 12)) +
ggtitle("Temperature (°C)")
ggsave("figures/figure2_no_32_edit.pdf", width = 4, height = 3.5)
# --- Fitted trajectories on a common 150-point day grid ------------------
new_preds <- TT_fit %>%
do(., data.frame(days = seq(min(.$days), max(.$days), length.out = 150), stringsAsFactors = FALSE))
preds2 <- fits_many %>%
unnest(fit %>% map(augment, newdata = new_preds))
preds3 <- preds2 %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
mutate(temperature = as.numeric(temperature))
ggplot() +
# geom_ribbon(aes(ymin = lwr_CI, ymax = upr_CI, x = days), data = filter(preds_boot, temperature < 33), alpha = .3, fill = "grey") +
geom_line(aes(x = days, y = .fitted), data = filter(preds3, temperature < 33)) +
# geom_line(aes(x = days, y = .fitted), data = filter(preds1b, temperature < 33), color = "red") +
facet_grid(temperature ~ rep, labeller = labeller(.multi_line = FALSE)) +
theme(strip.background = element_rect(colour="white", fill="white")) +
theme(text = element_text(size=14, family = "Arial")) +
# geom_point(aes(x = days, y = cell_density), data = filter(TT_fit1, temperature < 33), color = "red") +
geom_point(aes(x = days, y = cell_density), data = filter(TT_fit, temperature < 33)) +
xlab("Time (days)") + ylab("Population abundance (cells/ml)")
ggsave("figures/growth_trajectories_boot2_withCI_32C_new.pdf", width = 10, height = 10)
# Fit the logistic growth model to one temperature x replicate data frame
# and return its pseudo-R^2 together with the series id.
#
# data: a data frame with columns cell_density, days and unique_id (one
#       growth trajectory, as produced by split(TT_fit, TT_fit$unique_id)).
# Returns a 1x2 character matrix with columns "rsqr" and "unique_id";
# cbind() coerces the numeric R^2 to character, and downstream code
# converts it back with as.numeric(as.character(.)).
fit_growth <- function(data){
df <- data
# Logistic fit with the starting density fixed at 1200 cells/ml, matching
# the logistic() helper used for the expected values below.
res <- nls_multstart(cell_density ~ K/(1 + (K/1200 - 1)*exp(-r*days)),
data = df,
iter = 500,
start_lower = c(K = 100, r = 0),
start_upper = c(K = 10000, r = 1),
supp_errors = 'Y',
na.action = na.omit,
lower = c(K = 100, r = 0),
upper = c(K = 50000, r = 2),
control = nls.control(maxiter=1000, minFactor=1/204800000))
# Extract coefficients by name rather than by position: the previous
# coef(res)[2] / coef(res)[1] silently breaks if the parameter order in
# the fitted object ever changes.
expected<-logistic(df$days, coef(res)[["r"]], coef(res)[["K"]])
# Pseudo-R^2: 1 - SS_residual / SS_total around the observed mean.
rsqr<-1-sum((df$cell_density-expected)^2)/sum((df$cell_density-mean(df$cell_density))^2)
names(rsqr) <- "rsquared"
unique_id <- df$unique_id[[1]]
all <- cbind(rsqr, unique_id)
return(all)
}
logistic <- function(days, r, K){
  # Logistic growth curve with the inoculation density fixed at 1200
  # cells/ml:  N(t) = K / (1 + (K/N0 - 1) * exp(-r * t)),  N0 = 1200.
  # days may be a vector; r and K are the fitted growth rate and capacity.
  n0 <- 1200
  decay <- exp(-r * days)
  K / (1 + (K / n0 - 1) * decay)
}
# --- Pseudo-R^2 for every sub-32C series ---------------------------------
tt_split <- TT_fit %>%
filter(temperature < 32) %>%
split(.$unique_id)
all_output1 <- tt_split %>%
map_df(fit_growth)
# fit_growth() returns 1x2 character matrices; transpose/rename, then
# convert rsq back to numeric below (hence as.numeric(as.character(.))).
rsqrs <- all_output1 %>%
t(.) %>%
data.frame(.) %>%
rename(rsq = X1,
unique_id = X2)
write_csv(rsqrs, "data-processed/rsq-edit.csv")
# NOTE(review): View() opens an interactive viewer -- harmless in RStudio,
# but not meaningful when the script is run non-interactively.
View(rsqrs)
rsqrs %>%
filter(!grepl("32", unique_id)) %>%
mutate(rsq = as.numeric(as.character(rsq))) %>%
summarise(mean_r = mean(rsq))
# Fit 32 separately -------------------------------------------------------
TT_32 <- TT_fit %>%
filter(temperature == 32) %>%
mutate(cell_density = ifelse(cell_density == 2200, 1200, cell_density))
# NOTE(review): this formula uses a starting density of 1000 (K/1000 - 1),
# while every other fit in this script (including fit_growth32 below) uses
# 1200 -- confirm the mismatch is intentional.
fits_32 <- TT_32 %>%
group_by(unique_id) %>%
nest() %>%
mutate(fit = purrr::map(data, ~ nls_multstart(cell_density ~ K/(1 + (K/1000 - 1)*exp(-r*days)),
data = .x,
iter = 500,
start_lower = c(K = 100, r = 0),
start_upper = c(K = 5000, r = 1),
supp_errors = 'N',
na.action = na.omit,
lower = c(K = 100, r = 0),
upper = c(K = 5000, r = 2),
control = nls.control(maxiter=1000, minFactor=1/204800000))))
# Fitted 32C trajectories on the shared day grid.
preds32 <- fits_32 %>%
unnest(fit %>% map(augment, newdata = new_preds))
preds32 <- preds32 %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
mutate(temperature = as.numeric(temperature))
params32 <- fits_32 %>%
unnest(fit %>% map(tidy))
write_csv(params32, "data-processed/params32.csv")
# get confidence intervals
CI32 <- fits_32 %>%
unnest(fit %>% map(~ confint2(.x) %>%
data.frame() %>%
rename(., conf.low = X2.5.., conf.high = X97.5..))) %>%
group_by(., unique_id) %>%
# Relies on confint2() returning rows in K, r order (the formula order).
mutate(., term = c('K', 'r')) %>%
ungroup()
params32 <- merge(params32, CI32, by = intersect(names(params32), names(CI32)))
# 32C variant of fit_growth(): identical logistic model (N0 = 1200) but
# with a tighter search range for K (upper bound 5000 instead of 50000).
#
# data: one temperature x replicate data frame (cell_density, days,
#       unique_id).  Returns a 1x2 character matrix ("rsqr", "unique_id"),
#       the same shape fit_growth() produces.
fit_growth32 <- function(data){
df <- data
res <- nls_multstart(cell_density ~ K/(1 + (K/1200 - 1)*exp(-r*days)),
data = df,
iter = 500,
start_lower = c(K = 100, r = 0),
start_upper = c(K = 1000, r = 1),
supp_errors = 'Y',
na.action = na.omit,
lower = c(K = 100, r = 0),
upper = c(K = 5000, r = 2),
control = nls.control(maxiter=1000, minFactor=1/204800000))
# Named coefficient extraction (was coef(res)[2] / coef(res)[1]) so the
# result does not depend on the parameter order in the fitted object.
expected<-logistic(df$days, coef(res)[["r"]], coef(res)[["K"]])
# Pseudo-R^2: 1 - SS_residual / SS_total around the observed mean.
rsqr<-1-sum((df$cell_density-expected)^2)/sum((df$cell_density-mean(df$cell_density))^2)
names(rsqr) <- "rsquared"
unique_id <- df$unique_id[[1]]
all <- cbind(rsqr, unique_id)
return(all)
}
# --- Pseudo-R^2 and bootstrap CIs for the 32C series ---------------------
tt_split32 <- TT_32 %>%
split(.$unique_id)
all_output32 <- tt_split32 %>%
map_df(fit_growth32)
# Same character-matrix round-trip as for the sub-32C series above.
rsqrs_32 <- all_output32 %>%
t(.) %>%
data.frame(.) %>%
rename(rsq = X1,
unique_id = X2)
ggplot() +
# geom_ribbon(aes(ymin = lwr_CI, ymax = upr_CI, x = days), data = filter(preds_boot, temperature < 33), alpha = .3, fill = "grey") +
geom_line(aes(x = days, y = .fitted), data = filter(preds32, temperature < 33)) +
# geom_line(aes(x = days, y = .fitted), data = filter(preds1b, temperature < 33), color = "red") +
facet_wrap( ~ rep, labeller = labeller(.multi_line = FALSE)) +
theme(strip.background = element_rect(colour="white", fill="white")) +
theme(text = element_text(size=14, family = "Arial")) +
# geom_point(aes(x = days, y = cell_density), data = filter(TT_fit1, temperature < 33), color = "red") +
geom_point(aes(x = days, y = cell_density), data = TT_32) +
xlab("Time (days)") + ylab("Population abundance (cells/ml)")
# Bootstrap 32 ------------------------------------------------------------
boot_32 <- group_by(TT_32, unique_id) %>%
# create 1000 bootstrap replicates per curve (n = 1000; boot_num is the id)
do(., boot = modelr::bootstrap(., n = 1000, id = 'boot_num')) %>%
# unnest to show bootstrap number, .id
unnest() %>%
# regroup to include the boot_num
group_by(., unique_id, boot_num) %>%
# run the model using map()
mutate(fit = map(strap, ~ nls_multstart(cell_density ~ K/(1 + (K/1200 - 1)*exp(-r*days)),
data = data.frame(.),
iter = 50,
start_lower = c(K = 100, r = 0),
start_upper = c(K = 10000, r = 1),
supp_errors = 'Y',
na.action = na.omit,
lower = c(K = 100, r = 0),
upper = c(K = 5000, r = 2),
control = nls.control(maxiter=1000, minFactor=1/204800000))))
info32_b <- boot_32 %>%
unnest(fit %>% map(glance))
# Ids of bootstrap fits that produced tidy-able coefficients.
preds_id32 <- boot_32 %>%
unnest(fit %>% map(tidy)) %>%
unite(uid, unique_id, boot_num, remove = FALSE) %>%
distinct(uid)
boots_id32 <- boot_32 %>%
unite(uid, unique_id, boot_num, remove = FALSE) %>%
distinct(uid)
# Pointwise 95% CI of the fitted curves across bootstrap replicates.
preds_many_boot32 <- boot_32 %>%
unite(uid, unique_id, boot_num, remove = FALSE) %>%
filter(uid %in% preds_id32$uid) %>%
unnest(fit %>% map(augment, newdata = new_preds)) %>%
ungroup() %>%
# group by each value of days and get quantiles
group_by(., unique_id, days) %>%
summarise(lwr_CI = quantile(.fitted, 0.025),
upr_CI = quantile(.fitted, 0.975)) %>%
ungroup()
write_csv(preds_many_boot32, "data-processed/preds_many_boot_edit32.csv")
preds_many_boot32 <- read_csv("data-processed/preds_many_boot_edit32.csv")
preds_boot32 <- preds_many_boot32 %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
mutate(temperature = as.numeric(temperature))
ggplot() +
geom_ribbon(aes(ymin = lwr_CI, ymax = upr_CI, x = days), data = filter(preds_boot32), alpha = .3, fill = "grey") +
geom_line(aes(x = days, y = .fitted), data = filter(preds32, temperature < 33)) +
# geom_line(aes(x = days, y = .fitted), data = filter(preds1b, temperature < 33), color = "red") +
facet_grid(temperature ~ rep, labeller = labeller(.multi_line = FALSE)) +
theme(strip.background = element_rect(colour="white", fill="white")) +
theme(text = element_text(size=14, family = "Arial")) +
# geom_point(aes(x = days, y = cell_density), data = filter(TT_fit1, temperature < 33), color = "red") +
geom_point(aes(x = days, y = cell_density), data = TT_32) +
xlab("Time (days)") + ylab("Population abundance (cells/ml)")
# bootstrap ---------------------------------------------------------------
# Bootstrap CIs for every temperature x replicate series, same recipe as
# the 32C block but with the wider K search range (upper bound 50000).
boot_many <- group_by(TT_fit, unique_id) %>%
# create 1000 bootstrap replicates per curve (n = 1000; boot_num is the id)
do(., boot = modelr::bootstrap(., n = 1000, id = 'boot_num')) %>%
# unnest to show bootstrap number, .id
unnest() %>%
# regroup to include the boot_num
group_by(., unique_id, boot_num) %>%
# run the model using map()
mutate(fit = map(strap, ~ nls_multstart(cell_density ~ K/(1 + (K/1200 - 1)*exp(-r*days)),
data = data.frame(.),
iter = 50,
start_lower = c(K = 100, r = 0),
start_upper = c(K = 10000, r = 1),
supp_errors = 'Y',
na.action = na.omit,
lower = c(K = 100, r = 0),
upper = c(K = 50000, r = 2),
control = nls.control(maxiter=1000, minFactor=1/204800000))))
# Ids of bootstrap fits that produced tidy-able coefficients.
preds_id <- boot_many %>%
unnest(fit %>% map(tidy)) %>%
unite(uid, unique_id, boot_num, remove = FALSE) %>%
distinct(uid)
boots_id <- boot_many %>%
unite(uid, unique_id, boot_num, remove = FALSE) %>%
distinct(uid)
# Pointwise 95% CI of the fitted curves across bootstrap replicates.
preds_many_boot <- boot_many %>%
unite(uid, unique_id, boot_num, remove = FALSE) %>%
filter(uid %in% preds_id$uid) %>%
unnest(fit %>% map(augment, newdata = new_preds)) %>%
ungroup() %>%
# group by each value of days and get quantiles
group_by(., unique_id, days) %>%
summarise(lwr_CI = quantile(.fitted, 0.025),
upr_CI = quantile(.fitted, 0.975)) %>%
ungroup()
write_csv(preds_many_boot, "data-processed/preds_many_boot_edit.csv")
preds_many_boot <- read_csv("data-processed/preds_many_boot_edit.csv")
preds_boot <- preds_many_boot %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
mutate(temperature = as.numeric(temperature))
# Combined figure: bootstrap ribbons + fitted curves + raw points for the
# sub-32C fits and the separate 32C fits.
ggplot() +
geom_ribbon(aes(ymin = lwr_CI, ymax = upr_CI, x = days), data = filter(preds_boot, temperature < 33), alpha = .3, fill = "grey") +
geom_ribbon(aes(ymin = lwr_CI, ymax = upr_CI, x = days), data = preds_boot32, alpha = .3, fill = "grey") +
geom_line(aes(x = days, y = .fitted), data = filter(preds3, temperature < 32)) +
geom_line(aes(x = days, y = .fitted), data = filter(preds32, temperature < 33)) +
# geom_line(aes(x = days, y = .fitted), data = filter(preds1b, temperature < 33), color = "red") +
facet_grid(temperature ~ rep, labeller = labeller(.multi_line = FALSE)) +
theme(strip.background = element_rect(colour="white", fill="white")) +
theme(text = element_text(size=14, family = "Arial")) +
# geom_point(aes(x = days, y = cell_density), data = filter(TT_fit1, temperature < 33), color = "red") +
geom_point(aes(x = days, y = cell_density), data = filter(TT_fit, temperature < 33)) +
xlab("Time (days)") + ylab("Population abundance (cells/ml)")
ggsave("figures/growth_trajectories_withCI_32C_edit_bs.pdf", width = 10, height = 10)
# figure S2 in the supplement ---------------------------------------------
# Reload saved K estimates: sub-32C fits (rows whose id does not contain
# "32") plus the separately fitted 32C parameters.
paramscool <- read_csv("data-processed/params-edit.csv") %>%
filter(!grepl("32", unique_id)) %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
mutate(temperature = as.numeric(temperature)) %>%
# filter(temperature < 32) %>%
mutate(inverse_temp = (1/(.00008617*(temperature+273.15)))) %>%
filter(term == "K")
params32 <- read_csv("data-processed/params32.csv") %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
mutate(temperature = as.numeric(temperature)) %>%
# filter(temperature < 32) %>%
mutate(inverse_temp = (1/(.00008617*(temperature+273.15)))) %>%
filter(term == "K")
all_params <- bind_rows(paramscool, params32)
# Arrhenius plot of ln(K): regression on sub-32C data only; 32C estimates
# overlaid as open circles (not included in the lm fit).
paramscool %>%
ggplot(aes(x = inverse_temp, y = log(estimate))) +
geom_smooth(method = "lm", color = "black") +
geom_point(size = 4, alpha = 0.5) +
geom_point(size = 4, shape = 1) +
geom_point(aes(x = inverse_temp, y = log(estimate)), data = params32, shape = 1, size = 4) +
scale_x_reverse(sec.axis = sec_axis(~((1/(.*8.62 * 10^(-5)))-273.15))) + xlab("Temperature (1/kT)") +
ylab("Ln carrying capacity (cells/ml)") +
theme(text = element_text(size=12, family = "Arial")) +
theme(panel.border = element_rect(colour = "black", fill=NA, size=0.5)) +
theme(plot.title = element_text(hjust = 0.5, size = 14)) +
theme_bw() +
theme(text = element_text(size=12, family = "Arial"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_rect(colour = "black", size=0.5),
plot.title = element_text(hjust = 0.5, size = 12)) +
ggtitle("Temperature (°C)")
ggsave("figures/figure2_with_32_edit.pdf", width = 4, height = 3.5)
# NOTE(review): stray expression (prints 8.517...) -- interactive leftover.
log(5000)
| /Rscripts/23_logistic.R | no_license | OConnor-Lab-UBC/J-TEMP | R | false | false | 19,560 | r | library(tidyverse)
library(broom)
library(modelr)
library(nls.multstart)
sea <- read_csv("data-processed/sea_processed2.csv")
TT_fit <- sea %>%
filter(species == "TT") %>%
filter(temperature < 34) %>%
# filter(cell_density > 1000) %>%
# mutate(cell_density = ifelse(cell_density < 2000, 1000, cell_density)) %>%
mutate(cell_density = ifelse(cell_density == 2200, 1200, cell_density)) %>%
# mutate(cell_density = ifelse(cell_density < 2200 & temperature == 5, 500, cell_density)) %>%
filter(cell_density != 19767, cell_density != 30185, cell_density != 23949, cell_density != 5638, cell_density != 6505,
cell_density != 14164, cell_density != 13597, cell_density != 14438, cell_density != 14650,
cell_density != 15049,cell_density != 14530, cell_density != 5993) %>%
select(temperature, rep, cell_density, cell_volume, time_since_innoc_hours, start_time) %>%
mutate(time_since_innoc_hours = ifelse(is.na(time_since_innoc_hours), 12.18056, time_since_innoc_hours)) %>%
mutate(days = time_since_innoc_hours/24) %>%
unite(unique_id, temperature, rep, remove = FALSE, sep = "_")
write_csv(TT_fit, "data-processed/TT_fit_edit.csv")
write_csv(TT_fit, "data-processed/TT_fit_edit-final.csv") ## without 38C
TT_25 <- TT_fit %>%
filter(temperature == 25, days < 17)
TT_25 %>%
ggplot(aes(x = days, y = cell_density)) + geom_point() +
facet_wrap( ~ rep)
## Fit a logistic growth model (fixed initial density 1200 cells/ml) to each
## temperature_rep curve with multi-start nonlinear least squares; K is the
## carrying capacity and r the intrinsic growth rate.
fits_many <- TT_fit %>%
group_by(unique_id) %>%
nest() %>%
mutate(fit = purrr::map(data, ~ nls_multstart(cell_density ~ K/(1 + (K/1200 - 1)*exp(-r*days)),
data = .x,
iter = 1000,
start_lower = c(K = 1000, r = 0),
start_upper = c(K = 20000, r = 1),
supp_errors = 'N',
na.action = na.omit,
lower = c(K = 100, r = 0),
upper = c(K = 50000, r = 2),
control = nls.control(maxiter=1000, minFactor=1/204800000))))
# get summary info
info <- fits_many %>%
unnest(fit %>% map(glance))
# get params
params <- fits_many %>%
unnest(fit %>% map(tidy))
# get confidence intervals
## confint2() returns columns named X2.5.. / X97.5..; rows alternate K then r
## per unique_id, hence the mutate(term = c('K', 'r')) below.
CI <- fits_many %>%
unnest(fit %>% map(~ confint2(.x) %>%
data.frame() %>%
rename(., conf.low = X2.5.., conf.high = X97.5..))) %>%
group_by(., unique_id) %>%
mutate(., term = c('K', 'r')) %>%
ungroup()
params <- merge(params, CI, by = intersect(names(params), names(CI)))
write_csv(params, "data-processed/params-edit.csv")
params <- read_csv("data-processed/params-edit.csv")
## estimates with CIs by temperature, faceted by parameter (K and r)
params %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
# filter(estimate < 50000) %>%
mutate(temperature = as.numeric(temperature)) %>%
filter(temperature < 32) %>%
ggplot(aes(x = temperature, y = estimate)) + geom_point() +
geom_errorbar(aes(ymin = conf.low, ymax = conf.high)) +
facet_wrap( ~ term, scales = "free")
## Arrhenius-style regression of ln(K) on inverse temperature (1/kT) for
## the sub-32C fits, reported as a tidy coefficient table with CIs.
## NOTE(review): the original ended the pipeline at `summary` and then issued
## a separate `tidy(., conf.int = TRUE)` statement; at top level `.` is
## undefined, so that line errored.  The tidy() call now terminates the
## pipeline, matching the equivalent all_p analyses below.
params %>%
  separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
  # filter(estimate < 50000) %>%
  mutate(temperature = as.numeric(temperature)) %>%
  filter(temperature < 32) %>%
  # 1/kT with Boltzmann's constant in eV/K
  mutate(inverse_temp = (1/(.00008617*(temperature+273.15)))) %>%
  filter(term == "K") %>%
  ungroup() %>%
  lm(log(estimate) ~ inverse_temp, data = .) %>%
  tidy(., conf.int = TRUE)
### now try with mass^1/4
## Mass-corrected carrying capacity: multiply K by mean cell size^(3/4)
## before the Arrhenius regression.  `masses` (per-unique_id mean_size) is
## assumed to be defined earlier in the session -- TODO confirm its source.
all_p <- left_join(params, masses)
write_csv(all_p, "data-processed/K-params-masses.csv")
## regression using each curve's own mean cell size
all_p %>%
# separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
# filter(estimate < 50000) %>%
mutate(temperature = as.numeric(temperature)) %>%
filter(temperature < 32) %>%
mutate(inverse_temp = (1/(.00008617*(temperature+273.15)))) %>%
filter(term == "K") %>%
ungroup() %>%
lm(log(estimate*(mean_size^(3/4))) ~ inverse_temp, data = .) %>%
tidy(., conf.int = TRUE)
## same regression but with a single grand-mean cell size (68.51255,
## computed by the summarise below) applied to every curve
all_p %>%
# separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
# filter(estimate < 50000) %>%
mutate(temperature = as.numeric(temperature)) %>%
filter(temperature < 32) %>%
mutate(inverse_temp = (1/(.00008617*(temperature+273.15)))) %>%
filter(term == "K") %>%
ungroup() %>%
lm(log(estimate*(68.51255^(3/4))) ~ inverse_temp, data = .) %>%
tidy(., conf.int = TRUE)
all_p %>%
summarise(mean_all_size = mean(mean_size))
## publication-style plot of mass-corrected ln(K) vs 1/kT
all_p %>%
mutate(temperature = as.numeric(temperature)) %>%
filter(temperature < 32) %>%
mutate(inverse_temp = (1/(.00008617*(temperature+273.15)))) %>%
filter(term == "K") %>%
ggplot(aes(x = inverse_temp, y = log(estimate*(mean_size^(3/4))))) +
geom_smooth(method = "lm", color = "black") +
geom_point(size = 4, alpha = 0.5) +
geom_point(size = 4, shape = 1) +
scale_x_reverse(sec.axis = sec_axis(~((1/(.*8.62 * 10^(-5)))-273.15))) + xlab("Temperature (1/kT)") +
ylab("Ln (carrying capacity (cells/ml) * M^3/4)") +
theme(text = element_text(size=12, family = "Arial")) +
theme(panel.border = element_rect(colour = "black", fill=NA, size=0.5)) +
theme(plot.title = element_text(hjust = 0.5, size = 14)) +
theme_bw() +
theme(text = element_text(size=12, family = "Arial"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_rect(colour = "black", size=0.5),
plot.title = element_text(hjust = 0.5, size = 12)) +
ggtitle("Temperature (°C)")
ggsave("figures/figure2_mass_exponent.pdf", width = 4, height = 3.5)
## Uncorrected version of the same figure: ln(K) vs 1/kT for the sub-32C
## fits, with a secondary x axis showing temperature in degrees C.
params %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
mutate(temperature = as.numeric(temperature)) %>%
filter(temperature < 32) %>%
mutate(inverse_temp = (1/(.00008617*(temperature+273.15)))) %>%
filter(term == "K") %>%
ggplot(aes(x = inverse_temp, y = log(estimate))) +
geom_smooth(method = "lm", color = "black") +
geom_point(size = 4, alpha = 0.5) +
geom_point(size = 4, shape = 1) +
scale_x_reverse(sec.axis = sec_axis(~((1/(.*8.62 * 10^(-5)))-273.15))) + xlab("Temperature (1/kT)") +
ylab("Ln carrying capacity (cells/ml)") +
theme(text = element_text(size=12, family = "Arial")) +
theme(panel.border = element_rect(colour = "black", fill=NA, size=0.5)) +
theme(plot.title = element_text(hjust = 0.5, size = 14)) +
theme_bw() +
theme(text = element_text(size=12, family = "Arial"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_rect(colour = "black", size=0.5),
plot.title = element_text(hjust = 0.5, size = 12)) +
ggtitle("Temperature (°C)")
ggsave("figures/figure2_no_32_edit.pdf", width = 4, height = 3.5)
## Prediction grid (150 evenly spaced time points spanning the observed
## days) used to draw fitted curves; then overlay fitted trajectories on the
## raw counts, one panel per temperature x replicate.
new_preds <- TT_fit %>%
do(., data.frame(days = seq(min(.$days), max(.$days), length.out = 150), stringsAsFactors = FALSE))
preds2 <- fits_many %>%
unnest(fit %>% map(augment, newdata = new_preds))
preds3 <- preds2 %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
mutate(temperature = as.numeric(temperature))
ggplot() +
# geom_ribbon(aes(ymin = lwr_CI, ymax = upr_CI, x = days), data = filter(preds_boot, temperature < 33), alpha = .3, fill = "grey") +
geom_line(aes(x = days, y = .fitted), data = filter(preds3, temperature < 33)) +
# geom_line(aes(x = days, y = .fitted), data = filter(preds1b, temperature < 33), color = "red") +
facet_grid(temperature ~ rep, labeller = labeller(.multi_line = FALSE)) +
theme(strip.background = element_rect(colour="white", fill="white")) +
theme(text = element_text(size=14, family = "Arial")) +
# geom_point(aes(x = days, y = cell_density), data = filter(TT_fit1, temperature < 33), color = "red") +
geom_point(aes(x = days, y = cell_density), data = filter(TT_fit, temperature < 33)) +
xlab("Time (days)") + ylab("Population abundance (cells/ml)")
ggsave("figures/growth_trajectories_boot2_withCI_32C_new.pdf", width = 10, height = 10)
#' Fit a logistic growth curve to one population time series and return R^2.
#'
#' Fits cell_density ~ K / (1 + (K/1200 - 1) * exp(-r * days)) -- logistic
#' growth from a fixed initial density of 1200 cells/ml -- with multi-start
#' nonlinear least squares, then computes the coefficient of determination
#' of the fit against the observed densities.
#'
#' The multistart settings are now parameters with defaults equal to the
#' original hard-coded values, so existing calls behave identically; the
#' 32C variant (fit_growth32) differs from this function only in these
#' bounds.
#'
#' @param data data frame for one unique_id with columns cell_density, days
#'   and unique_id.
#' @param iter number of multistart iterations passed to nls_multstart().
#' @param start_upper upper bounds for the random start values, c(K=, r=).
#' @param upper hard upper bounds on the parameter estimates, c(K=, r=).
#' @return 1x2 character matrix with columns "rsqr" (R-squared of the fit)
#'   and "unique_id".
fit_growth <- function(data, iter = 500,
                       start_upper = c(K = 10000, r = 1),
                       upper = c(K = 50000, r = 2)) {
  df <- data
  res <- nls_multstart(cell_density ~ K/(1 + (K/1200 - 1)*exp(-r*days)),
                       data = df,
                       iter = iter,
                       start_lower = c(K = 100, r = 0),
                       start_upper = start_upper,
                       supp_errors = 'Y',
                       na.action = na.omit,
                       lower = c(K = 100, r = 0),
                       upper = upper,
                       control = nls.control(maxiter = 1000, minFactor = 1/204800000))
  # coef(res) is ordered c(K, r) (order of appearance in the formula),
  # while logistic() takes (days, r, K) -- hence the index swap.
  expected <- logistic(df$days, coef(res)[2], coef(res)[1])
  # R^2 = 1 - SSE/SST of the fitted logistic curve
  rsqr <- 1 - sum((df$cell_density - expected)^2) /
    sum((df$cell_density - mean(df$cell_density))^2)
  names(rsqr) <- "rsquared"
  unique_id <- df$unique_id[[1]]
  all <- cbind(rsqr, unique_id)
  return(all)
}
## Logistic growth curve with a fixed starting density of 1200 cells/ml.
## Returns the expected density after `days` days given intrinsic growth
## rate `r` and carrying capacity `K`; vectorized over `days`.
logistic <- function(days, r, K) {
  K / (1 + (K / 1200 - 1) * exp(-r * days))
}
## Per-curve R^2 for the sub-32C fits: split by unique_id, refit with
## fit_growth(), and reshape the 1x2 matrices into a data frame.
tt_split <- TT_fit %>%
filter(temperature < 32) %>%
split(.$unique_id)
all_output1 <- tt_split %>%
map_df(fit_growth)
rsqrs <- all_output1 %>%
t(.) %>%
data.frame(.) %>%
rename(rsq = X1,
unique_id = X2)
write_csv(rsqrs, "data-processed/rsq-edit.csv")
View(rsqrs)
## mean R^2 across all non-32C curves
rsqrs %>%
filter(!grepl("32", unique_id)) %>%
mutate(rsq = as.numeric(as.character(rsq))) %>%
summarise(mean_r = mean(rsq))
# Fit 32 separately -------------------------------------------------------
TT_32 <- TT_fit %>%
filter(temperature == 32) %>%
mutate(cell_density = ifelse(cell_density == 2200, 1200, cell_density))
## NOTE(review): this 32C model uses an initial density of 1000 (K/1000)
## whereas every other fit uses 1200 -- confirm this is intentional.
fits_32 <- TT_32 %>%
group_by(unique_id) %>%
nest() %>%
mutate(fit = purrr::map(data, ~ nls_multstart(cell_density ~ K/(1 + (K/1000 - 1)*exp(-r*days)),
data = .x,
iter = 500,
start_lower = c(K = 100, r = 0),
start_upper = c(K = 5000, r = 1),
supp_errors = 'N',
na.action = na.omit,
lower = c(K = 100, r = 0),
upper = c(K = 5000, r = 2),
control = nls.control(maxiter=1000, minFactor=1/204800000))))
preds32 <- fits_32 %>%
unnest(fit %>% map(augment, newdata = new_preds))
preds32 <- preds32 %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
mutate(temperature = as.numeric(temperature))
params32 <- fits_32 %>%
unnest(fit %>% map(tidy))
write_csv(params32, "data-processed/params32.csv")
# get confidence intervals
## confint2() rows alternate K then r per unique_id (see CI above)
CI32 <- fits_32 %>%
unnest(fit %>% map(~ confint2(.x) %>%
data.frame() %>%
rename(., conf.low = X2.5.., conf.high = X97.5..))) %>%
group_by(., unique_id) %>%
mutate(., term = c('K', 'r')) %>%
ungroup()
params32 <- merge(params32, CI32, by = intersect(names(params32), names(CI32)))
## Same logistic fit and R^2 computation as fit_growth(), but with the
## narrower multistart/upper bounds used for the 32C populations
## (start_upper K = 1000, hard upper K = 5000 cells/ml).
## Returns a 1x2 character matrix: columns "rsqr" and "unique_id".
fit_growth32 <- function(data) {
  fit <- nls_multstart(cell_density ~ K/(1 + (K/1200 - 1)*exp(-r*days)),
                       data = data,
                       iter = 500,
                       start_lower = c(K = 100, r = 0),
                       start_upper = c(K = 1000, r = 1),
                       supp_errors = 'Y',
                       na.action = na.omit,
                       lower = c(K = 100, r = 0),
                       upper = c(K = 5000, r = 2),
                       control = nls.control(maxiter = 1000, minFactor = 1/204800000))
  # coef(fit) is c(K, r); logistic() expects (days, r, K)
  predicted <- logistic(data$days, coef(fit)[2], coef(fit)[1])
  ss_res <- sum((data$cell_density - predicted)^2)
  ss_tot <- sum((data$cell_density - mean(data$cell_density))^2)
  rsqr <- 1 - ss_res / ss_tot
  names(rsqr) <- "rsquared"
  unique_id <- data$unique_id[[1]]
  cbind(rsqr, unique_id)
}
## R^2 for the 32C curves, then fitted-trajectory plot for that treatment.
tt_split32 <- TT_32 %>%
split(.$unique_id)
all_output32 <- tt_split32 %>%
map_df(fit_growth32)
rsqrs_32 <- all_output32 %>%
t(.) %>%
data.frame(.) %>%
rename(rsq = X1,
unique_id = X2)
ggplot() +
# geom_ribbon(aes(ymin = lwr_CI, ymax = upr_CI, x = days), data = filter(preds_boot, temperature < 33), alpha = .3, fill = "grey") +
geom_line(aes(x = days, y = .fitted), data = filter(preds32, temperature < 33)) +
# geom_line(aes(x = days, y = .fitted), data = filter(preds1b, temperature < 33), color = "red") +
facet_wrap( ~ rep, labeller = labeller(.multi_line = FALSE)) +
theme(strip.background = element_rect(colour="white", fill="white")) +
theme(text = element_text(size=14, family = "Arial")) +
# geom_point(aes(x = days, y = cell_density), data = filter(TT_fit1, temperature < 33), color = "red") +
geom_point(aes(x = days, y = cell_density), data = TT_32) +
xlab("Time (days)") + ylab("Population abundance (cells/ml)")
# Bootstrap 32 ------------------------------------------------------------
## Bootstrap CIs for the 32C fitted curves: resample each curve 1000 times
## (the "200" in the comment below is stale), refit, and take 2.5/97.5%
## quantiles of the fitted values at each prediction time point.
boot_32 <- group_by(TT_32, unique_id) %>%
# create 200 bootstrap replicates per curve
do(., boot = modelr::bootstrap(., n = 1000, id = 'boot_num')) %>%
# unnest to show bootstrap number, .id
unnest() %>%
# regroup to include the boot_num
group_by(., unique_id, boot_num) %>%
# run the model using map()
mutate(fit = map(strap, ~ nls_multstart(cell_density ~ K/(1 + (K/1200 - 1)*exp(-r*days)),
data = data.frame(.),
iter = 50,
start_lower = c(K = 100, r = 0),
start_upper = c(K = 10000, r = 1),
supp_errors = 'Y',
na.action = na.omit,
lower = c(K = 100, r = 0),
upper = c(K = 5000, r = 2),
control = nls.control(maxiter=1000, minFactor=1/204800000))))
info32_b <- boot_32 %>%
unnest(fit %>% map(glance))
## ids of bootstrap replicates whose fit succeeded (tidy() drops failures)
preds_id32 <- boot_32 %>%
unnest(fit %>% map(tidy)) %>%
unite(uid, unique_id, boot_num, remove = FALSE) %>%
distinct(uid)
boots_id32 <- boot_32 %>%
unite(uid, unique_id, boot_num, remove = FALSE) %>%
distinct(uid)
preds_many_boot32 <- boot_32 %>%
unite(uid, unique_id, boot_num, remove = FALSE) %>%
filter(uid %in% preds_id32$uid) %>%
unnest(fit %>% map(augment, newdata = new_preds)) %>%
ungroup() %>%
# group by each value of days and get quantiles
group_by(., unique_id, days) %>%
summarise(lwr_CI = quantile(.fitted, 0.025),
upr_CI = quantile(.fitted, 0.975)) %>%
ungroup()
write_csv(preds_many_boot32, "data-processed/preds_many_boot_edit32.csv")
preds_many_boot32 <- read_csv("data-processed/preds_many_boot_edit32.csv")
preds_boot32 <- preds_many_boot32 %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
mutate(temperature = as.numeric(temperature))
## 32C trajectories with bootstrap CI ribbons
ggplot() +
geom_ribbon(aes(ymin = lwr_CI, ymax = upr_CI, x = days), data = filter(preds_boot32), alpha = .3, fill = "grey") +
geom_line(aes(x = days, y = .fitted), data = filter(preds32, temperature < 33)) +
# geom_line(aes(x = days, y = .fitted), data = filter(preds1b, temperature < 33), color = "red") +
facet_grid(temperature ~ rep, labeller = labeller(.multi_line = FALSE)) +
theme(strip.background = element_rect(colour="white", fill="white")) +
theme(text = element_text(size=14, family = "Arial")) +
# geom_point(aes(x = days, y = cell_density), data = filter(TT_fit1, temperature < 33), color = "red") +
geom_point(aes(x = days, y = cell_density), data = TT_32) +
xlab("Time (days)") + ylab("Population abundance (cells/ml)")
# bootstrap ---------------------------------------------------------------
## Same bootstrap-CI procedure as above, but for every temperature at once
## (1000 resamples per curve; the "200" comment below is stale).
boot_many <- group_by(TT_fit, unique_id) %>%
# create 200 bootstrap replicates per curve
do(., boot = modelr::bootstrap(., n = 1000, id = 'boot_num')) %>%
# unnest to show bootstrap number, .id
unnest() %>%
# regroup to include the boot_num
group_by(., unique_id, boot_num) %>%
# run the model using map()
mutate(fit = map(strap, ~ nls_multstart(cell_density ~ K/(1 + (K/1200 - 1)*exp(-r*days)),
data = data.frame(.),
iter = 50,
start_lower = c(K = 100, r = 0),
start_upper = c(K = 10000, r = 1),
supp_errors = 'Y',
na.action = na.omit,
lower = c(K = 100, r = 0),
upper = c(K = 50000, r = 2),
control = nls.control(maxiter=1000, minFactor=1/204800000))))
## replicates whose fit converged (tidy() drops failed fits)
preds_id <- boot_many %>%
unnest(fit %>% map(tidy)) %>%
unite(uid, unique_id, boot_num, remove = FALSE) %>%
distinct(uid)
boots_id <- boot_many %>%
unite(uid, unique_id, boot_num, remove = FALSE) %>%
distinct(uid)
preds_many_boot <- boot_many %>%
unite(uid, unique_id, boot_num, remove = FALSE) %>%
filter(uid %in% preds_id$uid) %>%
unnest(fit %>% map(augment, newdata = new_preds)) %>%
ungroup() %>%
# group by each value of days and get quantiles
group_by(., unique_id, days) %>%
summarise(lwr_CI = quantile(.fitted, 0.025),
upr_CI = quantile(.fitted, 0.975)) %>%
ungroup()
write_csv(preds_many_boot, "data-processed/preds_many_boot_edit.csv")
preds_many_boot <- read_csv("data-processed/preds_many_boot_edit.csv")
preds_boot <- preds_many_boot %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
mutate(temperature = as.numeric(temperature))
## combined figure: sub-32C curves plus the separately fit 32C curves,
## each with their bootstrap CI ribbons
ggplot() +
geom_ribbon(aes(ymin = lwr_CI, ymax = upr_CI, x = days), data = filter(preds_boot, temperature < 33), alpha = .3, fill = "grey") +
geom_ribbon(aes(ymin = lwr_CI, ymax = upr_CI, x = days), data = preds_boot32, alpha = .3, fill = "grey") +
geom_line(aes(x = days, y = .fitted), data = filter(preds3, temperature < 32)) +
geom_line(aes(x = days, y = .fitted), data = filter(preds32, temperature < 33)) +
# geom_line(aes(x = days, y = .fitted), data = filter(preds1b, temperature < 33), color = "red") +
facet_grid(temperature ~ rep, labeller = labeller(.multi_line = FALSE)) +
theme(strip.background = element_rect(colour="white", fill="white")) +
theme(text = element_text(size=14, family = "Arial")) +
# geom_point(aes(x = days, y = cell_density), data = filter(TT_fit1, temperature < 33), color = "red") +
geom_point(aes(x = days, y = cell_density), data = filter(TT_fit, temperature < 33)) +
xlab("Time (days)") + ylab("Population abundance (cells/ml)")
ggsave("figures/growth_trajectories_withCI_32C_edit_bs.pdf", width = 10, height = 10)
# figure S2 in the supplement ---------------------------------------------
## ln(K) vs 1/kT with the 32C estimates (open circles) overlaid on the
## sub-32C regression
paramscool <- read_csv("data-processed/params-edit.csv") %>%
filter(!grepl("32", unique_id)) %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
mutate(temperature = as.numeric(temperature)) %>%
# filter(temperature < 32) %>%
mutate(inverse_temp = (1/(.00008617*(temperature+273.15)))) %>%
filter(term == "K")
params32 <- read_csv("data-processed/params32.csv") %>%
separate(unique_id, into = c("temperature", "rep"), remove = FALSE) %>%
mutate(temperature = as.numeric(temperature)) %>%
# filter(temperature < 32) %>%
mutate(inverse_temp = (1/(.00008617*(temperature+273.15)))) %>%
filter(term == "K")
all_params <- bind_rows(paramscool, params32)
paramscool %>%
ggplot(aes(x = inverse_temp, y = log(estimate))) +
geom_smooth(method = "lm", color = "black") +
geom_point(size = 4, alpha = 0.5) +
geom_point(size = 4, shape = 1) +
geom_point(aes(x = inverse_temp, y = log(estimate)), data = params32, shape = 1, size = 4) +
scale_x_reverse(sec.axis = sec_axis(~((1/(.*8.62 * 10^(-5)))-273.15))) + xlab("Temperature (1/kT)") +
ylab("Ln carrying capacity (cells/ml)") +
theme(text = element_text(size=12, family = "Arial")) +
theme(panel.border = element_rect(colour = "black", fill=NA, size=0.5)) +
theme(plot.title = element_text(hjust = 0.5, size = 14)) +
theme_bw() +
theme(text = element_text(size=12, family = "Arial"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_rect(colour = "black", size=0.5),
plot.title = element_text(hjust = 0.5, size = 12)) +
ggtitle("Temperature (°C)")
ggsave("figures/figure2_with_32_edit.pdf", width = 4, height = 3.5)
log(5000)
|
\name{linmod}
\alias{linmod}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Fit a Linear Model}
\description{
Generic function for fitting linear models; methods dispatch on the class
of \code{x} (the generic itself is shown in the examples).}
\usage{
linmod(x, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
an object from which to fit the model; its class determines which method
is dispatched.}
\item{\dots}{
further arguments passed to or from methods.}
}
\details{
\code{linmod} is an S3 generic: \code{UseMethod("linmod")} dispatches on
the class of its first argument.}
\value{
The value returned depends on the method invoked.%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
asf}
\author{
af}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
sadf}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x, ...)
UseMethod("linmod")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/linmod.Rd | no_license | atomczik/linmod | R | false | false | 947 | rd | \name{linmod}
\alias{linmod}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Fit a Linear Model}
\description{
Generic function for fitting linear models; methods dispatch on the class
of \code{x} (the generic itself is shown in the examples).}
\usage{
linmod(x, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
an object from which to fit the model; its class determines which method
is dispatched.}
\item{\dots}{
further arguments passed to or from methods.}
}
\details{
\code{linmod} is an S3 generic: \code{UseMethod("linmod")} dispatches on
the class of its first argument.}
\value{
The value returned depends on the method invoked.%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
asf}
\author{
af}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
sadf}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x, ...)
UseMethod("linmod")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
## deprecated functions
## as.mlm: deprecated in 2.5-0
## Generic: re-express a constrained ordination (cca/rda result) as a
## multiple linear model of its WA site scores on the constraints.  Kept
## only for back-compatibility; errors for unconstrained ordinations
## (no $CCA component).
`as.mlm` <-
function(x)
{
.Deprecated("see ?hatvalues.cca for new alternatives")
if (is.null(x$CCA))
stop("'as.mlm' can be used only for constrained ordination")
UseMethod("as.mlm")
}
## Deprecated: express a cca ordination as a weighted multiple regression
## of the WA site scores on the constraining variables.  The constraint
## model matrix is rebuilt from the QR decomposition stored in the result,
## and the sqrt(rowsum) weighting that cca applied is removed before the
## weighted lm fit.
`as.mlm.cca` <-
    function (x)
{
    w <- x$rowsum
    WA <- x$CCA$wa
    ## deweight the recovered model matrix, then fit weighted regression
    X <- as.data.frame(qr.X(x$CCA$QR) / sqrt(w))
    lm(WA ~ ., data = X, weights = w)
}
## Deprecated: unweighted analogue of as.mlm.cca for rda results -- rda
## applies no row weights, so the WA scores are regressed directly on the
## constraint matrix recovered from the stored QR decomposition.
`as.mlm.rda` <-
    function (x)
{
    WA <- x$CCA$wa
    X <- as.data.frame(qr.X(x$CCA$QR))
    lm(WA ~ . , data = X)
}
| /R/vegan-deprecated.R | no_license | Microbiology/vegan | R | false | false | 624 | r | ## deprecated functions
## as.mlm: deprecated in 2.5-0
## Generic: refit a constrained ordination (cca/rda) as a multiple linear
## model of its WA site scores; kept for back-compatibility only.
`as.mlm` <-
function(x)
{
.Deprecated("see ?hatvalues.cca for new alternatives")
if (is.null(x$CCA))
stop("'as.mlm' can be used only for constrained ordination")
UseMethod("as.mlm")
}
## cca method: weighted regression of WA site scores on the constraints;
## the model matrix is recovered from the stored QR decomposition and the
## sqrt(rowsum) row weighting is removed before fitting.
`as.mlm.cca` <-
function (x)
{
w <- x$rowsum
WA <- x$CCA$wa
X <- qr.X(x$CCA$QR)
## shall use weighted regression: deweight X
X <- (1/sqrt(w)) * X
X <- as.data.frame(X)
lm(WA ~ ., data = X, weights = w)
}
## rda method: unweighted analogue of as.mlm.cca (rda has no row weights).
`as.mlm.rda` <-
function (x)
{
X <- as.data.frame(qr.X(x$CCA$QR))
WA <- x$CCA$wa
lm(WA ~ . , data = X)
}
|
#' Palindromic squares
#'
#' Under OEIS \href{https://oeis.org/A002779}{A002779}, a \emph{Palindromic square} is a number that is
#' both Palindromic and Square. First 6 such numbers are 0, 1, 4, 9, 121, 484. It uses only the base 10 decimals.
#'
#' @param n the number of first \code{n} entries from the sequence.
#' @param gmp a logical; \code{TRUE} to use large number representation, \code{FALSE} otherwise.
#'
#' @return a vector of length \code{n} containing first entries from the sequence.
#'
#' @examples
#' ## generate first 10 palindromic squares
#' print(Palindromic.Squares(10))
#'
#' @rdname A002779
#' @aliases A002779
#' @export
Palindromic.Squares <- function(n, gmp=TRUE){
## Preprocessing for 'n'
n = check_n(n)
## Base
## palindrome test is performed on the base-10 digit string (OEIS A002779)
base = as.integer(10)
## Main Computation : first, compute in Rmpfr form
## accumulate as gmp big integers so squares beyond 2^53 stay exact
output = as.bigz(numeric(n))
output[1] = 0
if (n>1){
tgt = as.bigz(0)
iter= 1
## walk the square roots upward, keeping each square whose base-10
## representation is palindromic, until n entries are collected
while (iter < n){
tgt = tgt + 1
tgt2 = (tgt^2)
if (is.Palindromic(tgt2, base)){
iter = iter + 1
output[iter] = tgt2
}
}
}
## Rmpfr
## NOTE(review): as.integer() on a bigz yields NA for values beyond
## .Machine$integer.max -- confirm this is acceptable for gmp=FALSE.
if (!gmp){
output = as.integer(output)
}
return(output)
}
#' @keywords internal
#' @noRd
is.Square <- function(n) {
  # A number is a perfect square when its square root is (numerically) an
  # integer; compare against a float tolerance rather than with ==.
  root <- sqrt(as.integer(n))
  abs(root - round(root)) <= sqrt(.Machine$double.eps)
}
| /R/Palindromic.Squares.R | no_license | cran/Zseq | R | false | false | 1,363 | r | #' Palindromic squares
#'
#' Under OEIS \href{https://oeis.org/A002779}{A002779}, a \emph{Palindromic square} is a number that is
#' both Palindromic and Square. First 6 such numbers are 0, 1, 4, 9, 121, 484. It uses only the base 10 decimals.
#'
#' @param n the number of first \code{n} entries from the sequence.
#' @param gmp a logical; \code{TRUE} to use large number representation, \code{FALSE} otherwise.
#'
#' @return a vector of length \code{n} containing first entries from the sequence.
#'
#' @examples
#' ## generate first 10 palindromic squares
#' print(Palindromic.Squares(10))
#'
#' @rdname A002779
#' @aliases A002779
#' @export
Palindromic.Squares <- function(n, gmp=TRUE){
## Preprocessing for 'n'
n = check_n(n)
## Base
## palindrome test uses the base-10 digit string (OEIS A002779)
base = as.integer(10)
## Main Computation : first, compute in Rmpfr form
## gmp big integers keep squares beyond 2^53 exact
output = as.bigz(numeric(n))
output[1] = 0
if (n>1){
tgt = as.bigz(0)
iter= 1
while (iter < n){
tgt = tgt + 1
tgt2 = (tgt^2)
if (is.Palindromic(tgt2, base)){
iter = iter + 1
output[iter] = tgt2
}
}
}
## Rmpfr
## NOTE(review): as.integer() on a bigz gives NA past .Machine$integer.max
if (!gmp){
output = as.integer(output)
}
return(output)
}
#' @keywords internal
#' @noRd
## TRUE when n is a perfect square, using a float tolerance on sqrt(n)
is.Square <- function(n){
tgt = sqrt(as.integer(n))
if (abs(tgt-round(tgt))>sqrt(.Machine$double.eps)){
return(FALSE)
} else {
return(TRUE)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeMeta.R
\name{makeMeta}
\alias{makeMeta}
\title{Create a metadata object table for a set of charts}
\usage{
makeMeta(charts)
}
\arguments{
\item{charts}{list of safetyGraphics chart objects for which to create metadata}
}
\value{
tibble of metadata with the following columns:
\describe{
\item{domain}{Data domain}
\item{text_key}{Text key indicating the setting name. \code{'--'} delimiter indicates a field level data mapping}
\item{col_key}{Key for the column mapping}
\item{field_key}{Key for the field mapping (if any)}
\item{type}{type of mapping - "field" or "column"}
\item{label}{Label}
\item{description}{Description}
\item{multiple}{Mapping supports multiple columns/fields }
\item{standard_adam}{Default values for the ADaM data standard}
\item{standard_sdtm}{Default values for the SDTM data standard}
}
}
\description{
Generates metadata object for a list of charts. \code{makeMeta()} looks for metadata in 3 locations for each \code{chart} object:
\itemize{
\item Domain-level metadata saved as meta_{chart$domain} in the chart$package namespace
\item Chart-specific metadata saved as meta_{chart$name} in the chart$package namespace
\item Chart-specific metadata saved directly to the chart object as chart$meta
}
After checking all charts, all metadata files are stacked into a single data frame and returned. If duplicate metadata rows (domain + text_key) are found, an error is thrown.
}
| /man/makeMeta.Rd | permissive | SafetyGraphics/safetyGraphics | R | false | true | 1,488 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeMeta.R
\name{makeMeta}
\alias{makeMeta}
\title{Create a metadata object table for a set of charts}
\usage{
makeMeta(charts)
}
\arguments{
\item{charts}{list of safetyGraphics chart objects for which to create metadata}
}
\value{
tibble of metadata with the following columns:
\describe{
\item{domain}{Data domain}
\item{text_key}{Text key indicating the setting name. \code{'--'} delimiter indicates a field level data mapping}
\item{col_key}{Key for the column mapping}
\item{field_key}{Key for the field mapping (if any)}
\item{type}{type of mapping - "field" or "column"}
\item{label}{Label}
\item{description}{Description}
\item{multiple}{Mapping supports multiple columns/fields }
\item{standard_adam}{Default values for the ADaM data standard}
\item{standard_sdtm}{Default values for the SDTM data standard}
}
}
\description{
Generates metadata object for a list of charts. \code{makeMeta()} looks for metadata in 3 locations for each \code{chart} object:
\itemize{
\item Domain-level metadata saved as meta_{chart$domain} in the chart$package namespace
\item Chart-specific metadata saved as meta_{chart$name} in the chart$package namespace
\item Chart-specific metadata saved directly to the chart object as chart$meta
}
After checking all charts, all metadata files are stacked into a single data frame and returned. If duplicate metadata rows (domain + text_key) are found, an error is thrown.
}
|
\docType{class}
\name{lmList-class}
\alias{lmList-class}
\alias{show,lmList-method}
\title{Class "lmList" of 'lm' Objects on Common Model}
\description{
Class \code{"lmList"} is an S4 class that is essentially a
list of objects of class \code{\link{lm}} fitted to a common
model.
}
\section{Objects from the Class}{
Objects can be created by calls of the form
\code{new("lmList", ...)} or, more commonly, by a call to
\code{\link{lmList}}.
}
\keyword{classes}
| /man/lmList-class.Rd | no_license | jknowles/lme4 | R | false | false | 463 | rd | \docType{class}
\name{lmList-class}
\alias{lmList-class}
\alias{show,lmList-method}
\title{Class "lmList" of 'lm' Objects on Common Model}
\description{
Class \code{"lmList"} is an S4 class that is essentially a
list of objects of class \code{\link{lm}} fitted to a common
model.
}
\section{Objects from the Class}{
Objects can be created by calls of the form
\code{new("lmList", ...)} or, more commonly, by a call to
\code{\link{lmList}}.
}
\keyword{classes}
|
context("datetime")
## Regression test: two q datetime literals that differ only at sub-second
## precision must keep their ordering after conversion to R.
test_that("datetime conversions", {
# handle from the test-database helper; the test is skipped when the
# database is unavailable -- TODO confirm the helper returns a handle
h <- skip_unless_has_test_db()
t1 <- execute(h, "`datetime$6803.5601388888890142")
t2 <- execute(h, "`datetime$6803.5601388888917427")
# expect_true() is the idiomatic testthat form of expect_equal(cond, TRUE)
expect_true(t2 - t1 > 0)
}) | /tests/testthat/test-datetime.R | permissive | vishalbelsare/rkdb | R | false | false | 233 | r | context("datetime")
## Two q datetime literals differing only at sub-second precision must keep
## their ordering after conversion to R.
test_that("datetime conversions", {
# handle from the test-database helper -- TODO confirm it returns a handle
h <- skip_unless_has_test_db()
t1 <- execute(h, "`datetime$6803.5601388888890142")
t2 <- execute(h, "`datetime$6803.5601388888917427")
expect_equal(t2 - t1 > 0,TRUE)
})
####################################################################################################
#
# Single site LAI SDA
#
#
# --- Last updated: 03.22.2019 By Shawn P. Serbin <sserbin@bnl.gov>
####################################################################################################
#---------------- Close all devices and delete all variables. -------------------------------------#
## NOTE(review): rm(list=ls()) / setwd() in a script are discouraged (they
## clobber the caller's session); kept as-is since this is an interactive
## analysis script.
rm(list=ls(all=TRUE)) # clear workspace
graphics.off() # close any open graphics
closeAllConnections() # close any open connections to files
#--------------------------------------------------------------------------------------------------#
#---------------- Load required libraries ---------------------------------------------------------#
library(PEcAn.all)
library(PEcAn.SIPNET)
library(PEcAn.LINKAGES)
library(PEcAn.visualization)
library(PEcAnAssimSequential)
library(nimble)
library(lubridate)
## NOTE(review): PEcAn.visualization is loaded twice
library(PEcAn.visualization)
#PEcAnAssimSequential::
library(rgdal) # need to put in assim.sequential
library(ncdf4) # need to put in assim.sequential
library(purrr)
library(listviewer)
library(dplyr)
## master switch: actually run the state data assimilation at the bottom
run_SDA <- TRUE #TRUE/FALSE
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
## set run options, some of these should be tweaked or removed as requirements
work_dir <- "/data/sserbin/Modeling/sipnet/NASA_CMS_AGB_LAI"
setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions
# Deifine observation - use existing or generate new?
# set to a specific file, use that.
## BETY site id of the single site assimilated here (US-CZ3)
observation <- c("1000000048")
# delete an old run
unlink(c('run','out','SDA'),recursive = T)
# grab multi-site XML file
settings <- read.settings("XMLs/pecan_US-CZ3_LAI_SDA.xml")
# what is this step for???? is this to get the site locations for the map??
## collect the site ids from a MultiSettings object (one id per site)
if ("MultiSettings" %in% class(settings)) site.ids <- settings %>%
map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character()
# sample from parameters used for both sensitivity analysis and Ens
get.parameter.samples(settings,
ens.sample.method = settings$ensemble$samplingspace$parameters$method)
## Aside: if method were set to unscented, would take minimal changes to do UnKF
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
## Prepare observational data - still very hacky here
# option 1: use existing observation file
# if (observation!="new") {
# load(observation)
# site1 <- point_list
# site1$median_AGB[[1]] %>%
# filter(Site_ID!='772') -> site1$median_AGB[[1]]
# site1$stdv_AGB[[1]] %>%
# filter(Site_ID!='772') -> site1$stdv_AGB[[1]]
# }
# where to put MODIS LAI data?
data_dir <- "/data/sserbin/Modeling/sipnet/NASA_CMS_AGB_LAI/modis_lai_data"
parameters <- settings$run
# get MODIS data
#modis <- PEcAn.data.remote::call_MODIS(lat = as.numeric(parameters$site$lat), lon = as.numeric(parameters$site$lon),
# start_date = parameters$start.date, end_date = parameters$end.date,
# siteID = parameters$site$id, size = 0, product = "MOD15A2H", band = "Lai_500m",
# band_qc = "", band_sd = "LaiStdDev_500m", package_method = "MODISTools")
#modis <- PEcAn.data.remote::call_MODIS(lat = as.numeric(parameters$site$lat), lon = as.numeric(parameters$site$lon),
# start_date = "2001/01/01", end_date = "2002/01/01",
# size = 0, product = "MOD15A2H", band = "Lai_500m",
# band_qc = "", band_sd = "LaiStdDev_500m", package_method = "MODISTools")
## Download MODIS LAI (2001-2010) once and cache it as an RData file;
## subsequent runs just load the cache.
if (!file.exists(file.path(data_dir,'modis_lai_output.RData'))) {
modis <- call_MODIS(product = "MOD15A2H", band = "Lai_500m", start_date = "2001001", end_date = "2010365",
lat = as.numeric(parameters$site$lat), lon = as.numeric(parameters$site$lon), size = 0,
band_qc = "FparLai_QC", band_sd = "LaiStdDev_500m",
package_method = "MODISTools")
save(modis, file = file.path(data_dir,'modis_lai_output.RData'))
} else {
load(file = file.path(data_dir,'modis_lai_output.RData'))
}
#
## open a BETY database connection and look up the site record
bety <- list(user='bety', password='bety', host='localhost',
dbname='bety', driver='PostgreSQL',write=TRUE)
con <- PEcAn.DB::db.open(bety)
bety$con <- con
suppressWarnings(Site_Info <- PEcAn.DB::query.site(observation, con))
Site_Info
Site_ID <- Site_Info$id
Site_Name <- Site_Info$sitename
#plot(lubridate::as_date(modis$calendar_date), modis$data, type="l")
## Extract one row per year from the MODIS record: the observation with the
## maximum LAI (ties broken by taking the first), relabelled "Year_<yyyy>".
## Rewritten to preallocate and bind once instead of growing peak_lai with
## rbind() inside the loop, and to stop shadowing base::max and
## lubridate::year with local variables of the same name.
years <- unique(year(as.Date(modis$calendar_date, "%Y-%m-%d")))
peak_rows <- vector("list", length(years))
for (i in seq_along(years)) {
  yr <- years[i]
  # rows whose calendar_date string mentions this year
  d <- modis[grep(modis$calendar_date, pattern = yr), ]
  peak <- d[which(d$data == max(d$data, na.rm = TRUE)), ][1, ]
  peak$calendar_date <- paste("Year", yr, sep = "_")
  peak_rows[[i]] <- peak
}
peak_lai <- do.call(rbind, peak_rows)
# transpose the data
median_lai = as.data.frame(cbind(Site_ID, Site_Name, t(cbind(peak_lai$data))), stringsAsFactors = F)
colnames(median_lai) = c("Site_ID", "Site_Name", peak_lai$calendar_date)
median_lai[3:length(median_lai)] = as.numeric(median_lai[3:length(median_lai)])
stdv_lai = as.data.frame(cbind(Site_ID, Site_Name, t(cbind(peak_lai$sd))), stringsAsFactors = F)
colnames(stdv_lai) = c("Site_ID", "Site_Name", peak_lai$calendar_date)
stdv_lai[3:length(stdv_lai)] = as.numeric(stdv_lai[3:length(stdv_lai)])
point_list = list()
point_list$median_lai = median_lai
point_list$stdv_lai = stdv_lai
## needed for landtrendr for nested lists. Lai isn't as nested
#point_list$median_lai <- point_list$median_lai[[1]] %>% filter(Site_ID %in% site.ids)
#point_list$stdv_lai <- point_list$stdv_lai[[1]] %>% filter(Site_ID %in% site.ids)
# Re-order observation rows to match the site order in `site.ids`;
# na.omit() drops ids with no matching row in the observation table.
site.order <- sapply(site.ids,function(x) which(point_list$median_lai$Site_ID %in% x)) %>%
as.numeric() %>% na.omit()
point_list$median_lai <- point_list$median_lai[site.order,]
point_list$stdv_lai <- point_list$stdv_lai[site.order,]
# turning lists to dfs for both mean and cov
# Build the date index: column names are "Year_<yyyy>"; take the <yyyy>
# part and assume a mid-July (07/15) peak-LAI date -- TODO confirm that
# assumption against the actual MODIS peak dates.
date.obs <- strsplit(names(point_list$median_lai),"_")[3:length(point_list$median_lai)] %>% map_chr(~.x[2]) %>% paste0(.,"/07/15")
# obs.mean: one list element per date, each a per-site one-column
# data frame named 'LAI'.
obs.mean <- names(point_list$median_lai)[3:length(point_list$median_lai)] %>%
map(function(namesl){
((point_list$median_lai)[[namesl]] %>%
map(~.x %>% as.data.frame %>% `colnames<-`(c('LAI'))) %>%
setNames(site.ids[1:length(.)])
)
}) %>% setNames(date.obs)
# obs.cov: per-date, per-site 1x1 covariance matrices (sd squared).
# NOTE(review): the index uses length(point_list$median_lai) while the names
# come from point_list$stdv_lai -- this only works because the two data
# frames have identical shapes; verify if either one changes.
obs.cov <-names(point_list$stdv_lai)[3:length(point_list$median_lai)] %>%
map(function(namesl) {
((point_list$stdv_lai)[[namesl]] %>%
map( ~ (.x) ^ 2 %>% as.matrix()) %>%
setNames(site.ids[1:length(.)]))
}) %>% setNames(date.obs)
# check input data - after creating list of lists
PEcAnAssimSequential::Construct.R(site.ids, "LAI", obs.mean[[1]], obs.cov[[1]])
PEcAnAssimSequential::Construct.R(site.ids, "LAI", obs.mean[[10]], obs.cov[[10]])
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
## generate new settings object
new.settings <- PEcAn.settings::prepare.settings(settings)
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
## Run SDA
# Run the multi-site ensemble Kalman filter state data assimilation.
if (run_SDA) {
# NOTE(review): the active call uses the raw `settings` object; `new.settings`
# (built by prepare.settings above) is created but never used.
#sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov,
sda.enkf.multisite(settings, obs.mean =obs.mean ,obs.cov = obs.cov,
# control flags: T/F shorthand is risky (T and F are reassignable);
# prefer TRUE/FALSE.
control=list(trace=T,
FF=F,
interactivePlot=F,
TimeseriesPlot=T,
BiasPlot=F,
plot.title="LAI SDA, 1 site",
facet.plots=T,
debug=T,
pause=F))
# Single-site variant kept for reference:
# sda.enkf(settings, obs.mean = obs.mean ,obs.cov = obs.cov,
# control=list(trace=T,
# FF=F,
# interactivePlot=F,
# TimeseriesPlot=T,
# BiasPlot=F,
# plot.title="LAI SDA, 1 site",
# facet.plots=T,
# debug=T,
# pause=F))
} else {
print("*** Not running SDA ***")
}
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
## Wrap up
# Send email if configured
#if (!is.null(settings$email) && !is.null(settings$email$to) && (settings$email$to != "")) {
# sendmail(settings$email$from, settings$email$to,
# paste0("SDA workflow has finished executing at ", base::date()))
#}
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
### EOF
| /modules/assim.sequential/inst/sda_backup/sserbin/Rscripts/single_site_SDA_BNL.R | permissive | PecanProject/pecan | R | false | false | 9,703 | r | ####################################################################################################
#
# Single site LAI SDA
#
#
# --- Last updated: 03.22.2019 By Shawn P. Serbin <sserbin@bnl.gov>
####################################################################################################
#---------------- Close all devices and delete all variables. -------------------------------------#
# NOTE(review): rm(list=ls()) and closeAllConnections() are acceptable in an
# interactive workflow script, but unsafe if this file is ever sourced from
# other code -- they wipe the caller's state.
rm(list=ls(all=TRUE)) # clear workspace
graphics.off() # close any open graphics
closeAllConnections() # close any open connections to files
#--------------------------------------------------------------------------------------------------#
#---------------- Load required libraries ---------------------------------------------------------#
library(PEcAn.all)
library(PEcAn.SIPNET)
library(PEcAn.LINKAGES)
library(PEcAn.visualization)
library(PEcAnAssimSequential)
library(nimble)
library(lubridate)
library(PEcAn.visualization)
#PEcAnAssimSequential::
library(rgdal) # need to put in assim.sequential
library(ncdf4) # need to put in assim.sequential
library(purrr)
library(listviewer)
library(dplyr)
run_SDA <- TRUE #TRUE/FALSE -- master switch; FALSE skips the assimilation step
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
## set run options, some of these should be tweaked or removed as requirements
work_dir <- "/data/sserbin/Modeling/sipnet/NASA_CMS_AGB_LAI"
setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions
# Define observation - use existing or generate new?
# set to a specific file, use that. (Here: the BETY site id to assimilate.)
observation <- c("1000000048")
# delete an old run
unlink(c('run','out','SDA'),recursive = T)
# grab multi-site XML file
settings <- read.settings("XMLs/pecan_US-CZ3_LAI_SDA.xml")
# For multi-site settings, pull the site id out of each nested run block;
# site.ids drives the ordering of observations later in the script.
if ("MultiSettings" %in% class(settings)) site.ids <- settings %>%
map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character()
# sample from parameters used for both sensitivity analysis and Ens
get.parameter.samples(settings,
ens.sample.method = settings$ensemble$samplingspace$parameters$method)
## Aside: if method were set to unscented, would take minimal changes to do UnKF
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
## Prepare observational data - still very hacky here
# option 1: use existing observation file
# if (observation!="new") {
# load(observation)
# site1 <- point_list
# site1$median_AGB[[1]] %>%
# filter(Site_ID!='772') -> site1$median_AGB[[1]]
# site1$stdv_AGB[[1]] %>%
# filter(Site_ID!='772') -> site1$stdv_AGB[[1]]
# }
# where to put MODIS LAI data?
data_dir <- "/data/sserbin/Modeling/sipnet/NASA_CMS_AGB_LAI/modis_lai_data"
parameters <- settings$run
# get MODIS data
#modis <- PEcAn.data.remote::call_MODIS(lat = as.numeric(parameters$site$lat), lon = as.numeric(parameters$site$lon),
# start_date = parameters$start.date, end_date = parameters$end.date,
# siteID = parameters$site$id, size = 0, product = "MOD15A2H", band = "Lai_500m",
# band_qc = "", band_sd = "LaiStdDev_500m", package_method = "MODISTools")
#modis <- PEcAn.data.remote::call_MODIS(lat = as.numeric(parameters$site$lat), lon = as.numeric(parameters$site$lon),
# start_date = "2001/01/01", end_date = "2002/01/01",
# size = 0, product = "MOD15A2H", band = "Lai_500m",
# band_qc = "", band_sd = "LaiStdDev_500m", package_method = "MODISTools")
# Reuse a cached MODIS LAI download when one exists; otherwise pull the
# 2001-2010 record via MODISTools and cache it for future runs.
lai_cache <- file.path(data_dir, 'modis_lai_output.RData')
if (file.exists(lai_cache)) {
  # load() restores the object under its saved name, `modis`
  load(file = lai_cache)
} else {
  modis <- call_MODIS(product = "MOD15A2H", band = "Lai_500m",
                      start_date = "2001001", end_date = "2010365",
                      lat = as.numeric(parameters$site$lat),
                      lon = as.numeric(parameters$site$lon), size = 0,
                      band_qc = "FparLai_QC", band_sd = "LaiStdDev_500m",
                      package_method = "MODISTools")
  save(modis, file = lai_cache)
}
#
# Open a BETY database connection and look up site metadata for the
# BETY site id held in `observation`.
# NOTE(review): credentials are hard-coded here; prefer a config file.
bety <- list(user='bety', password='bety', host='localhost',
dbname='bety', driver='PostgreSQL',write=TRUE)
con <- PEcAn.DB::db.open(bety)
bety$con <- con
suppressWarnings(Site_Info <- PEcAn.DB::query.site(observation, con))
Site_Info
Site_ID <- Site_Info$id
Site_Name <- Site_Info$sitename
#plot(lubridate::as_date(modis$calendar_date), modis$data, type="l")
# Extract the annual peak (maximum) LAI observation from the MODIS record:
# for each year keep the single row with the highest LAI (first occurrence
# on ties, matching which.max()).
obs_years <- unique(year(as.Date(modis$calendar_date, "%Y-%m-%d")))
peak_rows <- vector("list", length(obs_years))
for (i in seq_along(obs_years)) {
  yr <- obs_years[i]
  # Subset by parsed year rather than grep()-ing the raw date string,
  # which could match the year digits appearing elsewhere in the text.
  d <- modis[year(as.Date(modis$calendar_date, "%Y-%m-%d")) == yr, ]
  # which.max() ignores NAs and returns the first maximum; this avoids the
  # original's shadowing of base::max and its unsafe `T` shorthand.
  peak <- d[which.max(d$data), ][1, ]
  peak$calendar_date <- paste("Year", yr, sep = "_")
  peak_rows[[i]] <- peak
}
# Bind once at the end instead of growing peak_lai with rbind() in the loop.
peak_lai <- do.call(rbind, peak_rows)
# transpose the data
# Reshape annual peak-LAI values into one-row-per-site data frames with one
# column per year ("Year_<yyyy>"), the layout the SDA code expects.
# cbind() coerces everything to character, so the year columns must be
# converted back to numeric afterwards.
median_lai <- as.data.frame(cbind(Site_ID, Site_Name, t(cbind(peak_lai$data))),
                            stringsAsFactors = FALSE)
colnames(median_lai) <- c("Site_ID", "Site_Name", peak_lai$calendar_date)
# BUG FIX: as.numeric() on a data.frame subset errors ("'list' object cannot
# be coerced"); convert column-by-column with lapply() instead.
median_lai[3:length(median_lai)] <- lapply(median_lai[3:length(median_lai)], as.numeric)
stdv_lai <- as.data.frame(cbind(Site_ID, Site_Name, t(cbind(peak_lai$sd))),
                          stringsAsFactors = FALSE)
colnames(stdv_lai) <- c("Site_ID", "Site_Name", peak_lai$calendar_date)
stdv_lai[3:length(stdv_lai)] <- lapply(stdv_lai[3:length(stdv_lai)], as.numeric)
# Collect both tables in the structure consumed downstream.
point_list <- list(median_lai = median_lai, stdv_lai = stdv_lai)
## needed for landtrendr for nested lists. Lai isn't as nested
#point_list$median_lai <- point_list$median_lai[[1]] %>% filter(Site_ID %in% site.ids)
#point_list$stdv_lai <- point_list$stdv_lai[[1]] %>% filter(Site_ID %in% site.ids)
# Re-order observation rows to match the site order in `site.ids`;
# na.omit() drops ids with no matching row in the observation table.
site.order <- sapply(site.ids,function(x) which(point_list$median_lai$Site_ID %in% x)) %>%
as.numeric() %>% na.omit()
point_list$median_lai <- point_list$median_lai[site.order,]
point_list$stdv_lai <- point_list$stdv_lai[site.order,]
# turning lists to dfs for both mean and cov
# Build the date index: column names are "Year_<yyyy>"; take the <yyyy>
# part and assume a mid-July (07/15) peak-LAI date -- TODO confirm that
# assumption against the actual MODIS peak dates.
date.obs <- strsplit(names(point_list$median_lai),"_")[3:length(point_list$median_lai)] %>% map_chr(~.x[2]) %>% paste0(.,"/07/15")
# obs.mean: one list element per date, each a per-site one-column
# data frame named 'LAI'.
obs.mean <- names(point_list$median_lai)[3:length(point_list$median_lai)] %>%
map(function(namesl){
((point_list$median_lai)[[namesl]] %>%
map(~.x %>% as.data.frame %>% `colnames<-`(c('LAI'))) %>%
setNames(site.ids[1:length(.)])
)
}) %>% setNames(date.obs)
# obs.cov: per-date, per-site 1x1 covariance matrices (sd squared).
# NOTE(review): the index uses length(point_list$median_lai) while the names
# come from point_list$stdv_lai -- this only works because the two data
# frames have identical shapes; verify if either one changes.
obs.cov <-names(point_list$stdv_lai)[3:length(point_list$median_lai)] %>%
map(function(namesl) {
((point_list$stdv_lai)[[namesl]] %>%
map( ~ (.x) ^ 2 %>% as.matrix()) %>%
setNames(site.ids[1:length(.)]))
}) %>% setNames(date.obs)
# check input data - after creating list of lists
PEcAnAssimSequential::Construct.R(site.ids, "LAI", obs.mean[[1]], obs.cov[[1]])
PEcAnAssimSequential::Construct.R(site.ids, "LAI", obs.mean[[10]], obs.cov[[10]])
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
## generate new settings object
new.settings <- PEcAn.settings::prepare.settings(settings)
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
## Run SDA
# Run the multi-site ensemble Kalman filter state data assimilation.
if (run_SDA) {
# NOTE(review): the active call uses the raw `settings` object; `new.settings`
# (built by prepare.settings above) is created but never used.
#sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov,
sda.enkf.multisite(settings, obs.mean =obs.mean ,obs.cov = obs.cov,
# control flags: T/F shorthand is risky (T and F are reassignable);
# prefer TRUE/FALSE.
control=list(trace=T,
FF=F,
interactivePlot=F,
TimeseriesPlot=T,
BiasPlot=F,
plot.title="LAI SDA, 1 site",
facet.plots=T,
debug=T,
pause=F))
# Single-site variant kept for reference:
# sda.enkf(settings, obs.mean = obs.mean ,obs.cov = obs.cov,
# control=list(trace=T,
# FF=F,
# interactivePlot=F,
# TimeseriesPlot=T,
# BiasPlot=F,
# plot.title="LAI SDA, 1 site",
# facet.plots=T,
# debug=T,
# pause=F))
} else {
print("*** Not running SDA ***")
}
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
## Wrap up
# Send email if configured
#if (!is.null(settings$email) && !is.null(settings$email$to) && (settings$email$to != "")) {
# sendmail(settings$email$from, settings$email$to,
# paste0("SDA workflow has finished executing at ", base::date()))
#}
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
### EOF
|
#=============================================
# Run Tutorial 15 prior to running this code
#=============================================
#install.packages('wordcloud')
#install.packages('udpipe')
#install.packages('textdata') #to install nrc lexicon
library(wordcloud)
library(udpipe)
library(lattice)
#################################################
#==============Sentiment Analysis===============#
# Conduct various assessments to understand the #
# sentiment of tweets related to the airline #
# industry #
#################################################
#===============================================
# Visualization of sentiment for each airlines
#===============================================
# Sentiment counts per airline: bar chart of tweet sentiment labels,
# one facet per airline.
ggplot(tweets_data, aes(x = airline_sentiment, fill = airline_sentiment)) +
geom_bar() +
facet_grid(. ~ airline) +
theme(axis.text.x = element_text(angle=65, vjust=0.6),
plot.margin = unit(c(3,0,3,0), "cm"))
#================================================
# The Most Frequent Words in Positive Sentiment
# Generate a wordcloud, then a bar chart
#================================================
# Subset to tweets labeled positive and visualize the most frequent words.
positive <- tidy_dataset4 %>%
  filter(airline_sentiment == "positive")
head(positive)
# FIX: brewer.pal() requires n >= 3; requesting 2 raised a warning and
# returned 3 colors anyway, so ask for 3 explicitly (same palette, no warning).
wordcloud(positive[, 2],
          max.words = 100,
          random.order = FALSE,
          rot.per = 0.30,
          use.r.layout = FALSE,
          colors = brewer.pal(3, "Blues"))
# Fewer "positive" terms with high frequency
# Tabulate word frequencies among positive tweets and keep the top 21.
counts5 = count(positive, word, sort = TRUE)
counts5 = rename(counts5, freq = n)
positive2 = top_n(counts5, 21)
positive2
# Alternative with piping (same result; note top_n() is superseded by
# slice_max() in current dplyr)
positive2 = positive %>%
count(word, sort = TRUE) %>%
rename(freq = n) %>%
top_n(21)
positive2
# Note: the text ðÿ is an emoticon smiley in Twitter
# Horizontal bar chart of the top words, one palette color per word.
colourCount = length(unique(positive2$word))
getPalette = colorRampPalette(brewer.pal(9, "Set1"))
positive2 %>%
mutate(word = reorder(word, freq)) %>%
ggplot(aes(x = word, y = freq)) +
geom_col(fill = getPalette(colourCount)) +
coord_flip()
#================================================
# The Most Frequent Words in Negative Sentiment
# Generate a wordcloud, then a bar chart
#================================================
# Subset to tweets labeled negative and visualize the most frequent words.
negative <- tidy_dataset4 %>%
  filter(airline_sentiment == "negative")
# FIX: brewer.pal() requires n >= 3 (n = 2 warned and returned 3 colors
# anyway), so ask for 3 explicitly (same palette, no warning).
wordcloud(negative[, 2],
          max.words = 100,
          random.order = FALSE,
          rot.per = 0.30,
          use.r.layout = FALSE,
          colors = brewer.pal(3, "Reds"))
# Fewer "negative" terms with high frequency
# Tabulate word frequencies among negative tweets and keep the top 21.
counts6 = count(negative, word, sort = TRUE)
counts6 = rename(counts6, freq = n)
negative2 = top_n(counts6, 21)
# Alternative with piping (same result; top_n() is superseded by slice_max())
negative2 = negative %>%
count(word, sort = TRUE) %>%
rename(freq = n) %>%
top_n(21)
# Horizontal bar chart of the top words, one palette color per word.
colourCount = length(unique(negative2$word))
getPalette = colorRampPalette(brewer.pal(8, "Dark2"))
negative2 %>%
mutate(word = reorder(word, freq)) %>%
ggplot(aes(x = word, y = freq)) +
geom_col(fill = getPalette(colourCount)) +
coord_flip()
#===================================
# Preview the available sentiments
# in the NRC dictionary
#===================================
# Preview the distinct sentiment categories offered by each lexicon.
get_sentiments('bing') %>%
distinct(sentiment)
get_sentiments('nrc') %>%
distinct(sentiment)
#=================================================
# Sentiment Lexicon: Pull the sentiment from
# the text using the Bing Liu and collaborators
# lexicon.
# 1) Retrieve words with sentiment scores
# 2) Generate count of positive & negative words
# 3) Spread out data to place positive and nega-
#    tive sentiment in separate columns
# 4) Calculate the difference between the total
#    positive words and total negative words
#=================================================
# Overall corpus sentiment with the Bing lexicon: count positive/negative
# word matches and report their difference.
newjoin = inner_join(tidy_dataset4, get_sentiments('bing'))
counts7 = count(newjoin, sentiment)
spread1 = spread(counts7, sentiment, n, fill = 0)
(mutate(spread1, diffsent = positive - negative))
# Alternative (same result, piped; note spread() is superseded by
# pivot_wider() in current tidyr)
tidy_dataset4 %>%
inner_join(get_sentiments('bing')) %>%
count(sentiment) %>%
spread(sentiment, n, fill = 0) %>%
mutate(diffsent = positive - negative)
#======================================
# Generate a joy/sadness lexicon
# and merge with the data.
# This assesses the overall sentiment
# for all our tweets, not individual
# tweets
#======================================
# Build a joy/sadness sub-lexicon from NRC and score each matched word:
# contentment = (# joy matches) - (# sadness matches).
nrc_joysad = get_sentiments('nrc') %>%
filter(sentiment == 'joy' |
sentiment == 'sadness')
nrow(nrc_joysad)
newjoin2 = inner_join(tidy_dataset4, nrc_joysad)
counts8 = count(newjoin2, word, sentiment)
spread2 = spread(counts8, sentiment, n, fill = 0)
content_data = mutate(spread2, contentment = joy - sadness, linenumber = row_number())
tweet_joysad = arrange(content_data, desc(contentment))
# Alternative (same result, piped)
(tweet_joysad = tidy_dataset4 %>%
inner_join(nrc_joysad) %>%
count(word, sentiment) %>%
spread(sentiment, n, fill = 0) %>%
mutate(contentment = joy - sadness, linenumber = row_number()) %>%
arrange(desc(contentment)))
# Contentment score per word, flipped so words read horizontally.
ggplot(tweet_joysad, aes(x=linenumber, y=contentment)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Index Value',
y='Contentment'
) +
theme(
legend.position = 'none',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10)
) +
geom_col()
# Adjustments to the figure to improve
# visualization: keep only the 10 most and 10 least content words.
# NOTE(review): the indices 253:262 are hard-coded to this dataset's row
# count; recompute (e.g. with tail()) if the data changes.
(tweet_joysad2 = tweet_joysad %>%
slice(1:10,253:262))
ggplot(tweet_joysad2, aes(x=linenumber, y=contentment, fill=word)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Index Value',
y='Contentment'
) +
theme(
legend.position = 'bottom',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10)
) +
geom_col()
#================================
# Generate a trust/fear lexicon
# and merge with the data
#================================
# Build a trust/fear sub-lexicon from NRC and score each matched word:
# trustworthy = (# trust matches) - (# fear matches); keep the 10 highest
# and 10 lowest scoring words.
# NOTE(review): the indices 348:357 are hard-coded to this dataset's row
# count; recompute if the data changes.
nrc_trstfear = get_sentiments('nrc') %>%
filter(sentiment == 'trust' |
sentiment == 'fear')
nrow(nrc_trstfear)
(tweet_trstfear = tidy_dataset4 %>%
inner_join(nrc_trstfear) %>%
count(word, sentiment) %>%
spread(sentiment, n, fill = 0) %>%
mutate(trustworthy = trust - fear, linenumber = row_number()) %>%
arrange(desc(trustworthy)) %>%
slice(1:10,348:357))
ggplot(tweet_trstfear, aes(x=linenumber, y=trustworthy, fill=word)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Index Value',
y='Trustworthiness'
) +
theme(
legend.position = 'bottom',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10)
) +
geom_col()
# Result: the term with the greatest occurrences is
# gate, which normally appears in a tweet when a
# customer complains about a transaction or a
# poor experience with customer service
airline_tweets %>%
filter(str_detect(text, 'gate|Gate')) %>%
select(text)
# Wordcloud contrasting both sides (trust vs fear).
# NOTE(review): library() mid-script; conventionally loads belong at the top.
library(reshape2)
tidy_dataset4 %>%
inner_join(nrc_trstfear) %>%
count(word, sentiment) %>%
slice(1:40,318:357) %>%
acast(word~sentiment, value.var='n',fill=0) %>%
comparison.cloud(colors=c('gray30','gray70'))
#################################################
#============Part of Speech Tagging=============#
# Conduct various assessments to understand the #
# sentiment of tweets related to the airline #
# industry #
#################################################
# Download if necessary
#ud_model = udpipe_download_model(language = "english")
# Annotate every token with universal part-of-speech tags via udpipe.
tidy_post1 = tidy_dataset4 %>%
select(word)
# NOTE(review): `ud_model$file_model` references `ud_model` before it is
# defined -- this errors unless the commented download line above (or a
# previous session) already created `ud_model`. Uncomment it on first run.
ud_model = udpipe_load_model(ud_model$file_model)
tagging_data = as.data.frame(udpipe_annotate(ud_model, x = tidy_post1$word))
#==================================
# Basic POST frequency statistics
#==================================
# Frequency table of universal POS tags; factor levels reversed so the most
# frequent tag appears at the top after coord_flip().
post_stats = txt_freq(tagging_data$upos)
post_stats$key = factor(post_stats$key, levels = rev(post_stats$key))
# NOTE(review): y = as.factor(freq) treats the counts as categorical --
# presumably intentional for this tutorial's axis labeling, but confirm;
# a numeric y is the usual choice.
ggplot(post_stats, aes(x=key, y=as.factor(freq), fill=key)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Frequency',
y='',
title='UPOS (Universal Parts of Speech)'
) +
theme(
legend.position = 'none',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10),
title = element_text(size = 13)
) +
geom_col() +
scale_fill_grey()
#======================
# Most Occuring NOUNS
#======================
# Top-20 noun tokens by frequency.
noun_stats = subset(tagging_data, upos %in% c("NOUN"))
noun_stats2 = txt_freq(noun_stats$token)
noun_stats2$key = factor(noun_stats2$key, levels = rev(noun_stats2$key))
noun_stats2 %>%
slice(1:20) %>%
ggplot(aes(x=key, y=as.factor(freq), fill=freq)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Frequency',
y='',
title='Noun Occurrences'
) +
theme(
legend.position = 'none',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10),
title = element_text(size = 13)
) +
scale_fill_gradient(low="orange", high="orange3") +
geom_col()
#===========================
# Most Occuring ADJECTIVES
#===========================
# Top-20 adjective tokens by frequency (same chart recipe as nouns).
adjstats = subset(tagging_data, upos %in% c("ADJ"))
adjstats2 = txt_freq(adjstats$token)
adjstats2$key = factor(adjstats2$key, levels = rev(adjstats2$key))
adjstats2 %>%
slice(1:20) %>%
ggplot(aes(x=key, y=as.factor(freq), fill=freq)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Frequency',
y='',
title='Adjective Occurrences'
) +
theme(
legend.position = 'none',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10),
title = element_text(size = 13)
) +
scale_fill_gradient(low="chartreuse", high="chartreuse3") +
geom_col()
#======================
# Most Occuring VERBS
#======================
# Top-20 verb tokens by frequency (same chart recipe).
verbstats = subset(tagging_data, upos %in% c("VERB"))
verbstats2 = txt_freq(verbstats$token)
verbstats2$key = factor(verbstats2$key, levels = rev(verbstats2$key))
verbstats2 %>%
slice(1:20) %>%
ggplot(aes(x=key, y=as.factor(freq), fill=freq)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Frequency',
y='',
title='Verb Occurrences'
) +
theme(
legend.position = 'none',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10),
title = element_text(size = 13)
) +
scale_fill_gradient(low="tan", high="tan3") +
geom_col()
#===============
# What about X?
#===============
# The catch-all "X" tag: inspect the tokens themselves.
xstats = subset(tagging_data, upos %in% c("X"))
xstats2 = txt_freq(xstats$token)
xstats2$key = factor(xstats2$key, levels = rev(xstats2$key))
xstats2$key
# Contains hashtags, abbreviations, and Twitter account usernames. | /Tutorials/sentiment-analysis/assets/sentiment analysis example.R | no_license | rifayat/OSU_MSIS_5193_Tutorials | R | false | false | 11,128 | r | #=============================================
# Run Tutorial 15 prior to running this code
#=============================================
#install.packages('wordcloud')
#install.packages('udpipe')
#install.packages('textdata') #to install nrc lexicon
library(wordcloud)
library(udpipe)
library(lattice)
#################################################
#==============Sentiment Analysis===============#
# Conduct various assessments to understand the #
# sentiment of tweets related to the airline #
# industry #
#################################################
#===============================================
# Visualization of sentiment for each airlines
#===============================================
ggplot(tweets_data, aes(x = airline_sentiment, fill = airline_sentiment)) +
geom_bar() +
facet_grid(. ~ airline) +
theme(axis.text.x = element_text(angle=65, vjust=0.6),
plot.margin = unit(c(3,0,3,0), "cm"))
#================================================
# The Most Frequent Words in Positive Sentiment
# Generate a wordcloud, then a bar chart
#================================================
# Subset to tweets labeled positive and visualize the most frequent words.
positive <- tidy_dataset4 %>%
  filter(airline_sentiment == "positive")
head(positive)
# FIX: brewer.pal() requires n >= 3; requesting 2 raised a warning and
# returned 3 colors anyway, so ask for 3 explicitly (same palette, no warning).
wordcloud(positive[, 2],
          max.words = 100,
          random.order = FALSE,
          rot.per = 0.30,
          use.r.layout = FALSE,
          colors = brewer.pal(3, "Blues"))
# Fewer "positive" terms with high frequency
counts5 = count(positive, word, sort = TRUE)
counts5 = rename(counts5, freq = n)
positive2 = top_n(counts5, 21)
positive2
# Alternative with piping
positive2 = positive %>%
count(word, sort = TRUE) %>%
rename(freq = n) %>%
top_n(21)
positive2
# Note: the text ðÿ is an emoticon smiley in Twitter
colourCount = length(unique(positive2$word))
getPalette = colorRampPalette(brewer.pal(9, "Set1"))
positive2 %>%
mutate(word = reorder(word, freq)) %>%
ggplot(aes(x = word, y = freq)) +
geom_col(fill = getPalette(colourCount)) +
coord_flip()
#================================================
# The Most Frequent Words in Negative Sentiment
# Generate a wordcloud, then a bar chart
#================================================
# Subset to tweets labeled negative and visualize the most frequent words.
negative <- tidy_dataset4 %>%
  filter(airline_sentiment == "negative")
# FIX: brewer.pal() requires n >= 3 (n = 2 warned and returned 3 colors
# anyway), so ask for 3 explicitly (same palette, no warning).
wordcloud(negative[, 2],
          max.words = 100,
          random.order = FALSE,
          rot.per = 0.30,
          use.r.layout = FALSE,
          colors = brewer.pal(3, "Reds"))
# Fewer "negative" terms with high frequency
counts6 = count(negative, word, sort = TRUE)
counts6 = rename(counts6, freq = n)
negative2 = top_n(counts6, 21)
# Alternative with Piping
negative2 = negative %>%
count(word, sort = TRUE) %>%
rename(freq = n) %>%
top_n(21)
colourCount = length(unique(negative2$word))
getPalette = colorRampPalette(brewer.pal(8, "Dark2"))
negative2 %>%
mutate(word = reorder(word, freq)) %>%
ggplot(aes(x = word, y = freq)) +
geom_col(fill = getPalette(colourCount)) +
coord_flip()
#===================================
# Preview the available sentiments
# in the NRC dictionary
#===================================
get_sentiments('bing') %>%
distinct(sentiment)
get_sentiments('nrc') %>%
distinct(sentiment)
#=================================================
# Sentiment Lexicon: Pull the sentiment from
# the text using the Bing Liu and collaborators
# lexicon.
# 1) Retrieve words with sentiment scores
# 2) Generate count of positive & negative words
# 3) Spread out data to place positive and nega-
# tive sentiment in separate columns
# 4) Calculate the difference between the total
# positive words and total negative words
#=================================================
newjoin = inner_join(tidy_dataset4, get_sentiments('bing'))
counts7 = count(newjoin, sentiment)
spread1 = spread(counts7, sentiment, n, fill = 0)
(mutate(spread1, diffsent = positive - negative))
# Alternative
tidy_dataset4 %>%
inner_join(get_sentiments('bing')) %>%
count(sentiment) %>%
spread(sentiment, n, fill = 0) %>%
mutate(diffsent = positive - negative)
#======================================
# Generate a joy/sadness lexicon
# and merge with the data.
# This assesses the overall sentiment
# for all our tweets, not individual
# tweets
#======================================
nrc_joysad = get_sentiments('nrc') %>%
filter(sentiment == 'joy' |
sentiment == 'sadness')
nrow(nrc_joysad)
newjoin2 = inner_join(tidy_dataset4, nrc_joysad)
counts8 = count(newjoin2, word, sentiment)
spread2 = spread(counts8, sentiment, n, fill = 0)
content_data = mutate(spread2, contentment = joy - sadness, linenumber = row_number())
tweet_joysad = arrange(content_data, desc(contentment))
# Alternative
(tweet_joysad = tidy_dataset4 %>%
inner_join(nrc_joysad) %>%
count(word, sentiment) %>%
spread(sentiment, n, fill = 0) %>%
mutate(contentment = joy - sadness, linenumber = row_number()) %>%
arrange(desc(contentment)))
ggplot(tweet_joysad, aes(x=linenumber, y=contentment)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Index Value',
y='Contentment'
) +
theme(
legend.position = 'none',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10)
) +
geom_col()
# Adjustments to the figure to improve
# visualization
(tweet_joysad2 = tweet_joysad %>%
slice(1:10,253:262))
ggplot(tweet_joysad2, aes(x=linenumber, y=contentment, fill=word)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Index Value',
y='Contentment'
) +
theme(
legend.position = 'bottom',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10)
) +
geom_col()
#================================
# Generate a trust/fear lexicon
# and merge with the data
#================================
nrc_trstfear = get_sentiments('nrc') %>%
filter(sentiment == 'trust' |
sentiment == 'fear')
nrow(nrc_trstfear)
(tweet_trstfear = tidy_dataset4 %>%
inner_join(nrc_trstfear) %>%
count(word, sentiment) %>%
spread(sentiment, n, fill = 0) %>%
mutate(trustworthy = trust - fear, linenumber = row_number()) %>%
arrange(desc(trustworthy)) %>%
slice(1:10,348:357))
ggplot(tweet_trstfear, aes(x=linenumber, y=trustworthy, fill=word)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Index Value',
y='Trustworthiness'
) +
theme(
legend.position = 'bottom',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10)
) +
geom_col()
# Result: the term with the greatest occurrences is
# gate, which normally appears in a tweet when a
# customer complains about a transaction or a
# poor experience with customer service
airline_tweets %>%
filter(str_detect(text, 'gate|Gate')) %>%
select(text)
# Wordcloud contrasting both sides
library(reshape2)
tidy_dataset4 %>%
inner_join(nrc_trstfear) %>%
count(word, sentiment) %>%
slice(1:40,318:357) %>%
acast(word~sentiment, value.var='n',fill=0) %>%
comparison.cloud(colors=c('gray30','gray70'))
#################################################
#============Part of Speech Tagging=============#
# Conduct various assessments to understand the #
# sentiment of tweets related to the airline #
# industry #
#################################################
# Download if necessary
#ud_model = udpipe_download_model(language = "english")
# Annotate every token with universal part-of-speech tags via udpipe.
tidy_post1 = tidy_dataset4 %>%
select(word)
# NOTE(review): `ud_model$file_model` references `ud_model` before it is
# defined -- this errors unless the commented download line above (or a
# previous session) already created `ud_model`. Uncomment it on first run.
ud_model = udpipe_load_model(ud_model$file_model)
tagging_data = as.data.frame(udpipe_annotate(ud_model, x = tidy_post1$word))
#==================================
# Basic POST frequency statistics
#==================================
post_stats = txt_freq(tagging_data$upos)
post_stats$key = factor(post_stats$key, levels = rev(post_stats$key))
ggplot(post_stats, aes(x=key, y=as.factor(freq), fill=key)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Frequency',
y='',
title='UPOS (Universal Parts of Speech)'
) +
theme(
legend.position = 'none',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10),
title = element_text(size = 13)
) +
geom_col() +
scale_fill_grey()
#======================
# Most Occuring NOUNS
#======================
noun_stats = subset(tagging_data, upos %in% c("NOUN"))
noun_stats2 = txt_freq(noun_stats$token)
noun_stats2$key = factor(noun_stats2$key, levels = rev(noun_stats2$key))
noun_stats2 %>%
slice(1:20) %>%
ggplot(aes(x=key, y=as.factor(freq), fill=freq)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Frequency',
y='',
title='Noun Occurrences'
) +
theme(
legend.position = 'none',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10),
title = element_text(size = 13)
) +
scale_fill_gradient(low="orange", high="orange3") +
geom_col()
#===========================
# Most Occuring ADJECTIVES
#===========================
adjstats = subset(tagging_data, upos %in% c("ADJ"))
adjstats2 = txt_freq(adjstats$token)
adjstats2$key = factor(adjstats2$key, levels = rev(adjstats2$key))
adjstats2 %>%
slice(1:20) %>%
ggplot(aes(x=key, y=as.factor(freq), fill=freq)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Frequency',
y='',
title='Adjective Occurrences'
) +
theme(
legend.position = 'none',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10),
title = element_text(size = 13)
) +
scale_fill_gradient(low="chartreuse", high="chartreuse3") +
geom_col()
#======================
# Most Occuring VERBS
#======================
verbstats = subset(tagging_data, upos %in% c("VERB"))
verbstats2 = txt_freq(verbstats$token)
verbstats2$key = factor(verbstats2$key, levels = rev(verbstats2$key))
verbstats2 %>%
slice(1:20) %>%
ggplot(aes(x=key, y=as.factor(freq), fill=freq)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Frequency',
y='',
title='Verb Occurrences'
) +
theme(
legend.position = 'none',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10),
title = element_text(size = 13)
) +
scale_fill_gradient(low="tan", high="tan3") +
geom_col()
#===============
# What about X?
#===============
xstats = subset(tagging_data, upos %in% c("X"))
xstats2 = txt_freq(xstats$token)
xstats2$key = factor(xstats2$key, levels = rev(xstats2$key))
xstats2$key
# Contains hashtags, abbreviations, and Twitter account usernames. |
# Exploratory Data Analysis - Assignment 1 - Plot 2
# Import the dataset interactively via file.choose().
# The subset data (from the dates 2007-02-01 and 2007-02-02) has been copied
# to another file beforehand; that pre-filtered file is the one selected here.
hhDataSubset <- read.delim(file.choose(), sep = ";", header = TRUE)
# Post import, doing some cursory checks on dimensions and column names.
dim(hhDataSubset)
colnames(hhDataSubset)
# Build a date-time column for the x axis by combining the Date and Time
# columns.  BUG FIX: strptime() returns POSIXlt, a list-like type that
# misbehaves when stored as a data.frame column; convert to POSIXct.
hhDataSubset$tStr <- as.POSIXct(strptime(paste(hhDataSubset$Date, hhDataSubset$Time),
                                         format = "%d/%m/%Y %H:%M:%S"))
# Preview the line chart on the default device first.
plot(hhDataSubset$tStr, hhDataSubset$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (Kilowatts)")
# Looks OK!  Now write the same plot to a PNG at the required size.
png("Plot2.png", width = 480, height = 480)
plot(hhDataSubset$tStr, hhDataSubset$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (Kilowatts)")
# Close the PNG device so the file is flushed to disk.
dev.off()
| /Plot2.R | no_license | sanjaynvs/ExData_Plotting1 | R | false | false | 1,023 | r | #Exploratory Data Analysis - Assignment 1 - Plot 2
#Importing the dataset, with file.choose
#The subset data (rom the dates 2007-02-01 and 2007-02-02) has been copied to another file.
#This is the one that will be selected
hhDataSubset <- read.delim(file.choose(),sep = ";", header = TRUE)
#Post import, doing some cursory checks
dim(hhDataSubset)
colnames(hhDataSubset)
#creating new column for plotting the graph versus date and time, by combining the date and time
hhDataSubset$tStr<-strptime(paste(hhDataSubset$Date,hhDataSubset$Time), format = "%d/%m/%Y %H:%M:%S")
plot(hhDataSubset$tStr, hhDataSubset$Global_active_power, type="l", xlab = "", ylab = "Global Active Power (Kilowatts)")
#Looks Ok!
#Initiating procedure to print this to png
#open png and plot, with the required height and width
png("Plot2.png", width = 480, height = 480)
plot(hhDataSubset$tStr, hhDataSubset$Global_active_power, type="l", xlab = "", ylab = "Global Active Power (Kilowatts)")
#close the png
dev.off()
|
# Chapter 4 Exercises.
library(Lahman)
library(tidyverse)
# 1. Relations between winning percentage and run differential across decades.
# a. Fit a linear regression model for the 60s, 70s, 80s, 90s.
# Each decade: keep team/season identifiers plus wins, losses, runs scored (R)
# and allowed (RA), then derive run differential (RD) and win percentage (Wpct).
# 60s
# BUG FIX: the original filtered on `year < 1971`, but the Teams table's
# season column is `yearID` (as used by every other decade below).
my_60_teams <- Teams %>% filter(yearID > 1960, yearID < 1971) %>%
  select(teamID, yearID, lgID, G, W, L, R, RA) %>%
  mutate(RD = R - RA, Wpct = W / (W + L))
# 70s
my_70_teams <- Teams %>% filter(yearID > 1970, yearID < 1981) %>%
  select(teamID, yearID, lgID, G, W, L, R, RA) %>%
  mutate(RD = R - RA, Wpct = W / (W + L))
# 80s
my_80_teams <- Teams %>% filter(yearID > 1980, yearID < 1991) %>%
  select(teamID, yearID, lgID, G, W, L, R, RA) %>%
  mutate(RD = R - RA, Wpct = W / (W + L))
# 90s
my_90_teams <- Teams %>% filter(yearID > 1990, yearID < 2001) %>%
  select(teamID, yearID, lgID, G, W, L, R, RA) %>%
  mutate(RD = R - RA, Wpct = W / (W + L))
# One simple linear model per decade: Wpct as a function of run differential.
lin60 <- lm(Wpct ~ RD, data = my_60_teams)
lin70 <- lm(Wpct ~ RD, data = my_70_teams)
lin80 <- lm(Wpct ~ RD, data = my_80_teams)
lin90 <- lm(Wpct ~ RD, data = my_90_teams)
# Print the fitted coefficients for each decade.
lin60
lin70
lin80
lin90
# Predicted win percentage for a +10 run differential in each decade.
predict(lin60, data.frame(RD = 10))
predict(lin70, data.frame(RD = 10))
predict(lin80, data.frame(RD = 10))
predict(lin90, data.frame(RD = 10))
# This is the book's solution which is better. In the future, keep this in mind.
# Label every 1961-2000 season with its era, then fit one model per era.
Teams %>% filter(yearID >= 1961, yearID <= 2000) %>%
  mutate(Era = ifelse(
    yearID <= 1970,
    "1961-1970",
    ifelse(
      yearID <= 1980,
      "1971-1980",
      ifelse(yearID <= 1990, "1981-1990", "1991-2000")
    )
  ),
  WinPct = W / (W + L)) ->
  Eras
# Fit WinPct ~ (R - RA) on the subset of seasons belonging to one era.
one_fit <- function(years) {
  lm(WinPct ~ I(R - RA),
     data = filter(Eras, Era == years))
}
the_eras <- c("1961-1970", "1971-1980",
              "1981-1990", "1991-2000")
four_fits <- lapply(the_eras, one_fit)
names(four_fits) <- the_eras
sapply(four_fits, coef)
# Predict at a +10 run differential (R = 30, RA = 20) for every era fit.
p10 <- function(fit) {
  predict(fit, data.frame(R = 30, RA = 20))
}
sapply(four_fits, p10)
#### 2 Pythagorean Residuals for Poor and Great Teams in the 19th Century
# a. Fit a Pythagorean formula model to the run-diff, W/L for team in the 19th century
# Wpct_pyt is the Pythagorean expectation R^2 / (R^2 + RA^2); lin19 regresses
# it on run differential so residuals can be inspected by team quality.
my_19th_teams <- Teams %>% filter(yearID <= 1900) %>%
select(teamID, yearID, W, L, R, RA) %>%
mutate(RD = R - RA, Wpct = W / (W + L))
my_19th_teams <-
my_19th_teams %>% mutate(Wpct_pyt = R ^ 2 / (R ^ 2 + RA ^ 2))
lin19 <- lm(Wpct_pyt ~ RD, data = my_19th_teams)
library(broom)
# augment() attaches fitted values and residuals (.resid) to the model data.
res <- augment(lin19)
# Label teams "great" (Wpct_pyt > .7), "bad" (< .3), or "other".
res %>% mutate(type = ifelse(Wpct_pyt > .7, "great",
ifelse(Wpct_pyt < .3, "bad", "other"))) -> res
# Residuals vs Pythagorean expectation, coloured by team quality.
ggplot(res, aes(Wpct_pyt, .resid, color = type)) +
geom_point() +
geom_hline(yintercept = 0)
# Book solution:
Teams %>% filter(yearID <= 1900) %>%
mutate(WinPct = W / (W + L)) ->
D_19th
#(b) By inspecting the residual plot of your fitted model from (a), did the great and poor teams in the 19th century do better or worse than one would expect on the basis of their run differentials?
# Below I construct a graph of the values of R - RA (horizontal) against the residual (vertical). I color the point by the winning proportion (bad is WinPct < .3 and great is WinPct > .7). We see some great teams with large positive residuals and bad teams with large negative residuals. By exploring further, can find the identity of the teams with the large residuals.
# This code also doesn't plot
fit <- lm(WinPct ~ I(R - RA), data = D_19th)
fit
library(broom)
out <- augment(fit)
out %>% mutate(type = ifelse(WinPct > .7, "great",
ifelse(WinPct < .3, "bad", "other"))) -> out
# NOTE(review): augment() turns the term name `I(R - RA)` into the mangled
# column name `I.R...RA.`; this is fragile across broom versions -- verify
# the column name if this stops plotting.
ggplot(out, aes(I.R...RA., .resid, color = type)) +
geom_point() +
geom_hline(yintercept = 0)
# 3 Exploring the manager effect in baseball. Retrosheet game logs report
# the managers; the book says Retrosheet, but the solution provided uses
# Lahman. This is partly my solution and the book's.
# a. Select a period of at least 10 years and fit the Pythagorean formula
#    model to the RD / W-L data (here: the 2001-2020 seasons).
my_21st_teams <- Teams %>% filter(yearID > 2000, yearID < 2021) %>%
  mutate(RD = R - RA, Wpct_pyt = R ^ 2 / (R ^ 2 + RA ^ 2))
lin21 <- lm(Wpct_pyt ~ RD, data = my_21st_teams)
# b. On the basis of the fit and the list of managers, rank managers by the
#    mean residual of the teams they managed.
# BUG FIX: the original called select(d, yearID, teamID, R, RA), but no object
# `d` exists in this script, and dropping RD/Wpct_pyt would keep augment()
# from computing fitted values/residuals.  Pass the model data directly.
out <- augment(lin21, data = my_21st_teams)
# Attach the manager (playerID) for each team-season.
out %>% inner_join(select(Managers, playerID, yearID,
                          teamID),
                   by = c("yearID", "teamID")) -> out
# Average residual per manager: positive means the manager's teams beat their
# Pythagorean expectation on average.
out %>% group_by(playerID) %>%
  summarize(N = n(), Mean_Residual = mean(.resid)) %>%
  arrange(desc(Mean_Residual)) -> out
head(out)
tail(out)
# 4. Pythagorean Relationship for other sports
# www.opensourcesports.com was not online. So, I'm skipping this one.
# www.opensourcesports.com was not online. So, I'm skipping this one. | /Chapter4.R | permissive | coderjones/mlb-stats | R | false | false | 4,655 | r | # Chapter 4 Exercises.
library(Lahman)
library(tidyverse)
# 1. Relations between winning percentage and run differential across decades.
# a. Fit a linear regression model for the 60s, 70s, 80s, 90s.
# Each decade: keep team/season identifiers plus wins, losses, runs scored (R)
# and allowed (RA), then derive run differential (RD) and win percentage (Wpct).
# 60s
# BUG FIX: the original filtered on `year < 1971`, but the Teams table's
# season column is `yearID` (as used by every other decade below).
my_60_teams <- Teams %>% filter(yearID > 1960, yearID < 1971) %>%
  select(teamID, yearID, lgID, G, W, L, R, RA) %>%
  mutate(RD = R - RA, Wpct = W / (W + L))
# 70s
my_70_teams <- Teams %>% filter(yearID > 1970, yearID < 1981) %>%
  select(teamID, yearID, lgID, G, W, L, R, RA) %>%
  mutate(RD = R - RA, Wpct = W / (W + L))
# 80s
my_80_teams <- Teams %>% filter(yearID > 1980, yearID < 1991) %>%
  select(teamID, yearID, lgID, G, W, L, R, RA) %>%
  mutate(RD = R - RA, Wpct = W / (W + L))
# 90s
my_90_teams <- Teams %>% filter(yearID > 1990, yearID < 2001) %>%
  select(teamID, yearID, lgID, G, W, L, R, RA) %>%
  mutate(RD = R - RA, Wpct = W / (W + L))
# One simple linear model per decade: Wpct as a function of run differential.
lin60 <- lm(Wpct ~ RD, data = my_60_teams)
lin70 <- lm(Wpct ~ RD, data = my_70_teams)
lin80 <- lm(Wpct ~ RD, data = my_80_teams)
lin90 <- lm(Wpct ~ RD, data = my_90_teams)
# Print the fitted coefficients for each decade.
lin60
lin70
lin80
lin90
# Predicted win percentage for a +10 run differential in each decade.
predict(lin60, data.frame(RD = 10))
predict(lin70, data.frame(RD = 10))
predict(lin80, data.frame(RD = 10))
predict(lin90, data.frame(RD = 10))
# This is the book's solution which is better. In the future, keep this in mind.
# Label every 1961-2000 season with its era, then fit one model per era.
Teams %>% filter(yearID >= 1961, yearID <= 2000) %>%
  mutate(Era = ifelse(
    yearID <= 1970,
    "1961-1970",
    ifelse(
      yearID <= 1980,
      "1971-1980",
      ifelse(yearID <= 1990, "1981-1990", "1991-2000")
    )
  ),
  WinPct = W / (W + L)) ->
  Eras
# Fit WinPct ~ (R - RA) on the subset of seasons belonging to one era.
one_fit <- function(years) {
  lm(WinPct ~ I(R - RA),
     data = filter(Eras, Era == years))
}
the_eras <- c("1961-1970", "1971-1980",
              "1981-1990", "1991-2000")
four_fits <- lapply(the_eras, one_fit)
names(four_fits) <- the_eras
sapply(four_fits, coef)
# Predict at a +10 run differential (R = 30, RA = 20) for every era fit.
p10 <- function(fit) {
  predict(fit, data.frame(R = 30, RA = 20))
}
sapply(four_fits, p10)
#### 2 Pythagorean Residuals for Poor and Great Teams in the 19th Century
# a. Fit a Pythagorean formula model to the run-diff, W/L for team in the 19th century
# Wpct_pyt is the Pythagorean expectation R^2 / (R^2 + RA^2); lin19 regresses
# it on run differential so residuals can be inspected by team quality.
my_19th_teams <- Teams %>% filter(yearID <= 1900) %>%
select(teamID, yearID, W, L, R, RA) %>%
mutate(RD = R - RA, Wpct = W / (W + L))
my_19th_teams <-
my_19th_teams %>% mutate(Wpct_pyt = R ^ 2 / (R ^ 2 + RA ^ 2))
lin19 <- lm(Wpct_pyt ~ RD, data = my_19th_teams)
library(broom)
# augment() attaches fitted values and residuals (.resid) to the model data.
res <- augment(lin19)
# Label teams "great" (Wpct_pyt > .7), "bad" (< .3), or "other".
res %>% mutate(type = ifelse(Wpct_pyt > .7, "great",
ifelse(Wpct_pyt < .3, "bad", "other"))) -> res
# Residuals vs Pythagorean expectation, coloured by team quality.
ggplot(res, aes(Wpct_pyt, .resid, color = type)) +
geom_point() +
geom_hline(yintercept = 0)
# Book solution:
Teams %>% filter(yearID <= 1900) %>%
mutate(WinPct = W / (W + L)) ->
D_19th
#(b) By inspecting the residual plot of your fitted model from (a), did the great and poor teams in the 19th century do better or worse than one would expect on the basis of their run differentials?
# Below I construct a graph of the values of R - RA (horizontal) against the residual (vertical). I color the point by the winning proportion (bad is WinPct < .3 and great is WinPct > .7). We see some great teams with large positive residuals and bad teams with large negative residuals. By exploring further, can find the identity of the teams with the large residuals.
# This code also doesn't plot
fit <- lm(WinPct ~ I(R - RA), data = D_19th)
fit
library(broom)
out <- augment(fit)
out %>% mutate(type = ifelse(WinPct > .7, "great",
ifelse(WinPct < .3, "bad", "other"))) -> out
# NOTE(review): augment() turns the term name `I(R - RA)` into the mangled
# column name `I.R...RA.`; this is fragile across broom versions -- verify
# the column name if this stops plotting.
ggplot(out, aes(I.R...RA., .resid, color = type)) +
geom_point() +
geom_hline(yintercept = 0)
# 3 Exploring the manager effect in baseball. Retrosheet game logs report
# the managers; the book says Retrosheet, but the solution provided uses
# Lahman. This is partly my solution and the book's.
# a. Select a period of at least 10 years and fit the Pythagorean formula
#    model to the RD / W-L data (here: the 2001-2020 seasons).
my_21st_teams <- Teams %>% filter(yearID > 2000, yearID < 2021) %>%
  mutate(RD = R - RA, Wpct_pyt = R ^ 2 / (R ^ 2 + RA ^ 2))
lin21 <- lm(Wpct_pyt ~ RD, data = my_21st_teams)
# b. On the basis of the fit and the list of managers, rank managers by the
#    mean residual of the teams they managed.
# BUG FIX: the original called select(d, yearID, teamID, R, RA), but no object
# `d` exists in this script, and dropping RD/Wpct_pyt would keep augment()
# from computing fitted values/residuals.  Pass the model data directly.
out <- augment(lin21, data = my_21st_teams)
# Attach the manager (playerID) for each team-season.
out %>% inner_join(select(Managers, playerID, yearID,
                          teamID),
                   by = c("yearID", "teamID")) -> out
# Average residual per manager: positive means the manager's teams beat their
# Pythagorean expectation on average.
out %>% group_by(playerID) %>%
  summarize(N = n(), Mean_Residual = mean(.resid)) %>%
  arrange(desc(Mean_Residual)) -> out
head(out)
tail(out)
# 4. Pythagorean Relationship for other sports
# www.opensourcesports.com was not online. So, I'm skipping this one.
# Open the PNG graphics device first so all four panels render straight to
# file at the required 480 x 480 px.
png(filename = "plot4.png",
    width = 480, height = 480, units = "px")
# Column types for read.table: Date and Time come in as character, the seven
# measurement columns as numeric.
columns <- c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")
# Read the full power-consumption file; "?" marks missing values.
data <- read.table("Data/household_power_consumption.txt", sep=";", na.strings="?", header=TRUE, colClasses=columns, dec=".")
# Combine Date + Time into a single date-time column for the x axes.
# BUG FIX: strptime() returns POSIXlt, a list-like type that is fragile as a
# data.frame column; convert to POSIXct for safe storage and subsetting.
data$DateTime <- as.POSIXct(strptime(paste(data$Date,data$Time), format="%d/%m/%Y %H:%M:%S"))
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
# Keep only the two target days: 2007-02-01 and 2007-02-02.
februaryData <- data[data$Date %in% c(as.Date("2007-02-01"), as.Date("2007-02-02")),]
# Draw the 2 x 2 panel of charts.
par(mfrow = c(2,2))
with(februaryData, {
  plot(DateTime, Global_active_power, type="l", xlab = "", ylab="Global Active Power")
  plot(DateTime, Voltage, type="l", xlab="datetime", ylab="Voltage")
  plot(DateTime, Sub_metering_1, type="l", xlab = "", ylab="Energy sub metering")
  lines(DateTime, Sub_metering_2, col="red")
  lines(DateTime, Sub_metering_3, col="blue")
  legend("topright", lwd = 1, col=c("black","red","blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
  plot(DateTime, Global_reactive_power, type="l", xlab="datetime")
})
# Close the device so plot4.png is written to disk.
dev.off()
| /Plot4.R | no_license | njkroes/ExData_Plotting1 | R | false | false | 1,300 | r | # set file type
# Open the PNG device (480 x 480 px) before plotting so all four panels
# render straight to file.
png(filename = "plot4.png",
width = 480, height = 480, units = "px")
# Column types: Date/Time as character, the seven measurements as numeric.
columns <- c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")
# Read in the data; "?" marks missing values in this dataset.
data <- read.table("Data/household_power_consumption.txt", sep=";", na.strings="?", header=TRUE, colClasses=columns, dec=".")
# Add a DateTime column by combining Date and Time.
# NOTE(review): strptime() returns POSIXlt, which is fragile as a data.frame
# column; as.POSIXct() would be safer.  Left unchanged here.
data$DateTime <- strptime(paste(data$Date,data$Time), format="%d/%m/%Y %H:%M:%S")
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
# Subset data to the two required days: 2007-02-01 and 2007-02-02.
februaryData <- data[data$Date %in% c(as.Date("2007-02-01"), as.Date("2007-02-02")),]
# Create the charts in a 2 x 2 panel layout.
par(mfrow = c(2,2))
with(februaryData, {
plot(DateTime, Global_active_power, type="l", xlab = "", ylab="Global Active Power")
plot(DateTime, Voltage, type="l", xlab="datetime", ylab="Voltage")
plot(DateTime, Sub_metering_1, type="l", xlab = "", ylab="Energy sub metering")
lines(DateTime, Sub_metering_2, col="red")
lines(DateTime, Sub_metering_3, col="blue")
legend("topright", lwd = 1, col=c("black","red","blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
plot(DateTime, Global_reactive_power, type="l", xlab="datetime")
})
# Close the device so plot4.png is flushed to disk.
dev.off()
|
#Generate barplots for the taxonomy (class level) of the hub OTUs
## Libraries ##
library(Hmisc)
library(igraph)
library(rgexf)
library(ggplot2)
library(GGally)
library(network)
library(sna)
# NOTE(review): ggplot2 is attached twice (also above); harmless but redundant.
library(ggplot2)
library(tidyr)
library(VennDiagram)
## Shared ggplot theme applied to the figure at the end of the script.
tema=theme(axis.text.x = element_text(color="black",size=14), #angle=90,hjust=0.95,vjust=0.2),
axis.text.y = element_text(color="black",size=14),
axis.title = element_text(color="black",size=14),
legend.text = element_text(color = "black",size=13),
strip.text.x = element_text(size=15, color="black"),
panel.border =element_rect(color = "white", fill = NA) ,
panel.background = element_rect(fill = "white",colour = "white",size = 0.5, linetype = "solid"),
panel.grid.major.y = element_blank(),panel.grid.minor.x = element_blank(),
legend.position = "right")
## Base colour palette; getPalette interpolates it to any number of taxa.
aes4=c("#024AAC","#1DACA8","#10B62F", "#E2E41C","#F48F06","#F2252C","#D140B7", "grey80","grey20")
getPalette=colorRampPalette(aes4)
## Directories ##
#WINDOWS paths (machine-specific)
setwd("C:/Users/victorfn/Google Drive/network_analysis_2018")
network.dir="C:/Users/victorfn/Google Drive/network_analysis_2018/github"
base.name= "CAM"
plot.dir=paste(network.dir,"plots",sep = "/") ; dir.create(plot.dir)
## Select samples to work with ##
# The trailing index into each candidate vector picks which entries are used.
compartment=c("soil","roo.zone.soil", "root.endosphere","rhizosphere","leaf.endosphere","phyllosphere")[c(3:6)]
plants=c("Agave.salmiana","Agave.tequilana","Agave.deserti","Cacti")[c(1,2,4)]
# Load the OTU, metadata and taxonomy tables saved by the upstream pipeline.
load(file = paste(getwd(),paste(base.name,"OTU_table",sep = "_"),sep = "/"))
load(file = paste(getwd(),paste(base.name,"metadata_table",sep = "_"),sep = "/"))
load(file = paste(getwd(),paste(base.name,"taxonomy_table",sep = "_"),sep = "/"))
#What kind of vertices do you wanna plot? (index selects one option; 2 = "hub")
vert=c("all","hub","central","connected")[2]
#Please change the taxonomy rank manually (phylum,class recommended)
#Generate plot data, one row per (taxonomic class, compartment, species).
# NOTE(review): `plot` shadows base::plot() for the rest of the script;
# a name like plot_dt would be safer.
plot=data.frame()
for(plant in plants){
for (comp in compartment) {
print(paste(plant,comp))
#Network data files saved per plant-compartment by the network pipeline.
load(file = paste(network.dir,paste(plant,comp,"igraph",sep = "."),sep = "/"))
nodes=read.delim(file = paste(network.dir,paste(plant,comp,"R_nodes","txt",sep = "."),sep = "/"))
random=read.delim(file = paste(network.dir,paste(plant,comp,"random_networks","txt",sep = "."),sep = "/"))
#Restrict to the requested vertex class ("all" keeps every node).
if (vert=="hub"){
nodes=nodes[nodes$random.hub==1,]
} else if (vert=="central") {
nodes=nodes[nodes$random.central==1,]
} else if (vert=="connected"){
nodes=nodes[nodes$random.grado==1,]
}
#Set factor levels (data order) so summary() tabulates every class.
nodes$class=factor(nodes$class, levels = unique(nodes$class))
#Relative abundance: % of the selected vertices in each taxonomic class.
sub=data.frame(row.names=1:length(unique(nodes$class)),
count=summary(nodes$class, maxsum = dim(nodes)[1])/sum(summary(nodes$class, maxsum = dim(nodes)[1]))*100,
lineage=names(summary(nodes$class, maxsum = dim(nodes)[1])),
compartment=comp,
species=plant)
#Append this plant-compartment block to the plotting table.
plot=rbind(plot,sub)
}
}
#Set levels for plot; the first sorted value is dropped ([-1]) -- presumably
#an empty/blank class label; verify against the data.
taxas=sort(unique(as.character(plot$lineage)))[-1]
plot$lineage=factor(plot$lineage, levels = c(taxas,"Uncharacterized_class","low_abundant","Other"))
plot[is.na(plot$lineage),"lineage"]="Uncharacterized_class"
plot$species=factor(plot$species, levels = plants[c(2,1,3)])
#Select desired taxa: classes (column 3 of `taxon`) within each focal phylum
#(column 2); [-1] again drops the first sorted (blank-looking) entry.
pro=sort(unique(taxon[taxon[,2]=="Proteobacteria",3]))[-1]
fir=sort(unique(taxon[taxon[,2]=="Firmicutes",3]))[-1]
act=sort(unique(taxon[taxon[,2]=="Actinobacteria",3]))
bac=sort(unique(taxon[taxon[,2]=="Bacteroidetes",3]))[-1]
asc=sort(unique(taxon[taxon[,2]=="Ascomycota",3]))[-1]
#Classes outside the focal phyla are lumped into "Other".
plot[!(plot$lineage %in% c(pro,fir,act,bac,asc)),"lineage"]="Other"
#Report how many distinct taxa remain (drives the palette size below).
print(length(unique(plot$lineage)))
plot$compartment=factor(plot$compartment, levels =compartment )
#Stacked barplot of relative abundance by species, faceted by compartment.
a=ggplot(plot, aes(x=species ,y=count, fill=lineage))+
geom_bar(stat = "identity")+
facet_grid(cols = vars(compartment) )+
scale_fill_manual(values = c(getPalette(length(unique(plot$lineage))+1),"grey50"))+
scale_x_discrete(labels=c("At","As","Ca"))+
tema+ylab("Relative abundance %")
a
#Write the figure to PNG in the plots directory.
png(paste(plot.dir,paste(vert,"network_taxa","png",sep = "."),sep = "/"),
width = 1250, height = 600, units = "px", pointsize = 15, bg = "white", res=200, type="cairo")
print(a)
dev.off()
| /plot_taxa_networks.R | no_license | vicflonun/Agavinetworks | R | false | false | 4,523 | r | #Generate barplots for the taxonomy (class level) of the hub OTUs
## Libraries ##
library(Hmisc)
library(igraph)
library(rgexf)
library(ggplot2)
library(GGally)
library(network)
library(sna)
# NOTE(review): ggplot2 is attached twice (also above); harmless but redundant.
library(ggplot2)
library(tidyr)
library(VennDiagram)
## Shared ggplot theme applied to the figure at the end of the script.
tema=theme(axis.text.x = element_text(color="black",size=14), #angle=90,hjust=0.95,vjust=0.2),
axis.text.y = element_text(color="black",size=14),
axis.title = element_text(color="black",size=14),
legend.text = element_text(color = "black",size=13),
strip.text.x = element_text(size=15, color="black"),
panel.border =element_rect(color = "white", fill = NA) ,
panel.background = element_rect(fill = "white",colour = "white",size = 0.5, linetype = "solid"),
panel.grid.major.y = element_blank(),panel.grid.minor.x = element_blank(),
legend.position = "right")
## Base colour palette; getPalette interpolates it to any number of taxa.
aes4=c("#024AAC","#1DACA8","#10B62F", "#E2E41C","#F48F06","#F2252C","#D140B7", "grey80","grey20")
getPalette=colorRampPalette(aes4)
## Directories ##
#WINDOWS paths (machine-specific)
setwd("C:/Users/victorfn/Google Drive/network_analysis_2018")
network.dir="C:/Users/victorfn/Google Drive/network_analysis_2018/github"
base.name= "CAM"
plot.dir=paste(network.dir,"plots",sep = "/") ; dir.create(plot.dir)
## Select samples to work with ##
# The trailing index into each candidate vector picks which entries are used.
compartment=c("soil","roo.zone.soil", "root.endosphere","rhizosphere","leaf.endosphere","phyllosphere")[c(3:6)]
plants=c("Agave.salmiana","Agave.tequilana","Agave.deserti","Cacti")[c(1,2,4)]
# Load the OTU, metadata and taxonomy tables saved by the upstream pipeline.
load(file = paste(getwd(),paste(base.name,"OTU_table",sep = "_"),sep = "/"))
load(file = paste(getwd(),paste(base.name,"metadata_table",sep = "_"),sep = "/"))
load(file = paste(getwd(),paste(base.name,"taxonomy_table",sep = "_"),sep = "/"))
#What kind of vertices do you wanna plot? (index selects one option; 2 = "hub")
vert=c("all","hub","central","connected")[2]
#Please change the taxonomy rank manually (phylum,class recommended)
#Generate plot data, one row per (taxonomic class, compartment, species).
# NOTE(review): `plot` shadows base::plot() for the rest of the script;
# a name like plot_dt would be safer.
plot=data.frame()
for(plant in plants){
for (comp in compartment) {
print(paste(plant,comp))
#Network data files saved per plant-compartment by the network pipeline.
load(file = paste(network.dir,paste(plant,comp,"igraph",sep = "."),sep = "/"))
nodes=read.delim(file = paste(network.dir,paste(plant,comp,"R_nodes","txt",sep = "."),sep = "/"))
random=read.delim(file = paste(network.dir,paste(plant,comp,"random_networks","txt",sep = "."),sep = "/"))
#Restrict to the requested vertex class ("all" keeps every node).
if (vert=="hub"){
nodes=nodes[nodes$random.hub==1,]
} else if (vert=="central") {
nodes=nodes[nodes$random.central==1,]
} else if (vert=="connected"){
nodes=nodes[nodes$random.grado==1,]
}
#Set factor levels (data order) so summary() tabulates every class.
nodes$class=factor(nodes$class, levels = unique(nodes$class))
#Relative abundance: % of the selected vertices in each taxonomic class.
sub=data.frame(row.names=1:length(unique(nodes$class)),
count=summary(nodes$class, maxsum = dim(nodes)[1])/sum(summary(nodes$class, maxsum = dim(nodes)[1]))*100,
lineage=names(summary(nodes$class, maxsum = dim(nodes)[1])),
compartment=comp,
species=plant)
#Append this plant-compartment block to the plotting table.
plot=rbind(plot,sub)
}
}
#Set levels for plot; the first sorted value is dropped ([-1]) -- presumably
#an empty/blank class label; verify against the data.
taxas=sort(unique(as.character(plot$lineage)))[-1]
plot$lineage=factor(plot$lineage, levels = c(taxas,"Uncharacterized_class","low_abundant","Other"))
plot[is.na(plot$lineage),"lineage"]="Uncharacterized_class"
plot$species=factor(plot$species, levels = plants[c(2,1,3)])
#Select desired taxa: classes (column 3 of `taxon`) within each focal phylum
#(column 2); [-1] again drops the first sorted (blank-looking) entry.
pro=sort(unique(taxon[taxon[,2]=="Proteobacteria",3]))[-1]
fir=sort(unique(taxon[taxon[,2]=="Firmicutes",3]))[-1]
act=sort(unique(taxon[taxon[,2]=="Actinobacteria",3]))
bac=sort(unique(taxon[taxon[,2]=="Bacteroidetes",3]))[-1]
asc=sort(unique(taxon[taxon[,2]=="Ascomycota",3]))[-1]
#Classes outside the focal phyla are lumped into "Other".
plot[!(plot$lineage %in% c(pro,fir,act,bac,asc)),"lineage"]="Other"
#Report how many distinct taxa remain (drives the palette size below).
print(length(unique(plot$lineage)))
plot$compartment=factor(plot$compartment, levels =compartment )
#Stacked barplot of relative abundance by species, faceted by compartment.
a=ggplot(plot, aes(x=species ,y=count, fill=lineage))+
geom_bar(stat = "identity")+
facet_grid(cols = vars(compartment) )+
scale_fill_manual(values = c(getPalette(length(unique(plot$lineage))+1),"grey50"))+
scale_x_discrete(labels=c("At","As","Ca"))+
tema+ylab("Relative abundance %")
a
#Write the figure to PNG in the plots directory.
png(paste(plot.dir,paste(vert,"network_taxa","png",sep = "."),sep = "/"),
width = 1250, height = 600, units = "px", pointsize = 15, bg = "white", res=200, type="cairo")
print(a)
dev.off()
|
########################################################################################
## Prepare volume data
## Emilie Maddison
## May 17, 2020
## Purpose: turn ST-GPR vaccine-rate models plus raw purchase data into
## country-year dose volumes for 9 vaccines, written to CSV at the end.
########################################################################################
## -----------------------------------------------------------------------------------##
## 1. Set up workspace
## -----------------------------------------------------------------------------------##
## Clear environment
## NOTE(review): rm(list = ls()) in a shared script is discouraged -- it wipes
## the caller's workspace; prefer running the script in a fresh session.
rm(list = ls())
## Set filepaths (j/h/k drive roots differ by OS; paths redacted as FILEPATH)
if (Sys.info()[1] == "Linux") {
j <- FILEPATH
h <- FILEPATH
k <- FILEPATH
} else if (Sys.info()[1] == "Windows") {
j <- FILEPATH
h <- FILEPATH
k <- FILEPATH
}
## Set directories
code.dir <- FILEPATH
in.dir <- FILEPATH
out.dir <- FILEPATH
## Source functions -- presumably these provide get_ig, get_region,
## get_location_id, get_ihme_loc and get_data used below; verify in the repo.
library(data.table)
source(paste0(FILEPATH, "helper_functions.R"))
source("FILEPATH/utility.r")
source(paste0(FILEPATH, "get_population.R"))
library(ggplot2)
## Today's date, used to stamp the output filename.
date1 <- format(Sys.time(), "%Y%m%d")
## -----------------------------------------------------------------------------------##
## 2. Read in required data
## -----------------------------------------------------------------------------------##
## Create list of countries we are estimating for (135 L, LM, UM income countries)
locs_lm <- get_ig(locs)
locs_lm <- locs_lm[income_group != 'H',]
## Pull ST-GPR data from best models, for the ~40 countries with no volume data##
## (one model-version id per vaccine)
dt_hpv1 <- get_data(151160)
dt_ipv1 <- get_data(151163)
dt_je1 <- get_data(151166)
dt_mena1 <- get_data(151169)
dt_mr1 <- get_data(151172)
dt_pcv1 <- get_data(151175)
dt_penta1 <- get_data(151178)
dt_rvv1 <- get_data(151181)
dt_yf1 <- get_data(151184)
## Read in raw data ##
raw_dt1 <- fread(paste0(in.dir, "volume/total_volume_vaccines.csv"))
setnames(raw_dt1, "Index", "ihme_loc_id")
raw_dt1 <- get_location_id(raw_dt1)
## Quick interactive sanity checks on the raw table.
head(raw_dt1)
unique(raw_dt1$Vaccine)
names(raw_dt1)
## Collapse all measles/rubella products into a single MR category.
raw_dt1[Vaccine %in% c('Measles 1st', 'Measles 2nd', 'Measles', 'Rubella'), Vaccine := 'MR']
## Total volume by location-vaccine-year; drop rows with unmapped locations.
raw_dt <- raw_dt1[, .(volume = sum(volume)), .(location_id, Vaccine, year_id) ]
raw_dt <- raw_dt[!is.na(location_id),]
length(unique(raw_dt$location_id))
raw_locs <- unique(raw_dt$location_id)
## Subset raw data to each of the 9 vaccines
raw_dt_hpv <- raw_dt[Vaccine == "HPV", ]
raw_dt_ipv <- raw_dt[Vaccine == "IPV", ]
raw_dt_je <- raw_dt[Vaccine == "JE", ]
raw_dt_mena <- raw_dt[Vaccine == "MenA", ]
raw_dt_mr <- raw_dt[Vaccine == "MR", ]
raw_dt_pcv <- raw_dt[Vaccine == "PCV", ]
raw_dt_penta <- raw_dt[Vaccine == "Penta", ]
raw_dt_rvv <- raw_dt[Vaccine == "RVV", ]
raw_dt_yf <- raw_dt[Vaccine == "YF", ]
## Find which super-regions have 0 reported units of vaccine purchased. Use this to
## overrule St-GPR models. For example, Yellow Fever vaccines are not used in every
## country, but St-GPR doesn't know that. We will assume that in super-regions where
## sum(volume) == 0 for the entire time series, no country in that region buys that
## vaccine.
# Sum purchased doses by super-region across the full time series for one
# vaccine's raw data.  A super-region total of zero is later used to force
# modelled volume to zero (St-GPR cannot infer that a region never buys a
# given vaccine).
#
# Args:    raw_dt1 - raw volume table (location_id, year_id, volume, ...).
# Returns: table of super_region_name and region_volume (summed doses).
find_zero_regions <- function(raw_dt1) {
  dt_test <- get_region(raw_dt1)
  # Return the summary directly (the original assigned it to dt_test on the
  # last line, which made the result return invisibly for no benefit).
  dt_test[, .(region_volume = sum(volume)), .(super_region_name)]
}
## Per-super-region dose totals; the inline notes record which super-regions
## are all-zero for each vaccine.
raw_dt_hpv_summary <- find_zero_regions(raw_dt_hpv) #all but NAME
raw_dt_ipv_summary <- find_zero_regions(raw_dt_ipv) # no zeros
raw_dt_je_summary <- find_zero_regions(raw_dt_je) #SAEAO, SA
raw_dt_mena_summary <- find_zero_regions(raw_dt_mena) #NAME, SSA
raw_dt_mr_summary <- find_zero_regions(raw_dt_mr) #no zeros
raw_dt_pcv_summary <- find_zero_regions(raw_dt_pcv) #no zeros
raw_dt_penta_summary <- find_zero_regions(raw_dt_penta) #no zeros
raw_dt_rvv_summary <- find_zero_regions(raw_dt_rvv) # no zeros
raw_dt_yf_summary <- find_zero_regions(raw_dt_yf) # NAME, SSA
## -----------------------------------------------------------------------------------##
## 3. Create final dataset using raw data where available ##
## Steps: Keep modeled data only where raw data does not exist,
##        Transform into volume space using Under-15 population,
##        Set to zero doses where the super-region total raw doses = 0, and
##        Append raw data where it exists
## -----------------------------------------------------------------------------------##
## Under-15 population (age groups 1, 6, 7) for 2000-2018, GBD round 6.
pops <- get_population(location_id = 'all',
age_group_id = c(1, 6, 7),
year_id = c(2000:2018),
gbd_round_id = 6,
decomp_step = 'step4')
## Restrict to the modelled locations and collapse the age groups into one
## total population per location-year.
pops1 <- merge(pops, locs, by = 'location_id', all.y = T)
pops1 <- pops1[, .(population = sum(population)), .(location_id, year_id)]
## Scratch values for stepping through transform_to_volume() by hand:
# dt1 <- dt_hpv1
# raw_dt_summary <- raw_dt_hpv_summary
# raw_dt1 <- raw_dt_hpv
# vac_name <- "HPV"
# Convert modelled ST-GPR rates into vaccine dose volumes for one vaccine.
#
# Args:
#   dt1:            ST-GPR output (gpr_mean/lower/upper in log-rate space).
#   raw_dt_summary: per-super-region summed raw volumes (find_zero_regions);
#                   used to zero out regions that never purchase this vaccine.
#   raw_dt1:        raw reported volumes for this vaccine; kept in the
#                   signature for compatibility -- the append step that used
#                   it is currently disabled.
#   vac_name:       vaccine label written into the Vaccine column.
#
# Returns: data.table with location_id, year_id, Vaccine, volume1 and bounds.
transform_to_volume <- function(dt1, raw_dt_summary, raw_dt1, vac_name) {
  dt <- get_region(dt1)
  dt[, Vaccine := vac_name]
  # Keep only the 135 L/LM/UM income countries.
  dt <- dt[(location_id %in% locs_lm$location_id), ]
  dt <- merge(dt, pops1, by = c('location_id', 'year_id'), all.x = T)
  ## Un-log transform and remove the zero padding added before modelling.
  dt[, `:=`(gpr_mean = exp(gpr_mean),
            gpr_lower = exp(gpr_lower),
            gpr_upper = exp(gpr_upper))]
  adj_val <- min(dt$gpr_mean)
  dt[, `:=`(gpr_mean = gpr_mean - adj_val,
            gpr_lower = gpr_lower - adj_val,
            gpr_upper = gpr_upper - adj_val)]
  # Rate * under-15 population = dose volume.
  dt[, `:=`(volume1 = gpr_mean * population,
            volume_lower1 = gpr_lower * population,
            volume_upper1 = gpr_upper * population)]
  dt <- merge(dt, raw_dt_summary, by = 'super_region_name', all.x = T)
  # Super-regions with zero reported purchases across the whole series get
  # zero modelled volume (overrides St-GPR, which cannot infer non-use).
  dt[region_volume == 0, `:=`(volume1 = 0,
                              volume_lower1 = 0,
                              volume_upper1 = 0)]
  dt <- dt[, .(location_id, year_id, Vaccine, volume1, volume_lower1, volume_upper1)]
  # BUG FIX: removed a dead `dt2 <- merge(dt, raw_dt1, ...)` line whose result
  # was computed and then discarded (the function returned `dt` regardless).
  return(dt)
}
## Apply the rate-to-volume conversion to each of the 9 vaccines.
dt_hpv <- transform_to_volume(dt_hpv1, raw_dt_hpv_summary, raw_dt_hpv, "HPV")
dt_ipv <- transform_to_volume(dt_ipv1, raw_dt_ipv_summary, raw_dt_ipv, "IPV")
dt_je <- transform_to_volume(dt_je1, raw_dt_je_summary, raw_dt_je, "JE")
dt_mena <- transform_to_volume(dt_mena1, raw_dt_mena_summary, raw_dt_mena, "MenA")
dt_mr <- transform_to_volume(dt_mr1, raw_dt_mr_summary, raw_dt_mr, "MR")
dt_pcv <- transform_to_volume(dt_pcv1, raw_dt_pcv_summary, raw_dt_pcv, "PCV")
dt_penta <- transform_to_volume(dt_penta1, raw_dt_penta_summary, raw_dt_penta, "Penta")
dt_rvv <- transform_to_volume(dt_rvv1, raw_dt_rvv_summary, raw_dt_rvv, "RVV")
dt_yf <- transform_to_volume(dt_yf1, raw_dt_yf_summary, raw_dt_yf, "YF")
## -----------------------------------------------------------------------------------##
## 4. Test data
## to make sure all countries and years are present, and that all values are positive
## -----------------------------------------------------------------------------------##
# Validate the shape of one per-vaccine volume table.
# Returns TRUE only if all 135 locations and all 19 years (2000-2018) are
# present and no volume is negative.
# BUG FIX (two defects in the original):
#   1. The three comparisons were separate statements, so only the last one
#      was returned -- the location and year checks were silently discarded.
#   2. It read dt1$volume, but transform_to_volume() names the column
#      `volume1`; min(NULL) is Inf (with a warning), so the sign check
#      always passed.
check_shape <- function(dt1) {
  length(unique(dt1$location_id)) == 135 &&
    length(unique(dt1$year_id)) == 19 &&
    min(dt1$volume1) >= 0
}
## Run the shape check interactively for each vaccine table.
## NOTE(review): results are only printed, not asserted -- a FALSE here will
## not stop the script.
check_shape(dt_hpv)
check_shape(dt_ipv)
check_shape(dt_je)
check_shape(dt_mena)
check_shape(dt_mr)
check_shape(dt_pcv)
check_shape(dt_penta)
check_shape(dt_rvv)
check_shape(dt_yf)
## Create aggregated dataset: stack the 9 per-vaccine tables.
dt_volume_all <- rbind(dt_hpv, dt_ipv, dt_je, dt_mena, dt_mr, dt_pcv, dt_penta, dt_rvv, dt_yf)
dt_volume_all <- get_ihme_loc(dt_volume_all)
## Remove clutter
## NOTE(review): dt_je1 and the je/yf summaries are not removed here --
## confirm whether that omission is intentional.
rm(dt_hpv1, dt_ipv1, dt_mena1, dt_mr1, dt_pcv1, dt_penta1, dt_rvv1, dt_yf1)
rm(raw_dt_hpv_summary, raw_dt_ipv_summary, raw_dt_mena_summary, raw_dt_mr_summary,
raw_dt_pcv_summary, raw_dt_penta_summary, raw_dt_rvv_summary, raw_dt_yf_summary)
## -----------------------------------------------------------------------------------##
## 6. Write Final dataset (date-stamped CSV in the input volume directory)
## -----------------------------------------------------------------------------------##
fwrite(dt_volume_all, paste0(in.dir, "volume/135_total_volume_vaccines_", date1, ".csv"))
## END OF SCRIPT ##
| /Immunization Financing Project/code/out-of-pocket/03_stgpr/03b_4_prep_volume_results.R | no_license | hashimig/Resource_Tracking_Domestic_Health_Accounts | R | false | false | 7,963 | r | ########################################################################################
## Prepare volume data
## Emilie Maddison
## May 17, 2020
## Purpose: turn ST-GPR vaccine-rate models plus raw purchase data into
## country-year dose volumes for 9 vaccines, written to CSV at the end.
########################################################################################
## -----------------------------------------------------------------------------------##
## 1. Set up workspace
## -----------------------------------------------------------------------------------##
## Clear environment
## NOTE(review): rm(list = ls()) in a shared script is discouraged -- it wipes
## the caller's workspace; prefer running the script in a fresh session.
rm(list = ls())
## Set filepaths (j/h/k drive roots differ by OS; paths redacted as FILEPATH)
if (Sys.info()[1] == "Linux") {
j <- FILEPATH
h <- FILEPATH
k <- FILEPATH
} else if (Sys.info()[1] == "Windows") {
j <- FILEPATH
h <- FILEPATH
k <- FILEPATH
}
## Set directories
code.dir <- FILEPATH
in.dir <- FILEPATH
out.dir <- FILEPATH
## Source functions -- presumably these provide get_ig, get_region,
## get_location_id and get_data used below; verify in the repo.
library(data.table)
source(paste0(FILEPATH, "helper_functions.R"))
source("FILEPATH/utility.r")
source(paste0(FILEPATH, "get_population.R"))
library(ggplot2)
## Today's date, used to stamp the output filename.
date1 <- format(Sys.time(), "%Y%m%d")
## 2. Read in required data
## -----------------------------------------------------------------------------------##
## Create list of countries we are estimating for (135 L, LM, UM income countries)
locs_lm <- get_ig(locs)
locs_lm <- locs_lm[income_group != 'H',]
## Pull ST-GPR data from best models, for the ~40 countries with no volume data##
dt_hpv1 <- get_data(151160)
dt_ipv1 <- get_data(151163)
dt_je1 <- get_data(151166)
dt_mena1 <- get_data(151169)
dt_mr1 <- get_data(151172)
dt_pcv1 <- get_data(151175)
dt_penta1 <- get_data(151178)
dt_rvv1 <- get_data(151181)
dt_yf1 <- get_data(151184)
## Read in raw data ##
raw_dt1 <- fread(paste0(in.dir, "volume/total_volume_vaccines.csv"))
setnames(raw_dt1, "Index", "ihme_loc_id")
raw_dt1 <- get_location_id(raw_dt1)
head(raw_dt1)
unique(raw_dt1$Vaccine)
names(raw_dt1)
raw_dt1[Vaccine %in% c('Measles 1st', 'Measles 2nd', 'Measles', 'Rubella'), Vaccine := 'MR']
raw_dt <- raw_dt1[, .(volume = sum(volume)), .(location_id, Vaccine, year_id) ]
raw_dt <- raw_dt[!is.na(location_id),]
length(unique(raw_dt$location_id))
raw_locs <- unique(raw_dt$location_id)
## Subset raw data to each of the 9 vaccines
raw_dt_hpv <- raw_dt[Vaccine == "HPV", ]
raw_dt_ipv <- raw_dt[Vaccine == "IPV", ]
raw_dt_je <- raw_dt[Vaccine == "JE", ]
raw_dt_mena <- raw_dt[Vaccine == "MenA", ]
raw_dt_mr <- raw_dt[Vaccine == "MR", ]
raw_dt_pcv <- raw_dt[Vaccine == "PCV", ]
raw_dt_penta <- raw_dt[Vaccine == "Penta", ]
raw_dt_rvv <- raw_dt[Vaccine == "RVV", ]
raw_dt_yf <- raw_dt[Vaccine == "YF", ]
## Find which super-regions have 0 reported units of vaccine purchased. Use this to
## overrule St-GPR models. For example, Yellow Fever vaccines are not used in every
## country, but St-GPR doesn't know that. We will assume that in super-regions where
## sum(volume) == 0 for the entire time series, no country in that region buys that
## vaccine.
## Summarise total reported purchase volume per super-region.
##
## Used to detect super-regions where a vaccine is never purchased
## (region_volume == 0), so ST-GPR estimates there can later be forced
## to zero in transform_to_volume().
##
## @param raw_dt1 data.table with a `volume` column plus whatever
##   get_region() needs to attach `super_region_name`. (The name shadows
##   the global `raw_dt1`; only the argument passed in is used.)
## @return data.table with one row per super_region_name and the total
##   `region_volume` over the whole time series.
find_zero_regions <- function(raw_dt1) {
  dt_test <- get_region(raw_dt1)
  ## FIX: previously the function ended on an assignment, so the summary
  ## table was only returned invisibly; return it explicitly.
  dt_test[, .(region_volume = sum(volume)), .(super_region_name)]
}
raw_dt_hpv_summary <- find_zero_regions(raw_dt_hpv) #all but NAME
raw_dt_ipv_summary <- find_zero_regions(raw_dt_ipv) # no zeros
raw_dt_je_summary <- find_zero_regions(raw_dt_je) #SAEAO, SA
raw_dt_mena_summary <- find_zero_regions(raw_dt_mena) #NAME, SSA
raw_dt_mr_summary <- find_zero_regions(raw_dt_mr) #no zeros
raw_dt_pcv_summary <- find_zero_regions(raw_dt_pcv) #no zeros
raw_dt_penta_summary <- find_zero_regions(raw_dt_penta) #no zeros
raw_dt_rvv_summary <- find_zero_regions(raw_dt_rvv) # no zeros
raw_dt_yf_summary <- find_zero_regions(raw_dt_yf) # NAME, SSA
## -----------------------------------------------------------------------------------##
## 3. Create final dataset using raw data where available ##
## Steps: Keep modeled data only where raw data does not exist,
## Transform into volume space using Under-15 population,
## Set to zero doses where the super-region total raw doses = 0, and
## Append raw data where it exists
## -----------------------------------------------------------------------------------##
pops <- get_population(location_id = 'all',
age_group_id = c(1, 6, 7),
year_id = c(2000:2018),
gbd_round_id = 6,
decomp_step = 'step4')
pops1 <- merge(pops, locs, by = 'location_id', all.y = T)
pops1 <- pops1[, .(population = sum(population)), .(location_id, year_id)]
# dt1 <- dt_hpv1
# raw_dt_summary <- raw_dt_hpv_summary
# raw_dt1 <- raw_dt_hpv
# vac_name <- "HPV"
## Convert modelled per-capita ST-GPR estimates into dose volumes and
## attach raw procurement data where it exists.
##
## Steps:
##   1. restrict to the 135 L/LM/UM income countries (locs_lm),
##   2. back-transform the log-space GPR draws and remove the zero padding,
##   3. scale per-capita rates by under-15 population (pops1) into volumes,
##   4. force volumes to zero in super-regions with no reported purchases,
##   5. merge the raw reported volumes onto the modelled series.
##
## @param dt1 data.table of ST-GPR output (gpr_mean/lower/upper, log space).
## @param raw_dt_summary data.table from find_zero_regions() with per
##   super-region volume totals.
## @param raw_dt1 data.table of raw reported volumes for one vaccine
##   (location_id, Vaccine, year_id, volume).
## @param vac_name character, vaccine label written into the Vaccine column.
## @return data.table with modelled volume bounds (volume1, volume_lower1,
##   volume_upper1) plus the raw `volume` column where reported (NA elsewhere).
transform_to_volume <- function(dt1, raw_dt_summary, raw_dt1, vac_name) {
  dt <- get_region(dt1)
  dt[, Vaccine := vac_name]
  ## keep only the 135 L/LM/UM income countries
  dt <- dt[(location_id %in% locs_lm$location_id), ]
  dt <- merge(dt, pops1, by = c('location_id', 'year_id'), all.x = T)
  ## Un-log transform and remove zero padding
  dt[, `:=`(gpr_mean = exp(gpr_mean),
            gpr_lower = exp(gpr_lower),
            gpr_upper = exp(gpr_upper))]
  adj_val <- min(dt$gpr_mean)
  dt[, `:=`(gpr_mean = gpr_mean - adj_val,
            gpr_lower = gpr_lower - adj_val,
            gpr_upper = gpr_upper - adj_val)]
  ## per-capita rate * under-15 population = doses
  dt[, `:=`(volume1 = gpr_mean * population,
            volume_lower1 = gpr_lower * population,
            volume_upper1 = gpr_upper * population)]
  dt <- merge(dt, raw_dt_summary, by = 'super_region_name', all.x = T)
  ## super-regions that never report buying this vaccine get zero doses
  dt[region_volume == 0, `:=`(volume1 = 0,
                              volume_lower1 = 0,
                              volume_upper1 = 0)]
  dt <- dt[, .(location_id, year_id, Vaccine, volume1, volume_lower1, volume_upper1)]
  ## BUG FIX: the merge with the raw data was previously computed into `dt2`
  ## but the unmerged `dt` was returned, silently dropping the raw volumes
  ## (and leaving no `volume` column for check_shape() downstream). Return
  ## the merged table, as the section-3 header ("Append raw data where it
  ## exists") intends.
  dt2 <- merge(dt, raw_dt1, by = c('location_id', 'Vaccine', 'year_id'), all.x = T)
  return(dt2)
}
dt_hpv <- transform_to_volume(dt_hpv1, raw_dt_hpv_summary, raw_dt_hpv, "HPV")
dt_ipv <- transform_to_volume(dt_ipv1, raw_dt_ipv_summary, raw_dt_ipv, "IPV")
dt_je <- transform_to_volume(dt_je1, raw_dt_je_summary, raw_dt_je, "JE")
dt_mena <- transform_to_volume(dt_mena1, raw_dt_mena_summary, raw_dt_mena, "MenA")
dt_mr <- transform_to_volume(dt_mr1, raw_dt_mr_summary, raw_dt_mr, "MR")
dt_pcv <- transform_to_volume(dt_pcv1, raw_dt_pcv_summary, raw_dt_pcv, "PCV")
dt_penta <- transform_to_volume(dt_penta1, raw_dt_penta_summary, raw_dt_penta, "Penta")
dt_rvv <- transform_to_volume(dt_rvv1, raw_dt_rvv_summary, raw_dt_rvv, "RVV")
dt_yf <- transform_to_volume(dt_yf1, raw_dt_yf_summary, raw_dt_yf, "YF")
## -----------------------------------------------------------------------------------##
## 4. Test data
## to make sure all countries and years are present, and that all values are positive
## -----------------------------------------------------------------------------------##
## Validate the shape of a final per-vaccine dataset: all 135 countries and
## all 19 years (2000-2018) present, and no negative volumes.
##
## BUG FIX: previously the first two comparisons were evaluated and silently
## discarded, so only the min-volume check was actually returned. All three
## conditions are now combined.
##
## @param dt1 data.frame/data.table with location_id, year_id and volume.
## @return TRUE only if all three checks pass, otherwise FALSE.
check_shape <- function(dt1) {
  length(unique(dt1$location_id)) == 135 &&
    length(unique(dt1$year_id)) == 19 &&
    isTRUE(min(dt1$volume, na.rm = TRUE) >= 0)  # na.rm: raw merge leaves NAs
}
check_shape(dt_hpv)
check_shape(dt_ipv)
check_shape(dt_je)
check_shape(dt_mena)
check_shape(dt_mr)
check_shape(dt_pcv)
check_shape(dt_penta)
check_shape(dt_rvv)
check_shape(dt_yf)
## Create aggregated dataset
dt_volume_all <- rbind(dt_hpv, dt_ipv, dt_je, dt_mena, dt_mr, dt_pcv, dt_penta, dt_rvv, dt_yf)
dt_volume_all <- get_ihme_loc(dt_volume_all)
## Remove clutter
rm(dt_hpv1, dt_ipv1, dt_mena1, dt_mr1, dt_pcv1, dt_penta1, dt_rvv1, dt_yf1)
rm(raw_dt_hpv_summary, raw_dt_ipv_summary, raw_dt_mena_summary, raw_dt_mr_summary,
raw_dt_pcv_summary, raw_dt_penta_summary, raw_dt_rvv_summary, raw_dt_yf_summary)
## -----------------------------------------------------------------------------------##
## 6. Write Final dataset
## -----------------------------------------------------------------------------------##
fwrite(dt_volume_all, paste0(in.dir, "volume/135_total_volume_vaccines_", date1, ".csv"))
## END OF SCRIPT ##
|
\name{setEdgeTargetArrowColorRule}
\alias{setEdgeTargetArrowColorRule}
\alias{setEdgeTargetArrowColorRule,CytoscapeWindowClass-method}
\title{Specify rule for the target arrow color}
\description{
Specify how edge attributes -- that is, data values of the specified edge
attribute -- control the color of the target arrow, found at the end of
an edge, where it connects to the target node.}
\usage{
setEdgeTargetArrowColorRule(obj, edge.attribute.name, control.points, colors, mode="interpolate", default.color='#000000')
}
\arguments{
\item{obj}{a \code{CytoscapeWindowClass} object. }
\item{edge.attribute.name}{the edge attribute whose values will determine
the color of the target arrow of each edge when this rule is applied.}
\item{control.points}{A list of scalar, discrete values. For instance,
interaction types: 'phosphorylates', 'ubiquitinates', 'represses', 'activates'}
\item{colors}{A color for each of the attribute.values}
\item{mode}{either 'interpolate' or 'lookup'.}
\item{default.color}{The color to use when an explicit mapping is not
provided.}
}
\value{
None.
}
\author{Tanja Muetze, Georgi Kolishovski, Paul Shannon}
\seealso{
setNodeBorderColorRule (detailed example)
\code{\link{setEdgeSourceArrowColorRule}}
setEdgeColorRule
setNodeShapeRule
}
\examples{
\dontrun{
# first, delete existing windows to save memory:
deleteAllWindows(CytoscapeConnection())
# send and display network
cw <- CytoscapeWindow ('setEdgeTargetArrowColorRule.test', graph=makeSimpleGraph())
displayGraph (cw)
layoutNetwork (cw, 'force-directed')
# add edge arrows
arrows <- c ('CIRCLE', 'ARROW', 'DIAMOND')
edgeType.values <- c ('phosphorylates', 'synthetic lethal', 'undefined')
setEdgeTargetArrowRule (cw, 'edgeType', edgeType.values, arrows)
colors <- c ("#AA00AA", "#AAAA00", "#AA0000")
edgeType.values <- c ('phosphorylates', 'synthetic lethal', 'undefined')
# set rule
setEdgeTargetArrowColorRule (cw, 'edgeType', edgeType.values, colors, mode='lookup')
# if not specified, the mode is interpolate
colors <- c ("#FFFFFF", "#00FF00", "#00AA00", "#FF0000", "#AA0000")
control.points <- c( -12.0, 35.0, 0.0 )
setEdgeTargetArrowColorRule(cw, 'score', control.points, colors)
}
}
\keyword{graph}
| /man/setEdgeTargetArrowColorRule.Rd | no_license | sathishsrinivasank/Bioconductor_RCy3_the_new_RCytoscape | R | false | false | 2,296 | rd | \name{setEdgeTargetArrowColorRule}
\alias{setEdgeTargetArrowColorRule}
\alias{setEdgeTargetArrowColorRule,CytoscapeWindowClass-method}
\title{Specify rule for the target arrow color}
\description{
Specify how edge attributes -- that is, data values of the specified edge
attribute -- control the color of the target arrow, found at the end of
an edge, where it connects to the target node.}
\usage{
setEdgeTargetArrowColorRule(obj, edge.attribute.name, control.points, colors, mode="interpolate", default.color='#000000')
}
\arguments{
\item{obj}{a \code{CytoscapeWindowClass} object. }
\item{edge.attribute.name}{the edge attribute whose values will determine
the color of the target arrow of each edge when this rule is applied.}
\item{control.points}{A list of scalar, discrete values. For instance,
interaction types: 'phosphorylates', 'ubiquitinates', 'represses', 'activates'}
\item{colors}{A color for each of the attribute.values}
\item{mode}{either 'interpolate' or 'lookup'.}
\item{default.color}{The color to use when an explicit mapping is not
provided.}
}
\value{
None.
}
\author{Tanja Muetze, Georgi Kolishovski, Paul Shannon}
\seealso{
setNodeBorderColorRule (detailed example)
\code{\link{setEdgeSourceArrowColorRule}}
setEdgeColorRule
setNodeShapeRule
}
\examples{
\dontrun{
# first, delete existing windows to save memory:
deleteAllWindows(CytoscapeConnection())
# send and display network
cw <- CytoscapeWindow ('setEdgeTargetArrowColorRule.test', graph=makeSimpleGraph())
displayGraph (cw)
layoutNetwork (cw, 'force-directed')
# add edge arrows
arrows <- c ('CIRCLE', 'ARROW', 'DIAMOND')
edgeType.values <- c ('phosphorylates', 'synthetic lethal', 'undefined')
setEdgeTargetArrowRule (cw, 'edgeType', edgeType.values, arrows)
colors <- c ("#AA00AA", "#AAAA00", "#AA0000")
edgeType.values <- c ('phosphorylates', 'synthetic lethal', 'undefined')
# set rule
setEdgeTargetArrowColorRule (cw, 'edgeType', edgeType.values, colors, mode='lookup')
# if not specified, the mode is interpolate
colors <- c ("#FFFFFF", "#00FF00", "#00AA00", "#FF0000", "#AA0000")
control.points <- c( -12.0, 35.0, 0.0 )
setEdgeTargetArrowColorRule(cw, 'score', control.points, colors)
}
}
\keyword{graph}
|
# Kaplan-Meier survival analysis for the pediatric cohort: runs kaplan_meier()
# on the PBTA + PNOC008 nearest-neighbour table and saves the result.
# directories: locate the repository root via the .git folder so relative
# paths work regardless of the working directory
root_dir <- rprojroot::find_root(rprojroot::has_dir(".git"))
source(file.path(root_dir, "code", "utils", "define_directories.R"))
# source functions
# `patient_level_analyses_utils` is presumably defined by
# define_directories.R above -- TODO confirm
source(file.path(patient_level_analyses_utils, 'kaplan_meier.R'))
# recurrent alterations
# NOTE(review): `pbta_pnoc008_nn_table` and `pbta_survival` are expected to
# exist in the calling environment (loaded by an upstream driver) -- confirm
kaplan_meier_pediatric <- kaplan_meier(all_cor = pbta_pnoc008_nn_table,
surv_data = pbta_survival)
# save output (`topDir` also presumably comes from define_directories.R)
saveRDS(kaplan_meier_pediatric, file = file.path(topDir, "output", "kaplan_meier_pediatric.rds"))
| /code/patient_level_analyses/p5_kaplan_meier_pediatric.R | no_license | Shicheng-Guo/OMPARE | R | false | false | 507 | r | # directories
root_dir <- rprojroot::find_root(rprojroot::has_dir(".git"))
source(file.path(root_dir, "code", "utils", "define_directories.R"))
# source functions
source(file.path(patient_level_analyses_utils, 'kaplan_meier.R'))
# recurrent alterations
kaplan_meier_pediatric <- kaplan_meier(all_cor = pbta_pnoc008_nn_table,
surv_data = pbta_survival)
# save output
saveRDS(kaplan_meier_pediatric, file = file.path(topDir, "output", "kaplan_meier_pediatric.rds"))
|
# Test whether a correlation between two variables changes
# from one within-subject condition to another.
# (Idiom fix: use `<-` for assignment consistently; stray `;` removed.)
N_Subj <- 60     # number of subjects
N_Within <- 2    # within-subject conditions per subject
rel_str <- 0.3   # strength of the x-y relation in condition 2
subj_var <- 1.5  # scale of the random subject effect
subj <- rep(1:N_Subj, each = N_Within)
subj_effect <- subj_var * rep(rnorm(N_Subj), each = N_Within)
condition_within_subject <- rep(1:N_Within, length.out = length(subj))
x <- rnorm(length(condition_within_subject))
y <- rnorm(length(condition_within_subject))
# in condition 2 only, y is a rel_str-weighted mix of x and fresh noise
y[condition_within_subject == 2] <- rel_str * x[condition_within_subject == 2] +
  (1 - rel_str) * rnorm(length(condition_within_subject) / 2)
y <- y + subj_effect
subj <- as.factor(subj)
condition_within_subject <- as.factor(condition_within_subject)
Data <- data.frame(subj, condition_within_subject, x, y)
## Permutation test for a difference in x-y correlation between the two
## within-subject conditions.
##
## The observed statistic is |r(cond1) - r(cond2)|. Under the null the
## pairing of each subject's (x1, x2) column with its y values is
## exchangeable, so whole-subject x columns are shuffled nIts times and the
## statistic is rebuilt to form the reference distribution.
##
## @param Data data.frame with columns condition_within_subject (levels
##   "1"/"2", alternating within subject, one row per condition), x and y.
## @param nIts integer, number of permutations. Defaults to 10000, the
##   previously hard-coded value, so existing calls behave identically.
## @return the Monte-Carlo p-value, invisibly; also printed via cat().
test_cor <- function(Data, nIts = 10000) {
  ind_cond1 <- Data$condition_within_subject == 1
  ind_cond2 <- Data$condition_within_subject == 2
  ## BUG FIX: the shuffled correlations previously read `y` from the global
  ## environment, not from Data, so the test silently depended on a global
  ## mirroring Data$y. Bind it locally once.
  y <- Data$y
  r.jk_obs <- cor(Data$x[ind_cond1], y[ind_cond1])
  r.hm_obs <- cor(Data$x[ind_cond2], y[ind_cond2])
  drv <- numeric(nIts)  # preallocate; growing with c() in a loop is O(n^2)
  for (iIt in seq_len(nIts)) {
    # Shuffle both x-scores over participants (columns = subjects)
    X <- matrix(Data$x, nrow = 2)
    X <- X[, sample(ncol(X))]
    x <- as.vector(X)
    r.jk_sh <- cor(x[ind_cond1], y[ind_cond1])
    r.hm_sh <- cor(x[ind_cond2], y[ind_cond2])
    drv[iIt] <- abs(r.jk_sh - r.hm_sh)
  }
  p <- sum(drv >= abs(r.jk_obs - r.hm_obs)) / length(drv)
  cat('MC p = ', p, '\n')
  invisible(p)  # previously returned cat()'s NULL; now usable by callers
}
test_cor(Data)
| /test_dep_cor_sims.R | permissive | thomasgladwin/test_dep_cor_sims | R | false | false | 1,456 | r | # Test whether a correlation between two variables changes
# from one within-subject condition to another.
N_Subj <- 60
N_Within <- 2
rel_str = 0.3
subj_var = 1.5;
subj <- rep(1:N_Subj, each=N_Within)
subj_effect <- subj_var * rep(rnorm(N_Subj), each=N_Within)
condition_within_subject <- rep(1:N_Within, length.out=length(subj))
x <- rnorm(length(condition_within_subject))
y <- rnorm(length(condition_within_subject))
y[condition_within_subject==2] = rel_str * x[condition_within_subject==2] + (1 - rel_str) * rnorm(length(condition_within_subject)/2)
y <- y + subj_effect
subj <- as.factor(subj)
condition_within_subject <- as.factor(condition_within_subject)
Data <- data.frame(subj, condition_within_subject, x, y)
## Permutation test for a difference in x-y correlation between the two
## within-subject conditions: shuffles whole-subject x columns and compares
## |r(cond1) - r(cond2)| against the permutation distribution.
## NOTE(review): duplicate copy of test_cor from earlier in this dump; the
## same issues apply here.
test_cor = function(Data) {
ind_cond1 <- Data$condition_within_subject == 1
ind_cond2 <- Data$condition_within_subject == 2
r.jk_obs <- cor(Data$x[ind_cond1], Data$y[ind_cond1])
r.hm_obs <- cor(Data$x[ind_cond2], Data$y[ind_cond2])
drv <- c()  # NOTE(review): grown with c() in the loop below -- O(n^2)
nIts <- 10000
for (iIt in 1:nIts) {
# Shuffle both x-scores over participants
X <- matrix(Data$x, nrow = 2)  # assumes rows ordered subject-by-subject, 2 conditions each
X <- X[, sample(ncol(X))]
x <- as.vector(X)
# NOTE(review): `y` here is read from the global environment, not Data$y --
# only correct when the caller's globals mirror Data; confirm intent.
r.jk_sh <- cor(x[ind_cond1], y[ind_cond1])
r.hm_sh <- cor(x[ind_cond2], y[ind_cond2])
dr <- abs(r.jk_sh - r.hm_sh)
drv <- c(drv, dr)
}
p <- sum(drv >= abs(r.jk_obs - r.hm_obs))/length(drv)
cat('MC p = ', p, '\n')  # prints the p-value; function returns cat()'s NULL
}
test_cor(Data)
|
# Render the full report from its R Markdown source to HTML using the
# rmdformats "readthedown" theme. self_contained = FALSE keeps assets
# (CSS/JS/images) as separate files next to the output; a custom stylesheet
# plus header/footer HTML fragments are injected around the document body.
rmarkdown::render(
"produce_full_report.Rmd",
output_format = rmdformats::readthedown(
self_contained = FALSE,
css = "mystyle.css",
include = list(before_body = "header.html",
after_body = "footer.html")
)
)
| /src/produce_full_report/script.R | no_license | rcleoni/covid19-forecasts-orderly | R | false | false | 245 | r | rmarkdown::render(
"produce_full_report.Rmd",
output_format = rmdformats::readthedown(
self_contained = FALSE,
css = "mystyle.css",
include = list(before_body = "header.html",
after_body = "footer.html")
)
)
|
# Package hook file: the roxygen directives below generate the NAMESPACE
# entries that register compboost's compiled routines and import Rcpp.
# The bare NULL after each block gives roxygen an object to attach to.
#' @useDynLib compboost, .registration = TRUE
NULL
#' @import Rcpp
NULL
| /R/zzz.R | permissive | QuayAu/compboost | R | false | false | 73 | r | #' @useDynLib compboost, .registration = TRUE
NULL
#' @import Rcpp
NULL
|
## Time-consuming computations can be sped up by caching the result if the content of e.g. a vector or matrix has not changed.
## So, instead of repeating the computation, the program will look up the cached value of the computation (which is stored in
## a different environment than the current environment).
## This function makes a 'special' matrix object that can cache its inverse; a square invertible matrix.
## It creates a list containing a function to set value of matrix, get the matrix, set the inverse and get the inverse of the matrix.
## List is used as input in next function 'cacheSolve'
## Build a cache-aware matrix wrapper.
##
## Returns a list of four accessors over a shared enclosure:
##   set(y)       -- replace the stored matrix and invalidate the cache
##   get()        -- return the stored matrix
##   setmatrix(m) -- store a computed inverse in the cache
##   getmatrix()  -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    ## the stored matrix changed, so any previously cached inverse is stale
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setmatrix <- function(solve) {
    cached_inverse <<- solve
  }
  getmatrix <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}
## The following function calculates and returns the inverse of the matrix created in the function above.
## But, first it checks whether if the inverse of the matrix already has been calculated.
## If this is the case,it gets it from cache and skips calculation; otherwise it computes the inverse and
## saved it in cache via set matrix function.
## Return the inverse of the special matrix created by makeCacheMatrix().
##
## If the inverse was computed before it is fetched from the cache;
## otherwise it is computed with solve(), stored via x$setmatrix(), and
## returned.
##
## BUG FIX: the previous default `x = matrix` bound the base constructor
## *function* as the default argument, which could never work and only
## produced a confusing error deep inside the body; `x` is now required.
## The local named `matrix` (which shadowed base::matrix) is also gone.
##
## @param x list produced by makeCacheMatrix() (get/setmatrix/getmatrix).
## @param ... further arguments passed on to solve().
## @return the matrix inverse of x$get().
cacheSolve <- function(x, ...) {
  m <- x$getmatrix()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  m <- solve(x$get(), ...)
  x$setmatrix(m)
  m
}
| /cachematrix.R | no_license | toracornelia/ProgrammingAssignment2 | R | false | false | 1,750 | r | ## Time-consuming computations can be speeded up by cacheing the result if the content of e.g. vector or matrix has not changed.
## So, instead of repeating the computation, the program will look up the cached value of the computation (which is stored in
## a different environment than the current environment).
## This function makes a 'special' matrix object that can cache its inverse; a square invertible matrix.
## It creates a list containing a function to set value of matrix, get the matrix, set the inverse and get the inverse of the matrix.
## List is used as input in next function 'cacheSolve'
## Build a cache-aware matrix wrapper: returns set/get accessors for the
## matrix plus setmatrix/getmatrix for a cached inverse stored in the
## function's enclosure. (Duplicate copy of makeCacheMatrix from earlier
## in this dump.)
makeCacheMatrix <- function(x = matrix()) {
m<-NULL
set<-function(y){
x<<-y
m<<-NULL  # invalidate the cached inverse when the matrix changes
}
get<-function()x
setmatrix<-function(solve)m <<- solve ## store a computed inverse in the cache
getmatrix<-function()m
list(set=set,get=get,
setmatrix=setmatrix,
getmatrix=getmatrix)
}
## The following function calculates and returns the inverse of the matrix created in the function above.
## But, first it checks whether if the inverse of the matrix already has been calculated.
## If this is the case,it gets it from cache and skips calculation; otherwise it computes the inverse and
## saved it in cache via set matrix function.
## Return the inverse of a makeCacheMatrix() object, using the cache when
## one is available. (Duplicate copy from earlier in this dump.)
## NOTE(review): the default `x=matrix` binds the base constructor function
## and could never work as a default; `x` is effectively required.
cacheSolve<-function(x=matrix,...){
m<-x$getmatrix()
if(!is.null(m)){
message("getting cached data")
return(m)
}
matrix<-x$get()  # NOTE(review): local name shadows base::matrix
m<-solve(matrix,...)
x$setmatrix(m)  # cache for subsequent calls
m
}
|
#' Function to simulate multiple input structures
#'
#' @description Function is designed to evaluate several different inputs.
#'
#' `r lifecycle::badge('experimental')`
#'
#' @details Function is using several other functions to perform sets of operations
#' designed to test several inputs. Designed to validate model settings
#'
#' @author (C) 2021 Vladimir Zhbanko
#'
#' @param timeframe Integer, Data timeframe e.g. 60 min. This will be equal to 1 bar
#' @param path_sim_input String, Path to the folder where csv files will be placed, typically AI_RSIADXAUDCAD60.csv
#' @param path_sim_result String, Path to the folder where all results from simulations shall be written
#' @param par_simulate Integer, Parameter that can be used in simulation
#' @param demo_mode Boolean, Simplify function test. When TRUE no simulation will be made
#'
#' @return Function is writing file into Decision Support System folders
#' @export
#'
#' @examples
#'
#' \donttest{
#'
#' library(dplyr)
#' library(magrittr)
#' library(readr)
#' library(h2o)
#' library(lazytrade)
#' library(lubridate)
#' library(stats)
#'
#' path_input <- normalizePath(tempdir(),winslash = "/")
#' path_sim_input <- file.path(path_input, "path_sim_input")
#' dir.create(path_sim_input)
#' path_sim_result <- file.path(path_input, "path_sim_result")
#' dir.create(path_sim_result)
#'
#' file.copy(from = system.file("extdata", "AI_RSIADXCADCHF60.csv", package = "lazytrade"),
#' to = file.path(path_sim_input, "AI_RSIADXCADCHF60.csv"), overwrite = TRUE)
#' file.copy(from = system.file("extdata", "AI_RSIADXEURNZD60.csv", package = "lazytrade"),
#' to = file.path(path_sim_input, "AI_RSIADXEURNZD60.csv"), overwrite = TRUE)
#'
#' # start h2o engine
#' h2o.init(nthreads = 2)
#'
#' # simulation of different epoch values
#' aml_simulation(timeframe = 60,
#' path_sim_input = path_sim_input,
#' path_sim_result = path_sim_result,
#' par_simulate = 3,
#' demo_mode = TRUE)
#'
#' Sys.sleep(5)
#' # stop h2o engine
#' h2o.shutdown(prompt = FALSE)
#'
#' #set delay to insure h2o unit closes properly before the next test
#' Sys.sleep(5)
#'
#' }
#'
#'
#'
aml_simulation <- function(timeframe = 60, path_sim_input, path_sim_result,
                           par_simulate = 100, demo_mode = FALSE){
  ## Fail fast if a required package is missing. Previously the
  ## requireNamespace() results were silently discarded, so a missing
  ## package only surfaced later as a confusing "could not find function"
  ## error deep inside the run.
  for (pkg in c("dplyr", "readr", "stringr", "lubridate")) {
    if (!requireNamespace(pkg, quietly = TRUE)) {
      stop("Package '", pkg, "' is required by aml_simulation()", call. = FALSE)
    }
  }
  ## Tuning constants shared by every train/test round below, hoisted into
  ## named locals so the three rounds cannot silently drift apart.
  bars_test  <- 600  # bars reserved for model testing
  bars_ahead <- 34   # forecast horizon in bars
  cols_used  <- 10   # number of input columns used by the model
  ## Generate subfolders needed for data storage and processing for one set
  # path with the data where rds files will be recorded
  path_sim_data <- file.path(path_sim_input, "_DATA")
  if (!dir.exists(path_sim_data)) {dir.create(path_sim_data)}
  # path with models
  path_sim_models <- file.path(path_sim_input, "_MODELS")
  if (!dir.exists(path_sim_models)) {dir.create(path_sim_models)}
  # two sandbox paths where strategy test results are dropped
  path_sim_sbxm <- file.path(path_sim_input, "_SBXM")
  if (!dir.exists(path_sim_sbxm)) {dir.create(path_sim_sbxm)}
  path_sim_sbxs <- file.path(path_sim_input, "_SBXS")
  if (!dir.exists(path_sim_sbxs)) {dir.create(path_sim_sbxs)}
  # =================================
  # collect data
  # =================================
  ## Derive the symbol list from the AI_RSIADX<SYMBOL><timeframe>.csv files
  ## actually present in path_sim_input.
  myFiles <- list.files(path_sim_input, pattern = "AI_RSIADX", all.files = TRUE)
  mySymbols <- stringr::str_remove(myFiles, pattern = "AI_RSIADX")
  mySymbols <- stringr::str_remove(mySymbols, pattern = as.character(timeframe))
  mySymbols <- stringr::str_remove(mySymbols, pattern = ".csv")
  # time frame used
  timeframeHP <- timeframe
  ## copy file with tick size info (readr calls are now consistently
  ## namespaced, matching the readr::read_csv call in the loop below)
  tick <- system.file("extdata", "TickSize_AI_RSIADX.csv",
                      package = "lazytrade") %>% readr::read_csv(col_names = FALSE)
  readr::write_csv(tick, file.path(path_sim_data, "TickSize_AI_RSIADX.csv"), col_names = FALSE)
  ## Write indicator and price-change data for every symbol
  for (PAIR in mySymbols) {
    indHP <- file.path(path_sim_input, paste0("AI_RSIADX", PAIR, timeframeHP, ".csv")) %>%
      readr::read_csv(col_names = FALSE)
    indHP$X1 <- lubridate::ymd_hms(indHP$X1)
    # data transformation using the custom function for one symbol
    lazytrade::aml_collect_data(indicator_dataset = indHP,
                                symbol = PAIR,
                                timeframe = timeframeHP,
                                path_data = path_sim_data,
                                max_nrows = 15000)
  }
  if (!demo_mode) {
    # =================================
    # round 1: force model rebuild, then test
    # =================================
    ## An h2o cluster is assumed to already be running (see examples).
    for (PAIR in mySymbols) {
      # performing Deep Learning Regression using the custom function
      lazytrade::aml_make_model(symbol = PAIR,
                                timeframe = timeframeHP,
                                path_model = path_sim_models,
                                path_data = path_sim_data,
                                force_update = TRUE,
                                objective_test = TRUE,
                                num_nn_options = 48,
                                num_epoch = par_simulate,
                                num_bars_test = bars_test,
                                num_bars_ahead = bars_ahead,
                                num_cols_used = cols_used)
    }
    for (PAIR in mySymbols) {
      lazytrade::aml_test_model(symbol = PAIR,
                                num_bars = bars_test,
                                timeframe = timeframeHP,
                                path_model = path_sim_models,
                                path_data = path_sim_data,
                                path_sbxm = path_sim_sbxm,
                                path_sbxs = path_sim_sbxs)
    }
    ## performance quantile used as the retraining bar for the next round
    perf <- lazytrade::aml_consolidate_results(timeframe = timeframeHP,
                                               used_symbols = mySymbols,
                                               path_model = path_sim_models,
                                               path_sbxm = path_sim_sbxm,
                                               path_sbxs = path_sim_sbxs,
                                               min_quality = 0.8,
                                               get_quantile = TRUE)
    # write intermediate results log to path_sim_result
    lazytrade::aml_consolidate_results(timeframe = timeframeHP,
                                       used_symbols = mySymbols,
                                       path_model = path_sim_models,
                                       path_sbxm = path_sim_sbxm,
                                       path_sbxs = path_sim_sbxs,
                                       min_quality = 0.75,
                                       get_quantile = FALSE,
                                       log_results = TRUE,
                                       path_logs = path_sim_result)
    # =================================
    # round 2: retrain only models below `perf`, then test
    # =================================
    for (PAIR in mySymbols) {
      lazytrade::aml_make_model(symbol = PAIR,
                                timeframe = timeframeHP,
                                path_model = path_sim_models,
                                path_data = path_sim_data,
                                force_update = FALSE,
                                objective_test = TRUE,
                                min_perf = perf,
                                num_nn_options = 24,
                                num_epoch = par_simulate,
                                num_bars_test = bars_test,
                                num_bars_ahead = bars_ahead,
                                num_cols_used = cols_used)
      lazytrade::aml_test_model(symbol = PAIR,
                                num_bars = bars_test,
                                timeframe = timeframeHP,
                                path_model = path_sim_models,
                                path_data = path_sim_data,
                                path_sbxm = path_sim_sbxm,
                                path_sbxs = path_sim_sbxs)
    }
    perf <- lazytrade::aml_consolidate_results(timeframe = timeframeHP,
                                               used_symbols = mySymbols,
                                               path_model = path_sim_models,
                                               path_sbxm = path_sim_sbxm,
                                               path_sbxs = path_sim_sbxs,
                                               min_quality = 0.5,
                                               get_quantile = TRUE)
    # write intermediate results log to path_sim_result
    lazytrade::aml_consolidate_results(timeframe = timeframeHP,
                                       used_symbols = mySymbols,
                                       path_model = path_sim_models,
                                       path_sbxm = path_sim_sbxm,
                                       path_sbxs = path_sim_sbxs,
                                       min_quality = 0.75,
                                       get_quantile = FALSE,
                                       log_results = TRUE,
                                       path_logs = path_sim_result)
    # =================================
    # round 3: final retrain/test pass
    # =================================
    for (PAIR in mySymbols) {
      lazytrade::aml_make_model(symbol = PAIR,
                                timeframe = timeframeHP,
                                path_model = path_sim_models,
                                path_data = path_sim_data,
                                force_update = FALSE,
                                objective_test = TRUE,
                                num_nn_options = 24,
                                num_epoch = par_simulate,
                                num_bars_test = bars_test,
                                num_bars_ahead = bars_ahead,
                                num_cols_used = cols_used,
                                min_perf = perf)
      lazytrade::aml_test_model(symbol = PAIR,
                                num_bars = bars_test,
                                timeframe = timeframeHP,
                                path_model = path_sim_models,
                                path_data = path_sim_data,
                                path_sbxm = path_sim_sbxm,
                                path_sbxs = path_sim_sbxs)
    }
    ## Consolidate final results: mean performance plus the logged summary
    ## (shutting down the h2o engine is the caller's responsibility).
    AverPerf <- lazytrade::aml_consolidate_results(timeframe = timeframeHP,
                                                   used_symbols = mySymbols,
                                                   path_model = path_sim_models,
                                                   path_sbxm = path_sim_sbxm,
                                                   path_sbxs = path_sim_sbxs,
                                                   min_quality = 0.5,
                                                   get_quantile = TRUE)
    # function to write log to the _LOG folder
    Qntil <- lazytrade::aml_consolidate_results(timeframe = timeframeHP,
                                                used_symbols = mySymbols,
                                                path_model = path_sim_models,
                                                path_sbxm = path_sim_sbxm,
                                                path_sbxs = path_sim_sbxs,
                                                min_quality = 0.75,
                                                get_quantile = FALSE,
                                                log_results = TRUE,
                                                path_logs = path_sim_result)
    # setup a log dataframe to consolidate results of this simulation set
    logs <- data.frame(TimeTest = Sys.time(),
                       Folder = path_sim_input,
                       MeanPerf = AverPerf, HighPerf = Qntil$Quantil)
    # append to the existing results log, or create it on first run
    log_file <- file.path(path_sim_result, 'all_results.rds')
    if (!file.exists(log_file)) {
      readr::write_rds(logs, log_file)
    } else {
      readr::read_rds(log_file) %>%
        dplyr::bind_rows(logs) %>%
        readr::write_rds(log_file)
    }
  } # end of test bypass with demo_mode
}
| /R/aml_simulation.R | permissive | DrRoad/lazytrade | R | false | false | 11,907 | r | #' Function to simulate multiple input structures
#'
#' @description Function is designed to evaluate several different inputs.
#'
#' `r lifecycle::badge('experimental')`
#'
#' @details Function is using several other functions to perform sets of operations
#' designed to test several inputs. Designed to validate model settings
#'
#' @author (C) 2021 Vladimir Zhbanko
#'
#' @param timeframe Integer, Data timeframe e.g. 60 min. This will be equal to 1 bar
#' @param path_sim_input String, Path to the folder where csv files will be placed, typically AI_RSIADXAUDCAD60.csv
#' @param path_sim_result String, Path to the folder where all results from simulations shall be written
#' @param par_simulate Integer, Parameter that can be used in simulation
#' @param demo_mode Boolean, Simplify function test. When TRUE no simulation will be made
#'
#' @return Function is writing file into Decision Support System folders
#' @export
#'
#' @examples
#'
#' \donttest{
#'
#' library(dplyr)
#' library(magrittr)
#' library(readr)
#' library(h2o)
#' library(lazytrade)
#' library(lubridate)
#' library(stats)
#'
#' path_input <- normalizePath(tempdir(),winslash = "/")
#' path_sim_input <- file.path(path_input, "path_sim_input")
#' dir.create(path_sim_input)
#' path_sim_result <- file.path(path_input, "path_sim_result")
#' dir.create(path_sim_result)
#'
#' file.copy(from = system.file("extdata", "AI_RSIADXCADCHF60.csv", package = "lazytrade"),
#' to = file.path(path_sim_input, "AI_RSIADXCADCHF60.csv"), overwrite = TRUE)
#' file.copy(from = system.file("extdata", "AI_RSIADXEURNZD60.csv", package = "lazytrade"),
#' to = file.path(path_sim_input, "AI_RSIADXEURNZD60.csv"), overwrite = TRUE)
#'
#' # start h2o engine
#' h2o.init(nthreads = 2)
#'
#' # simulation of different epoch values
#' aml_simulation(timeframe = 60,
#' path_sim_input = path_sim_input,
#' path_sim_result = path_sim_result,
#' par_simulate = 3,
#' demo_mode = TRUE)
#'
#' Sys.sleep(5)
#' # stop h2o engine
#' h2o.shutdown(prompt = FALSE)
#'
#' #set delay to insure h2o unit closes properly before the next test
#' Sys.sleep(5)
#'
#' }
#'
#'
#'
aml_simulation <- function(timeframe = 60, path_sim_input, path_sim_result,
par_simulate = 100, demo_mode = FALSE){
requireNamespace("dplyr", quietly = TRUE)
requireNamespace("readr", quietly = TRUE)
requireNamespace("h2o", quietly = TRUE)
# Function should perform simulation for one set of Neural Network Inputs
## Generate subfolders needed for data storage and processing for one set
#path with the data where rds files will be recorded
path_sim_data <- file.path(path_sim_input, "_DATA")
if(!dir.exists(path_sim_data)){dir.create(path_sim_data)}
#path with models
path_sim_models <- file.path(path_sim_input, "_MODELS")
if(!dir.exists(path_sim_models)){dir.create(path_sim_models)}
# 2 dummy paths to drop str test results
path_sim_sbxm <- file.path(path_sim_input, "_SBXM")
if(!dir.exists(path_sim_sbxm)){dir.create(path_sim_sbxm)}
path_sim_sbxs <- file.path(path_sim_input, "_SBXS")
if(!dir.exists(path_sim_sbxs)){dir.create(path_sim_sbxs)}
# =================================
# collect data
# =================================
#### Read inputs ==========================================
# read files for which symbols are actually placed to the folder path_sim_input
myFiles <- list.files(path_sim_input,pattern = "AI_RSIADX", all.files = TRUE)
mySymbols <- stringr::str_remove(myFiles, pattern = "AI_RSIADX")
mySymbols <- stringr::str_remove(mySymbols, pattern = as.character(timeframe))
mySymbols <- stringr::str_remove(mySymbols, pattern = ".csv")
#time frames used
timeframeHP <- timeframe
#copy file with tick size info
tick = system.file("extdata", "TickSize_AI_RSIADX.csv",
package = "lazytrade") %>% read_csv(col_names = FALSE)
write_csv(tick, file.path(path_sim_data, "TickSize_AI_RSIADX.csv"), col_names = FALSE)
# Writing indicator and price change to the file
for (PAIR in mySymbols) {
# PAIR <- mySymbols[1]
# performing data collection
indHP = file.path(path_sim_input, paste0("AI_RSIADX",PAIR,timeframeHP,".csv")) %>%
readr::read_csv(col_names = FALSE)
indHP$X1 <- lubridate::ymd_hms(indHP$X1)
# data transformation using the custom function for one symbol
lazytrade::aml_collect_data(indicator_dataset = indHP,
symbol = PAIR,
timeframe = timeframeHP,
path_data = path_sim_data,
max_nrows = 15000)
#full_path <- file.path(path_data, 'AI_RSIADXEURUSD60.rds')
#x1 <- read_rds(full_path)
}
if(!demo_mode){
# =================================
# force model update
# =================================
#path to store logs data (e.g. duration of machine learning steps)
#h2o.init()
# Writing indicator and price change to the file
for (PAIR in mySymbols) {
## PAIR <- mySymbols[1]
# performing Deep Learning Regression using the custom function
lazytrade::aml_make_model(symbol = PAIR,
timeframe = timeframeHP,
path_model = path_sim_models,
path_data = path_sim_data,
force_update = TRUE,
objective_test = TRUE,
num_nn_options = 48,
num_epoch = par_simulate,
num_bars_test = 600,
num_bars_ahead = 34,
num_cols_used = 10)
}
# =================================
# test build test...
# =================================
# Performing Testing => Building -> Testing...
for (PAIR in mySymbols) {
## PAIR <- mySymbols[1]
# repeat testing and training several times
lazytrade::aml_test_model(symbol = PAIR,
num_bars = 600,
timeframe = timeframeHP,
path_model = path_sim_models,
path_data = path_sim_data,
path_sbxm = path_sim_sbxm,
path_sbxs = path_sim_sbxs)
}
perf <- lazytrade::aml_consolidate_results(timeframe = timeframeHP,
used_symbols = mySymbols,
path_model = path_sim_models,
path_sbxm = path_sim_sbxm,
path_sbxs = path_sim_sbxs,
min_quality = 0.8,
get_quantile = TRUE)
# function to write log to the _LOG folder
# NOTE(review): `perf` is used below (min_perf = perf) before the visible
# assignment further down -- presumably it is defined earlier in this
# script; confirm.
lazytrade::aml_consolidate_results(timeframe = timeframeHP,
used_symbols = mySymbols,
path_model = path_sim_models,
path_sbxm = path_sim_sbxm,
path_sbxs = path_sim_sbxs,
min_quality = 0.75,
get_quantile = FALSE,
log_results = TRUE,
path_logs = path_sim_result)
# Round 1: (re)train one deep-learning model per symbol, then back-test it.
for (PAIR in mySymbols) {
lazytrade::aml_make_model(symbol = PAIR,
timeframe = timeframeHP,
path_model = path_sim_models,
path_data = path_sim_data,
force_update=FALSE,
objective_test = TRUE,
min_perf = perf,
num_nn_options = 24,
num_epoch = par_simulate,
num_bars_test = 600,
num_bars_ahead = 34,
num_cols_used = 10)
lazytrade::aml_test_model(symbol = PAIR,
num_bars = 600,
timeframe = timeframeHP,
path_model = path_sim_models,
path_data = path_sim_data,
path_sbxm = path_sim_sbxm,
path_sbxs = path_sim_sbxs)
}
# Consolidate round-1 results into a performance threshold for round 2.
perf <- lazytrade::aml_consolidate_results(timeframe = timeframeHP,
used_symbols = mySymbols,
path_model = path_sim_models,
path_sbxm = path_sim_sbxm,
path_sbxs = path_sim_sbxs,
min_quality = 0.5,
get_quantile = TRUE)
# function to write log to the _LOG folder
lazytrade::aml_consolidate_results(timeframe = timeframeHP,
used_symbols = mySymbols,
path_model = path_sim_models,
path_sbxm = path_sim_sbxm,
path_sbxs = path_sim_sbxs,
min_quality = 0.75,
get_quantile = FALSE,
log_results = TRUE,
path_logs = path_sim_result)
# Round 2: retrain with the updated min_perf threshold and back-test again.
for (PAIR in mySymbols) {
lazytrade::aml_make_model(symbol = PAIR,
timeframe = timeframeHP,
path_model = path_sim_models,
path_data = path_sim_data,
force_update=FALSE,
objective_test = TRUE,
num_nn_options = 24,
num_epoch = par_simulate,
num_bars_test = 600,
num_bars_ahead = 34,
num_cols_used = 10,
min_perf = perf)
lazytrade::aml_test_model(symbol = PAIR,
num_bars = 600,
timeframe = timeframeHP,
path_model = path_sim_models,
path_data = path_sim_data,
path_sbxm = path_sim_sbxm,
path_sbxs = path_sim_sbxs)
}
# stop h2o engine
#h2o.shutdown(prompt = F)
# Summarise final performance across all symbols.
AverPerf <- lazytrade::aml_consolidate_results(timeframe = timeframeHP,
used_symbols = mySymbols,
path_model = path_sim_models,
path_sbxm = path_sim_sbxm,
path_sbxs = path_sim_sbxs,
min_quality = 0.5,
get_quantile = TRUE)
# function to write log to the _LOG folder
# NOTE(review): `=` used for top-level assignment here; the rest of the
# script uses `<-`.
Qntil = lazytrade::aml_consolidate_results(timeframe = timeframeHP,
used_symbols = mySymbols,
path_model = path_sim_models,
path_sbxm = path_sim_sbxm,
path_sbxs = path_sim_sbxs,
min_quality = 0.75,
get_quantile = FALSE,
log_results = TRUE,
path_logs = path_sim_result)
#setup a log dataframe to consolidate results of particular sets
logs <- data.frame(TimeTest = Sys.time(),
Folder = path_sim_input,
MeanPerf = AverPerf, HighPerf = Qntil$Quantil)
#read existing log (if exists) and add there a new log data
if(!file.exists(file.path(path_sim_result, 'all_results.rds'))){
write_rds(logs, file.path(path_sim_result, 'all_results.rds'))
} else {
read_rds(file.path(path_sim_result, 'all_results.rds')) %>%
bind_rows(logs) %>%
write_rds(file.path(path_sim_result, 'all_results.rds'))
}
} #end of test bypass with demo_mode
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1718
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1718
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query04_nreachq_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 592
c no.of clauses 1718
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1718
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query04_nreachq_1344.qdimacs 592 1718 E1 [] 0 8 581 1718 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query04_nreachq_1344/query04_nreachq_1344.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 707 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1718
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1718
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query04_nreachq_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 592
c no.of clauses 1718
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1718
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query04_nreachq_1344.qdimacs 592 1718 E1 [] 0 8 581 1718 NONE
|
\name{calcPredictionAccuracy}
\Rdversion{1.1}
\alias{calcPredictionAccuracy}
\alias{calcPredictionAccuracy,realRatingMatrix,realRatingMatrix-method}
\alias{calcPredictionAccuracy,topNList,binaryRatingMatrix-method}
\alias{calcPredictionAccuracy,topNList,realRatingMatrix-method}
\title{
Calculate the Prediction Error for a Recommendation
}
\description{
Calculate prediction accuracy. For predicted ratings
MAE (mean absolute error), MSE (mean squared error)
and RMSE (root mean squared error) are calculated. For topNLists
various binary classification metrics are returned (e.g., precision, recall, TPR, FPR).
}
\usage{
calcPredictionAccuracy(x, data, ...)
\S4method{calcPredictionAccuracy}{realRatingMatrix,realRatingMatrix}(x, data, byUser=FALSE,...)
\S4method{calcPredictionAccuracy}{topNList,realRatingMatrix}(x, data, byUser=FALSE, given=NULL, goodRating=NA,...)
\S4method{calcPredictionAccuracy}{topNList,binaryRatingMatrix}(x, data, byUser=FALSE, given=NULL,...)
}
\arguments{
\item{x}{ Predicted items in a "topNList" or predicted ratings as a "realRatingMatrix"}
\item{data}{ Observed true ratings for the users as a "RatingMatrix". The users have to be in the same order as in \code{x}. }
\item{byUser}{ logical; Should the errors be averaged by user or over all
recommendations?}
\item{given}{ how many items were given to create the predictions. }
\item{goodRating}{ If \code{x} is a "topNList" and \code{data} is a "realRatingMatrix" then \code{goodRating} is used as the threshold for determining what rating in \code{data} is considered a good rating.}
\item{...}{ further arguments.}
}
\details{
The function calculates the accuracy of predictions compared to the observed true ratings (\code{data}).
If both the predictions and the actual observed ratings are numeric ratings (i.e. a "realRatingMatrix"),
then the error measures RMSE, MSE and MAE are returned.
If the predictions are a "topNList" and the observed data is a "binaryRatingMatrix", then binary classification
measures like precision, recall, TPR and FPR are calculated.
If the ratings are a "topNList" and the observed data is a "realRatingMatrix" then \code{goodRating} is used
to determine what rating in \code{data} is considered a good rating for calculating binary classification measures. This means that an item in the topNList is considered a true positive if it has a rating of \code{goodRating} or better in the observed data.
}
\value{
Returns a vector with the appropriate measures averaged over all users.
For \code{byUser=TRUE}, a matrix with a row for each user is returned.
}
\seealso{
\code{\linkS4class{topNList}},
\code{\linkS4class{binaryRatingMatrix}},
\code{\linkS4class{realRatingMatrix}}.
}
\references{
Asela Gunawardana and Guy Shani (2009). A Survey of Accuracy Evaluation Metrics of
Recommendation Tasks, Journal of Machine Learning Research 10, 2935-2962.
}
\examples{
### real valued recommender
data(Jester5k)
## create 90/10 split (known/unknown) for the first 500 users in Jester5k
e <- evaluationScheme(Jester5k[1:500,], method="split", train=0.9,
k=1, given=15)
e
## create a user-based CF recommender using training data
r <- Recommender(getData(e, "train"), "UBCF")
## create predictions for the test data using known ratings (see given above)
p <- predict(r, getData(e, "known"), type="ratings")
p
## compute error metrics averaged per user and then averaged over all
## recommendations
calcPredictionAccuracy(p, getData(e, "unknown"))
head(calcPredictionAccuracy(p, getData(e, "unknown"), byUser=TRUE))
## evaluate topNLists instead (you need to specify given and goodRating!)
p <- predict(r, getData(e, "known"), type="topNList")
p
calcPredictionAccuracy(p, getData(e, "unknown"), given=15, goodRating=5)
## evaluate a binary recommender
data(MSWeb)
MSWeb10 <- sample(MSWeb[rowCounts(MSWeb) >10,], 50)
e <- evaluationScheme(MSWeb10, method="split", train=0.9,
k=1, given=3)
e
## create a user-based CF recommender using training data
r <- Recommender(getData(e, "train"), "UBCF")
## create predictions for the test data using known ratings (see given above)
p <- predict(r, getData(e, "known"), type="topNList", n=10)
p
calcPredictionAccuracy(p, getData(e, "unknown"), given=3)
}
%\keyword{ ~kwd1 }
| /man/calcPredictionAccuracy.Rd | no_license | r0y0u/recommenderlab | R | false | false | 4,270 | rd | \name{calcPredictionAccuracy}
\Rdversion{1.1}
\alias{calcPredictionAccuracy}
\alias{calcPredictionAccuracy,realRatingMatrix,realRatingMatrix-method}
\alias{calcPredictionAccuracy,topNList,binaryRatingMatrix-method}
\alias{calcPredictionAccuracy,topNList,realRatingMatrix-method}
\title{
Calculate the Prediction Error for a Recommendation
}
\description{
Calculate prediction accuracy. For predicted ratings
MAE (mean absolute error), MSE (mean squared error)
and RMSE (root mean squared error) are calculated. For topNLists
various binary classification metrics are returned (e.g., precision, recall, TPR, FPR).
}
\usage{
calcPredictionAccuracy(x, data, ...)
\S4method{calcPredictionAccuracy}{realRatingMatrix,realRatingMatrix}(x, data, byUser=FALSE,...)
\S4method{calcPredictionAccuracy}{topNList,realRatingMatrix}(x, data, byUser=FALSE, given=NULL, goodRating=NA,...)
\S4method{calcPredictionAccuracy}{topNList,binaryRatingMatrix}(x, data, byUser=FALSE, given=NULL,...)
}
\arguments{
\item{x}{ Predicted items in a "topNList" or predicted ratings as a "realRatingMatrix"}
\item{data}{ Observed true ratings for the users as a "RatingMatrix". The users have to be in the same order as in \code{x}. }
\item{byUser}{ logical; Should the errors be averaged by user or over all
recommendations?}
\item{given}{ how many items were given to create the predictions. }
\item{goodRating}{ If \code{x} is a "topNList" and \code{data} is a "realRatingMatrix" then \code{goodRating} is used as the threshold for determining what rating in \code{data} is considered a good rating.}
\item{...}{ further arguments.}
}
\details{
The function calculates the accuracy of predictions compared to the observed true ratings (\code{data}).
If both the predictions and the actual observed ratings are numeric ratings (i.e. a "realRatingMatrix"),
then the error measures RMSE, MSE and MAE are returned.
If the predictions are a "topNList" and the observed data is a "binaryRatingMatrix", then binary classification
measures like precision, recall, TPR and FPR are calculated.
If the ratings are a "topNList" and the observed data is a "realRatingMatrix" then \code{goodRating} is used
to determine what rating in \code{data} is considered a good rating for calculating binary classification measures. This means that an item in the topNList is considered a true positive if it has a rating of \code{goodRating} or better in the observed data.
}
\value{
Returns a vector with the appropriate measures averaged over all users.
For \code{byUser=TRUE}, a matrix with a row for each user is returned.
}
\seealso{
\code{\linkS4class{topNList}},
\code{\linkS4class{binaryRatingMatrix}},
\code{\linkS4class{realRatingMatrix}}.
}
\references{
Asela Gunawardana and Guy Shani (2009). A Survey of Accuracy Evaluation Metrics of
Recommendation Tasks, Journal of Machine Learning Research 10, 2935-2962.
}
\examples{
### real valued recommender
data(Jester5k)
## create 90/10 split (known/unknown) for the first 500 users in Jester5k
e <- evaluationScheme(Jester5k[1:500,], method="split", train=0.9,
k=1, given=15)
e
## create a user-based CF recommender using training data
r <- Recommender(getData(e, "train"), "UBCF")
## create predictions for the test data using known ratings (see given above)
p <- predict(r, getData(e, "known"), type="ratings")
p
## compute error metrics averaged per user and then averaged over all
## recommendations
calcPredictionAccuracy(p, getData(e, "unknown"))
head(calcPredictionAccuracy(p, getData(e, "unknown"), byUser=TRUE))
## evaluate topNLists instead (you need to specify given and goodRating!)
p <- predict(r, getData(e, "known"), type="topNList")
p
calcPredictionAccuracy(p, getData(e, "unknown"), given=15, goodRating=5)
## evaluate a binary recommender
data(MSWeb)
MSWeb10 <- sample(MSWeb[rowCounts(MSWeb) >10,], 50)
e <- evaluationScheme(MSWeb10, method="split", train=0.9,
k=1, given=3)
e
## create a user-based CF recommender using training data
r <- Recommender(getData(e, "train"), "UBCF")
## create predictions for the test data using known ratings (see given above)
p <- predict(r, getData(e, "known"), type="topNList", n=10)
p
calcPredictionAccuracy(p, getData(e, "unknown"), given=3)
}
%\keyword{ ~kwd1 }
|
library(numbersBR)
### Name: is.valid
### Title: Validate numbers
### Aliases: is.valid is.valid.CNPJ is.valid.CPF is.valid.RENAVAN is.valid
### ** Examples
# Each call wraps a numeric vector in a registry-number class (CNPJ, CPF,
# RENAVAN) and validates it; expected logical results are noted inline.
is.valid(CNPJ(c(13515463000138, 66670000101))) # TRUE, FALSE
is.valid(CPF(c(1239157673, 42752486198))) # TRUE, FALSE
is.valid(RENAVAN(c(75320797785, 42752486198))) # TRUE, FALSE
| /data/genthat_extracted_code/numbersBR/examples/is.valid.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 342 | r | library(numbersBR)
### Name: is.valid
### Title: Validate numbers
### Aliases: is.valid is.valid.CNPJ is.valid.CPF is.valid.RENAVAN is.valid
### ** Examples
# Each call wraps a numeric vector in a registry-number class (CNPJ, CPF,
# RENAVAN) and validates it; expected logical results are noted inline.
is.valid(CNPJ(c(13515463000138, 66670000101))) # TRUE, FALSE
is.valid(CPF(c(1239157673, 42752486198))) # TRUE, FALSE
is.valid(RENAVAN(c(75320797785, 42752486198))) # TRUE, FALSE
|
library(data.table)
library(zoo)
library(rstudioapi)
library(tidyverse)
# Set the working directory to the script directory
# NOTE(review): relies on rstudioapi, so this only works when run
# interactively inside RStudio; consider a project-relative path for
# non-interactive runs.
setwd (dirname(getActiveDocumentContext()$path))
# Set Date Language in English
Sys.setlocale("LC_TIME", "C")
# Factors.R is expected to define `factors`, `portfolio_returns` and the
# regression objects referenced at the bottom of this script -- TODO confirm.
source("Factors.R")
portfolio_returns <- as.data.table(portfolio_returns)
# Compute yearly per-country quintile breakpoints (20/40/60/80%) of the
# column named by `factor`, estimated from the July observations of "Big"
# stocks only, then attach them to every row of `data`.
#
# Bug fix: the original line began with a stray `+`
# (`+create_quintiles <- function...`), which is an invalid assignment
# target, so the function was never defined.
#
# Args:
#   data   - data.table with columns month, pf.size, country, year, hcjun
#            and the sorting-variable column named by `factor`.
#   factor - character scalar: name of the sorting-variable column.
# Returns: `data` merged with per-(year, country) breakpoint columns
#          bb20, bb40, bb60, bb80 (matched on hcjun/year and country).
create_quintiles <- function(data, factor) {
  breakpoints <- data[month == 7 & !is.na(data[[factor]]) & pf.size == "Big"] %>%
    group_by(country, year) %>%
    summarize(bb20 = quantile(.data[[factor]], probs = 0.2, na.rm = TRUE),
              bb40 = quantile(.data[[factor]], probs = 0.4, na.rm = TRUE),
              bb60 = quantile(.data[[factor]], probs = 0.6, na.rm = TRUE),
              bb80 = quantile(.data[[factor]], probs = 0.8, na.rm = TRUE)) %>%
    select(year, country, bb20, bb40, bb60, bb80)
  # Attach the breakpoints to every observation of the matching year/country.
  merge(data, breakpoints,
        by.x = c("hcjun", "country"),
        by.y = c("year", "country"))
}
# Assign each stock to the respective quintile
# Assign each stock to a quintile label based on its factor value relative
# to the row-level breakpoints bb20..bb80 added by create_quintiles().
# Adds column pf.bm by reference (data.table `:=`) and returns `data`
# invisibly. Labels: >bb80 "Big", (bb60,bb80] "LessBig",
# (bb40,bb60] "Neutral", (bb20,bb40] "LessSmall", <=bb20 "Small";
# NA factor values propagate to NA via ifelse.
# NOTE(review): the labels reuse size terminology ("Big"/"Small") although
# the column encodes high/low factor quintiles -- confirm intended.
create_breakpoints <- function(data, factor) {
data[ , pf.bm := ifelse(data[[factor]]>bb80, "Big",
ifelse((data[[factor]]<=bb80 & data[[factor]]>bb60),"LessBig",
ifelse((data[[factor]]<=bb60 & data[[factor]]>bb40),"Neutral",
ifelse((data[[factor]]<=bb40 & data[[factor]]>bb20),"LessSmall",
ifelse(data[[factor]]<=bb20,"Small",NA)))))]
}
# Calculate the value-weighted returns for the different factor portfolios
# Compute value-weighted returns per date for the five quintile portfolios
# plus the 5-1 hedge portfolio, then append one row of through-time average
# returns to `empty_df`, which is returned.
# NOTE(review): `factor` is accepted but never used inside this function.
# NOTE(review): the averages divide colSums(..., na.rm = TRUE) by the total
# number of dates, so dates where a portfolio return is NA count as zero --
# confirm this is intended rather than colMeans(..., na.rm = TRUE).
calculate_factor_returns <- function(data, empty_df, factor) {
portfolio_returns <- data[!is.na(pf.bm)] %>%
group_by(Date, pf.bm) %>%
summarize(ret.port = weighted.mean(RET.USD, MV.USD.June)) %>%
spread(pf.bm, ret.port) %>% mutate(hedge.pf = Big - Small) %>%
rename("5" = Big, "4" = LessBig, "3" = Neutral,"2" = LessSmall, "1" = Small, "5-1" = hedge.pf) %>%
select(Date, "1", "2", "3", "4", "5", "5-1")
portfolio_returns <- as.data.table(portfolio_returns)
factor_returns <- colSums(portfolio_returns[,2:7], na.rm = T) / nrow(portfolio_returns)
empty_df <- rbind(empty_df, factor_returns)
empty_df
}
# Run the full sorting pipeline for one factor: compute the quintile
# breakpoints, bin each stock into a quintile, and append the resulting
# average portfolio returns to `empty_df`, which is returned.
create_portfolio_sorts <- function(data, factor, empty_df) {
  with_breakpoints <- create_quintiles(data, factor)
  binned <- create_breakpoints(with_breakpoints, factor)
  calculate_factor_returns(binned, empty_df, factor)
}
# Create empty dataframe to display results
# (create_portfolio_sorts appends one row per factor)
cols = c("1", "2", "3", "4", "5", "5-1")
portfolio_returns <- data.frame(matrix(nrow = 0, ncol = length(cols)))
# Exclude the stocks that have negative earnings, cash flows or gross profits
# We start with Book-To-Market and we only consider stocks with positive BM
bm_factor <- factors %>% filter(BM>0)
portfolio_returns <- create_portfolio_sorts(bm_factor, "BM", portfolio_returns)
# Filter out stocks with negative earnings
ep_factor <- factors %>% filter(EP > 0) %>% drop_na(EP)
portfolio_returns <- create_portfolio_sorts(ep_factor, "EP", portfolio_returns)
# Filter out stocks with negative CF
cp_factor <- factors %>% filter(CP > 0) %>% drop_na(CP)
portfolio_returns <- create_portfolio_sorts(cp_factor, "CP", portfolio_returns)
roe_factor <- factors %>% filter(ROE > 0)
portfolio_returns <- create_portfolio_sorts(roe_factor, "ROE", portfolio_returns)
roa_factor <- factors %>% filter(ROA > 0)
portfolio_returns <- create_portfolio_sorts(roa_factor, "ROA", portfolio_returns)
gpa_factor <- factors %>% filter(GPA > 0)
portfolio_returns <- create_portfolio_sorts(gpa_factor, "GPA", portfolio_returns)
opbe_factor <- factors %>% filter(OPBE > 0)
portfolio_returns <- create_portfolio_sorts(opbe_factor, "OPBE", portfolio_returns)
oa_factor <- factors %>% drop_na(OA)
portfolio_returns <- create_portfolio_sorts(oa_factor, "OA", portfolio_returns)
# Label the consolidated results table: columns = quintiles, rows = factors.
colnames(portfolio_returns) <- cols
rows <- c("BM", "EP", "CP", "ROE", "ROA", "GP/A", "OP/BE", "OA")
rownames(portfolio_returns) <- rows
#ttest
# NOTE(review): this references an object not defined in this script
# (presumably created by Factors.R); the name looks like a typo of
# "CS.reg.estimates.contr.2" -- confirm before running.
CS.reg.estimtates.contr.2[,t.test(gamma_zero)]
| /Ttest.R | no_license | Antoniodigiovanni/FAT-Cases-in-Finance | R | false | false | 4,062 | r | library(data.table)
library(zoo)
library(rstudioapi)
library(tidyverse)
# Set the working directory to the script directory
# NOTE(review): relies on rstudioapi, so this only works when run
# interactively inside RStudio; consider a project-relative path for
# non-interactive runs.
setwd (dirname(getActiveDocumentContext()$path))
# Set Date Language in English
Sys.setlocale("LC_TIME", "C")
# Factors.R is expected to define `factors`, `portfolio_returns` and the
# regression objects referenced at the bottom of this script -- TODO confirm.
source("Factors.R")
portfolio_returns <- as.data.table(portfolio_returns)
# Compute yearly per-country quintile breakpoints (20/40/60/80%) of the
# column named by `factor`, estimated from the July observations of "Big"
# stocks only, then attach them to every row of `data`.
#
# Bug fix: the original line began with a stray `+`
# (`+create_quintiles <- function...`), which is an invalid assignment
# target, so the function was never defined.
#
# Args:
#   data   - data.table with columns month, pf.size, country, year, hcjun
#            and the sorting-variable column named by `factor`.
#   factor - character scalar: name of the sorting-variable column.
# Returns: `data` merged with per-(year, country) breakpoint columns
#          bb20, bb40, bb60, bb80 (matched on hcjun/year and country).
create_quintiles <- function(data, factor) {
  breakpoints <- data[month == 7 & !is.na(data[[factor]]) & pf.size == "Big"] %>%
    group_by(country, year) %>%
    summarize(bb20 = quantile(.data[[factor]], probs = 0.2, na.rm = TRUE),
              bb40 = quantile(.data[[factor]], probs = 0.4, na.rm = TRUE),
              bb60 = quantile(.data[[factor]], probs = 0.6, na.rm = TRUE),
              bb80 = quantile(.data[[factor]], probs = 0.8, na.rm = TRUE)) %>%
    select(year, country, bb20, bb40, bb60, bb80)
  # Attach the breakpoints to every observation of the matching year/country.
  merge(data, breakpoints,
        by.x = c("hcjun", "country"),
        by.y = c("year", "country"))
}
# Assign each stock to the respective quintile
# Assign each stock to a quintile label based on its factor value relative
# to the row-level breakpoints bb20..bb80 added by create_quintiles().
# Adds column pf.bm by reference (data.table `:=`) and returns `data`
# invisibly. Labels: >bb80 "Big", (bb60,bb80] "LessBig",
# (bb40,bb60] "Neutral", (bb20,bb40] "LessSmall", <=bb20 "Small";
# NA factor values propagate to NA via ifelse.
# NOTE(review): the labels reuse size terminology ("Big"/"Small") although
# the column encodes high/low factor quintiles -- confirm intended.
create_breakpoints <- function(data, factor) {
data[ , pf.bm := ifelse(data[[factor]]>bb80, "Big",
ifelse((data[[factor]]<=bb80 & data[[factor]]>bb60),"LessBig",
ifelse((data[[factor]]<=bb60 & data[[factor]]>bb40),"Neutral",
ifelse((data[[factor]]<=bb40 & data[[factor]]>bb20),"LessSmall",
ifelse(data[[factor]]<=bb20,"Small",NA)))))]
}
# Calculate the value-weighted returns for the different factor portfolios
# Compute value-weighted returns per date for the five quintile portfolios
# plus the 5-1 hedge portfolio, then append one row of through-time average
# returns to `empty_df`, which is returned.
# NOTE(review): `factor` is accepted but never used inside this function.
# NOTE(review): the averages divide colSums(..., na.rm = TRUE) by the total
# number of dates, so dates where a portfolio return is NA count as zero --
# confirm this is intended rather than colMeans(..., na.rm = TRUE).
calculate_factor_returns <- function(data, empty_df, factor) {
portfolio_returns <- data[!is.na(pf.bm)] %>%
group_by(Date, pf.bm) %>%
summarize(ret.port = weighted.mean(RET.USD, MV.USD.June)) %>%
spread(pf.bm, ret.port) %>% mutate(hedge.pf = Big - Small) %>%
rename("5" = Big, "4" = LessBig, "3" = Neutral,"2" = LessSmall, "1" = Small, "5-1" = hedge.pf) %>%
select(Date, "1", "2", "3", "4", "5", "5-1")
portfolio_returns <- as.data.table(portfolio_returns)
factor_returns <- colSums(portfolio_returns[,2:7], na.rm = T) / nrow(portfolio_returns)
empty_df <- rbind(empty_df, factor_returns)
empty_df
}
# Run the full sorting pipeline for one factor: compute the quintile
# breakpoints, bin each stock into a quintile, and append the resulting
# average portfolio returns to `empty_df`, which is returned.
create_portfolio_sorts <- function(data, factor, empty_df) {
  with_breakpoints <- create_quintiles(data, factor)
  binned <- create_breakpoints(with_breakpoints, factor)
  calculate_factor_returns(binned, empty_df, factor)
}
# Create empty dataframe to display results
# (create_portfolio_sorts appends one row per factor)
cols = c("1", "2", "3", "4", "5", "5-1")
portfolio_returns <- data.frame(matrix(nrow = 0, ncol = length(cols)))
# Exclude the stocks that have negative earnings, cash flows or gross profits
# We start with Book-To-Market and we only consider stocks with positive BM
bm_factor <- factors %>% filter(BM>0)
portfolio_returns <- create_portfolio_sorts(bm_factor, "BM", portfolio_returns)
# Filter out stocks with negative earnings
ep_factor <- factors %>% filter(EP > 0) %>% drop_na(EP)
portfolio_returns <- create_portfolio_sorts(ep_factor, "EP", portfolio_returns)
# Filter out stocks with negative CF
cp_factor <- factors %>% filter(CP > 0) %>% drop_na(CP)
portfolio_returns <- create_portfolio_sorts(cp_factor, "CP", portfolio_returns)
roe_factor <- factors %>% filter(ROE > 0)
portfolio_returns <- create_portfolio_sorts(roe_factor, "ROE", portfolio_returns)
roa_factor <- factors %>% filter(ROA > 0)
portfolio_returns <- create_portfolio_sorts(roa_factor, "ROA", portfolio_returns)
gpa_factor <- factors %>% filter(GPA > 0)
portfolio_returns <- create_portfolio_sorts(gpa_factor, "GPA", portfolio_returns)
opbe_factor <- factors %>% filter(OPBE > 0)
portfolio_returns <- create_portfolio_sorts(opbe_factor, "OPBE", portfolio_returns)
oa_factor <- factors %>% drop_na(OA)
portfolio_returns <- create_portfolio_sorts(oa_factor, "OA", portfolio_returns)
# Label the consolidated results table: columns = quintiles, rows = factors.
colnames(portfolio_returns) <- cols
rows <- c("BM", "EP", "CP", "ROE", "ROA", "GP/A", "OP/BE", "OA")
rownames(portfolio_returns) <- rows
#ttest
# NOTE(review): this references an object not defined in this script
# (presumably created by Factors.R); the name looks like a typo of
# "CS.reg.estimates.contr.2" -- confirm before running.
CS.reg.estimtates.contr.2[,t.test(gamma_zero)]
|
library(samplesize4surveys)
### Name: BigLucyT0T1
### Title: Some Business Population Database for two periods of time
### Aliases: BigLucyT0T1
### Keywords: datasets
### ** Examples
data(Lucy)
# NOTE(review): attach() puts Lucy's columns on the search path; fine for a
# throwaway example, but with(Lucy, ...) or Lucy$Income is safer in scripts.
attach(Lucy)
# The variables of interest are: Income, Employees and Taxes
# This information is stored in a data frame called estima
estima <- data.frame(Income, Employees, Taxes)
# The population totals
colSums(estima)
# Some parameters of interest
table(SPAM,Level)
xtabs(Income ~ Level+SPAM)
# Correlations among characteristics of interest
cor(estima)
# Some useful histograms
hist(Income)
hist(Taxes)
hist(Employees)
# Some useful plots
boxplot(Income ~ Level)
barplot(table(Level))
pie(table(SPAM))
| /data/genthat_extracted_code/samplesize4surveys/examples/BigLucyT0T1.rd.R | no_license | surayaaramli/typeRrh | R | false | false | 706 | r | library(samplesize4surveys)
### Name: BigLucyT0T1
### Title: Some Business Population Database for two periods of time
### Aliases: BigLucyT0T1
### Keywords: datasets
### ** Examples
data(Lucy)
# NOTE(review): attach() puts Lucy's columns on the search path; fine for a
# throwaway example, but with(Lucy, ...) or Lucy$Income is safer in scripts.
attach(Lucy)
# The variables of interest are: Income, Employees and Taxes
# This information is stored in a data frame called estima
estima <- data.frame(Income, Employees, Taxes)
# The population totals
colSums(estima)
# Some parameters of interest
table(SPAM,Level)
xtabs(Income ~ Level+SPAM)
# Correlations among characteristics of interest
cor(estima)
# Some useful histograms
hist(Income)
hist(Taxes)
hist(Employees)
# Some useful plots
boxplot(Income ~ Level)
barplot(table(Level))
pie(table(SPAM))
|
#work with iris dataset. we shall predict Petal.Width
# NOTE(review): setwd() to a machine-specific absolute path makes the script
# non-portable; consider a project-relative path.
setwd("~/OneDrive/shared files/Statslearningcourse/multi step regression")
library("plyr")
library("dplyr")
library('magrittr')
#predict Petal.Width using lm, find RMSE
# Root-mean-squared error between observed values and predictions.
# Prints the RMSE (as the original did) and returns it invisibly via print().
# Args:
#   answer - numeric vector of true values.
#   guess  - numeric vector of predictions (same length).
RMSE <- function(answer, guess) {
  error <- answer - guess              # `<-` for assignment, not `=`
  root.error <- sqrt(mean(error^2))
  print(root.error)
}
#standard method with all vars in
pairs(~., data=iris)
# Baseline: fit on the full data and score in-sample (optimistic RMSE).
firstup<- lm(Petal.Width~ Petal.Length + Sepal.Width + Sepal.Length + Species, data=iris)
rollers<- predict(firstup, iris)
RMSE(iris$Petal.Width, rollers)
#enforce no complete cases
#Petal.Length can stay full and Sepal.Width
#Sepal.Length and Species will both lose half their data
reduce<- iris
# Knock out complementary halves so no row is complete across all predictors.
NAs<- sample(nrow(reduce), round(1/2*nrow(reduce),0), replace=F)
reduce$Sepal.Length[NAs]<- NA
reduce$Species[-NAs]<- NA
summary(reduce)
#now fit full variables, followed by incomplete variables individually
#full variables
first<- lm(Petal.Width~Petal.Length + Sepal.Width, data=reduce)
summary(first)
plot(Petal.Width~Petal.Length + Sepal.Width, data=reduce)
# NOTE(review): abline() on a two-predictor lm uses only the first two
# coefficients, so this line is not a faithful picture of the fit.
abline(first)
#Sepal.Length - half NA
second<- lm(Petal.Width ~ Sepal.Length, data=reduce)
summary(second)
plot(Petal.Width ~ Sepal.Length, data=reduce)
abline(second)
#Species - half missing
# 3 separate species
summary(reduce$Species)
third<- lm(Petal.Width ~ Species, data=reduce)
summary(third)
plot(Petal.Width ~ Species, data=reduce)
#compile results of all lm's
# Predictions from each sub-model on the FULL iris data, one column per model.
ans<- NULL
ans$answer<- iris$Petal.Width
ans<- as.data.frame(ans)
ans$first.lm<- predict(first, iris)
ans$second.lm<- predict(second, iris)
ans$third.lm<- predict(third, iris)
#check RMSE's
RMSE(ans$answer, ans$first.lm)
RMSE(ans$answer, ans$second.lm)
RMSE(ans$answer, ans$third.lm)
#now combine answers from first.lm, second.lm, third.lm to find weights
# Stacking step: linear blend of the three sub-model predictions.
# NOTE(review): fitted and evaluated on the same data (in-sample RMSE).
final.lm<- lm(answer~first.lm+second.lm+third.lm, data=ans)
final<- predict(final.lm, ans)
ans$final.lm<- final
RMSE(ans$answer, ans$final.lm)
| /Statslearningcourse/multi step regression/separte_regs_iris.R | no_license | amycook/cheatsheet | R | false | false | 1,938 | r | #work with iris dataset. we shall predict Petal.Width
# NOTE(review): setwd() to a machine-specific absolute path makes the script
# non-portable; consider a project-relative path.
setwd("~/OneDrive/shared files/Statslearningcourse/multi step regression")
library("plyr")
library("dplyr")
library('magrittr')
#predict Petal.Width using lm, find RMSE
# Root-mean-squared error between observed values and predictions.
# Prints the RMSE (as the original did) and returns it invisibly via print().
# Args:
#   answer - numeric vector of true values.
#   guess  - numeric vector of predictions (same length).
RMSE <- function(answer, guess) {
  error <- answer - guess              # `<-` for assignment, not `=`
  root.error <- sqrt(mean(error^2))
  print(root.error)
}
#standard method with all vars in
pairs(~., data=iris)
# Baseline: fit on the full data and score in-sample (optimistic RMSE).
firstup<- lm(Petal.Width~ Petal.Length + Sepal.Width + Sepal.Length + Species, data=iris)
rollers<- predict(firstup, iris)
RMSE(iris$Petal.Width, rollers)
#enforce no complete cases
#Petal.Length can stay full and Sepal.Width
#Sepal.Length and Species will both lose half their data
reduce<- iris
# Knock out complementary halves so no row is complete across all predictors.
NAs<- sample(nrow(reduce), round(1/2*nrow(reduce),0), replace=F)
reduce$Sepal.Length[NAs]<- NA
reduce$Species[-NAs]<- NA
summary(reduce)
#now fit full variables, followed by incomplete variables individually
#full variables
first<- lm(Petal.Width~Petal.Length + Sepal.Width, data=reduce)
summary(first)
plot(Petal.Width~Petal.Length + Sepal.Width, data=reduce)
# NOTE(review): abline() on a two-predictor lm uses only the first two
# coefficients, so this line is not a faithful picture of the fit.
abline(first)
#Sepal.Length - half NA
second<- lm(Petal.Width ~ Sepal.Length, data=reduce)
summary(second)
plot(Petal.Width ~ Sepal.Length, data=reduce)
abline(second)
#Species - half missing
# 3 separate species
summary(reduce$Species)
third<- lm(Petal.Width ~ Species, data=reduce)
summary(third)
plot(Petal.Width ~ Species, data=reduce)
#compile results of all lm's
# Predictions from each sub-model on the FULL iris data, one column per model.
ans<- NULL
ans$answer<- iris$Petal.Width
ans<- as.data.frame(ans)
ans$first.lm<- predict(first, iris)
ans$second.lm<- predict(second, iris)
ans$third.lm<- predict(third, iris)
#check RMSE's
RMSE(ans$answer, ans$first.lm)
RMSE(ans$answer, ans$second.lm)
RMSE(ans$answer, ans$third.lm)
#now combine answers from first.lm, second.lm, third.lm to find weights
# Stacking step: linear blend of the three sub-model predictions.
# NOTE(review): fitted and evaluated on the same data (in-sample RMSE).
final.lm<- lm(answer~first.lm+second.lm+third.lm, data=ans)
final<- predict(final.lm, ans)
ans$final.lm<- final
RMSE(ans$answer, ans$final.lm)
|
#' Simulate longitudinal data using a familial (shared gamma) frailty model.
#'
#' Individuals are simulated in pairs ("families"); each pair shares one
#' frailty value Z drawn from gamma(mu, ssq), where mu is the mean and ssq
#' is the variance.
#' See: https://www.rocscience.com/help/swedge/webhelp/swedge/Gamma_Distribution.htm
#' @param N Number of individuals; must be even (two individuals per
#' family). An odd N is truncated down to the nearest even number.
#' @param f a list of formulas that define age (time) - dependency. Default: list(at="a", f1t="f1", Qt="Q*exp(theta*t)", ft="f", bt="b", mu0t="mu0*exp(theta*t)")
#' @param step An interval between two observations, a random uniformally-distributed value is then added to this step.
#' @param tstart Starting time (age).
#' Can be a number (30 by default) or a vector of two numbers: c(a, b) - in this case, starting value of time
#' is simulated via uniform(a,b) distribution.
#' @param tend A number, defines final time (105 by default).
#' @param ystart A starting value of covariates.
#' @param sd0 A standard deviation for modelling the next covariate value, sd0 = 1 by default.
#' @param nobs A number of observations (lines) for individual observations.
#' @param gamma_mu A parameter which is a mean value, default = 1
#' @param gamma_ssq A sigma squared, default = 0.5.
#' @return A data.frame of simulated data with columns id, xi (death
#' indicator), t1, t2, y, y.next.
#'@references Yashin, A. et al (2007), Health decline, aging and mortality: how are they related?
#'Biogerontology, 8(3), 291-302.<DOI:10.1007/s10522-006-9073-3>.
#'@export
#' @examples
#' library(stpm)
#' dat <- simdata_gamma_frailty(N=10)
#' head(dat)
#'
simdata_gamma_frailty <- function(N=10,f=list(at="-0.05", f1t="80", Qt="2e-8", ft="80", bt="5", mu0t="1e-3"),
                 step=1, tstart=30, tend=105, ystart=80, sd0=1, nobs=NULL, gamma_mu=1, gamma_ssq=0.5) {
  # Draw one frailty value from gamma(mean = gamma_mu, var = gamma_ssq),
  # re-parameterised as shape = mu^2/var, scale = var/mu.
  # Bug fix: the original evaluated dgamma(1, ...) - the density at 1, a
  # deterministic constant - so the "frailty" never varied across families.
  # A frailty simulation requires a random draw, hence rgamma().
  simGamma <- function(gamma_mu, gamma_ssq) {
    shape <- gamma_mu^2 / gamma_ssq
    scale <- gamma_ssq / gamma_mu
    rgamma(1, shape = shape, scale = scale)
  }
  formulas <- f
  # Placeholders; filled in (via <<-) by comp_func_params() below.
  at <- NULL
  f1t <- NULL
  Qt <- NULL
  ft <- NULL
  bt <- NULL
  mu0t <- NULL
  # Turn the user-supplied formula strings into functions of t.
  comp_func_params <- function(astring, f1string, qstring, fstring, bstring, mu0string) {
    at <<- eval(bquote(function(t) .(parse(text = astring)[[1]])))
    f1t <<- eval(bquote(function(t) .(parse(text = f1string)[[1]])))
    Qt <<- eval(bquote(function(t) .(parse(text = qstring)[[1]])))
    ft <<- eval(bquote(function(t) .(parse(text = fstring)[[1]])))
    bt <<- eval(bquote(function(t) .(parse(text = bstring)[[1]])))
    mu0t <<- eval(bquote(function(t) .(parse(text = mu0string)[[1]])))
  }
  # Process-noise variance accumulated between observations t1 -> t2.
  sigma_sq <- function(t1, t2) {
    bt(t1) * (t2 - t1)
  }
  # Conditional mean of the covariate at t2 given its value y at t1.
  m <- function(y, t1, t2) {
    y + at(t1) * (y - f1t(t1)) * (t2 - t1)
  }
  # Mortality hazard: frailty-scaled baseline plus a quadratic penalty for
  # deviating from the "optimal" trajectory ft(t).
  mu <- function(y, t, z) {
    z * mu0t(t) + (y - ft(t))^2 * Qt(t)
  }
  comp_func_params(formulas$at, formulas$f1t, formulas$Qt, formulas$ft, formulas$bt, formulas$mu0t)
  # Seed row of NAs; dropped before returning. (Rows are appended with
  # rbind; fine for moderate N, though preallocation would be faster.)
  data <- matrix(nrow = 1, ncol = 6, NA)
  id <- 0
  # Bug fix: the original loop was `for(i in 1:N/2)`, which parses as
  # (1:N)/2 - a vector of length N - so 2*N individuals were simulated
  # instead of N. seq_len(N %/% 2) iterates once per family as intended.
  for (i in seq_len(N %/% 2)) {
    # One shared frailty value per family of two individuals:
    Z <- simGamma(gamma_mu, gamma_ssq)
    for (ii in 1:2) {
      if (length(tstart) == 1) {
        t2 <- tstart # Fixed starting time
      } else if (length(tstart) == 2) {
        t2 <- runif(1, tstart[1], tstart[2]) # Random starting time
      } else {
        stop(paste("Incorrect tstart:", tstart))
      }
      # Starting point of the covariate trajectory
      new_person <- FALSE
      y2 <- rnorm(1, mean = ystart, sd = sd0)
      n_observ <- 0
      while (new_person == FALSE) {
        t1 <- t2
        t2 <- t1 + runif(1, -step/10, step/10) + step
        y1 <- y2
        # Survival probability over (t1, t2] under the current hazard:
        S <- exp(-1 * mu(y1, t1, Z) * (t2 - t1))
        if (S > runif(1, 0, 1)) {
          # Survived this interval: draw the next covariate value.
          xi <- 0
          y2 <- rnorm(1, mean = m(y1, t1, t2), sd = sqrt(sigma_sq(t1, t2)))
        } else {
          # Died in this interval: record the event and stop this person.
          xi <- 1
          y2 <- NA
          new_person <- TRUE
        }
        data <- rbind(data, c(id, xi, t1, t2, y1, y2))
        n_observ <- n_observ + 1
        # Optional cap on the number of observations per individual:
        if (!is.null(nobs) && n_observ == nobs) {
          new_person <- TRUE
        }
        # Right-censor at tend:
        if (t2 > tend && new_person == FALSE) {
          new_person <- TRUE
        }
      }
      id <- id + 1
    }
  }
  # Drop the NA seed row and name the columns.
  data <- data[2:dim(data)[1], ]
  colnames(data) <- c("id", "xi", "t1", "t2", "y", "y.next")
  invisible(data.frame(data))
}
#' We use the following variation: gamma(mu, ssq), where mu is the mean and ssq is sigma square.
#' See: https://www.rocscience.com/help/swedge/webhelp/swedge/Gamma_Distribution.htm
#' @param N Number of individuals.
#' @param f a list of formulas that define age (time) - dependency. Default: list(at="a", f1t="f1", Qt="Q*exp(theta*t)", ft="f", bt="b", mu0t="mu0*exp(theta*t)")
#' @param step An interval between two observations, a random uniformally-distributed value is then added to this step.
#' @param tstart Starting time (age).
#' Can be a number (30 by default) or a vector of two numbers: c(a, b) - in this case, starting value of time
#' is simulated via uniform(a,b) distribution.
#' @param tend A number, defines final time (105 by default).
#' @param ystart A starting value of covariates.
#' @param sd0 A standard deviation for modelling the next covariate value, sd0 = 1 by default.
#' @param nobs A number of observations (lines) for individual observations.
#' @param gamma_mu A parameter which is a mean value, default = 1
#' @param gamma_ssq A sigma squared, default = 0.5.
#' @return A table with simulated data.
#'@references Yashin, A. et al (2007), Health decline, aging and mortality: how are they related?
#'Biogerontology, 8(3), 291-302.<DOI:10.1007/s10522-006-9073-3>.
#'@export
#' @examples
#' library(stpm)
#' dat <- simdata_gamma_frailty(N=10)
#' head(dat)
#'
simdata_gamma_frailty <- function(N=10,f=list(at="-0.05", f1t="80", Qt="2e-8", ft="80", bt="5", mu0t="1e-3"),
                        step=1, tstart=30, tend=105, ystart=80, sd0=1, nobs=NULL, gamma_mu=1, gamma_ssq=0.5) {
  # Simulate longitudinal/survival data under a familial shared-frailty model:
  # N individuals in N/2 families of two; each family shares one gamma frailty Z.
  if (N < 2) {
    stop("N must be at least 2 (one family of two individuals)", call. = FALSE)
  }
  # Draw one gamma-distributed frailty value with mean gamma_mu and variance
  # gamma_ssq (shape = mu^2 / ssq, scale = ssq / mu).
  # See: https://www.rocscience.com/help/swedge/webhelp/swedge/Gamma_Distribution.htm
  # BUG FIX: the original called dgamma(1, ...), i.e. evaluated the density at
  # the point 1 -- a deterministic constant, so every family received the same
  # "frailty" and the shared-frailty structure was lost. rgamma() draws a
  # random value, as a frailty model requires.
  simGamma <- function(gamma_mu, gamma_ssq) {
    alpha <- gamma_mu^2 / gamma_ssq  # shape
    beta <- gamma_ssq / gamma_mu     # scale
    rgamma(1, shape = alpha, scale = beta)
  }
  formulas <- f
  # Placeholders for the age-dependent coefficient functions built below.
  at <- NULL
  f1t <- NULL
  Qt <- NULL
  ft <- NULL
  bt <- NULL
  mu0t <- NULL
  # Turn the user-supplied formula strings into functions of t; assigned into
  # the enclosing function environment with <<-.
  comp_func_params <- function(astring, f1string, qstring, fstring, bstring, mu0string) {
    at <<- eval(bquote(function(t) .(parse(text = astring)[[1]])))
    f1t <<- eval(bquote(function(t) .(parse(text = f1string)[[1]])))
    Qt <<- eval(bquote(function(t) .(parse(text = qstring)[[1]])))
    ft <<- eval(bquote(function(t) .(parse(text = fstring)[[1]])))
    bt <<- eval(bquote(function(t) .(parse(text = bstring)[[1]])))
    mu0t <<- eval(bquote(function(t) .(parse(text = mu0string)[[1]])))
  }
  # Process-noise variance accumulated between observation times t1 < t2.
  sigma_sq <- function(t1, t2) {
    bt(t1) * (t2 - t1)
  }
  # Conditional mean of the covariate at t2 given its value y at t1.
  m <- function(y, t1, t2) {
    y + at(t1) * (y - f1t(t1)) * (t2 - t1)
  }
  # Quadratic hazard with multiplicative frailty z.
  mu <- function(y, t, z) {
    z * mu0t(t) + (y - ft(t))^2 * Qt(t)
  }
  comp_func_params(formulas$at, formulas$f1t, formulas$Qt, formulas$ft, formulas$bt, formulas$mu0t)
  # Rows accumulate under a throw-away NA seed row, dropped at the end.
  data <- matrix(nrow = 1, ncol = 6, NA)
  id <- 0
  famid <- 0
  # BUG FIX: the original loop was `for(i in 1:N/2)`, which parses as
  # (1:N)/2 and therefore iterates N times (simulating 2*N individuals).
  # seq_len(N %/% 2) gives the intended N/2 families.
  for (i in seq_len(N %/% 2)) {  # N must be even
    # One shared frailty per family of two:
    Z <- simGamma(gamma_mu, gamma_ssq)
    for (ii in 1:2) {
      if (length(tstart) == 1) {
        t2 <- tstart  # fixed starting time (age)
      } else if (length(tstart) == 2) {
        t2 <- runif(1, tstart[1], tstart[2])  # starting time drawn uniformly
      } else {
        stop(paste("Incorrect tstart:", tstart))
      }
      # Starting covariate value for this individual
      new_person <- FALSE
      y2 <- rnorm(1, mean = ystart, sd = sd0)
      n_observ <- 0
      while (new_person == FALSE) {
        t1 <- t2
        t2 <- t1 + runif(1, -step/10, step/10) + step
        y1 <- y2
        # Survival probability over (t1, t2] under the frailty hazard.
        S <- exp(-1 * mu(y1, t1, Z) * (t2 - t1))
        if (S > runif(1, 0, 1)) {
          # Survived the interval: simulate the next covariate value.
          xi <- 0
          y2 <- rnorm(1, mean = m(y1, t1, t2), sd = sqrt(sigma_sq(t1, t2)))
          new_person <- FALSE
        } else {
          # Died in this interval: event indicator 1, no next covariate.
          xi <- 1
          y2 <- NA
          new_person <- TRUE
        }
        data <- rbind(data, c(id, xi, t1, t2, y1, y2))
        n_observ <- n_observ + 1
        # Optional cap on the number of observations per individual.
        if (!is.null(nobs) && n_observ == nobs) {
          new_person <- TRUE
        }
        # Stop following an individual past the observation window.
        if (t2 > tend && new_person == FALSE) {
          new_person <- TRUE
        }
      }
      id <- id + 1
    }
    famid <- famid + 1
  }
  # Drop the NA seed row and label the columns.
  data <- data[2:dim(data)[1], , drop = FALSE]
  colnames(data) <- c("id", "xi", "t1", "t2", "y", "y.next")
  invisible(data.frame(data))
}
library(tidyverse)
# function to fit the aov and run the multiple comparisons
# Fit a one-way ANOVA of a continuous column on a grouping column, then run
# Tukey all-pairs multiple comparisons on the fitted model.
#
# `group_var` / `continuous_var` are bare column names, embraced with {{ }}
# (tidy evaluation) so the function composes cleanly in a pipe.
run_multicomp <- function(data, group_var, continuous_var) {
  # Pull the two columns out; the grouping column becomes a factor so that
  # aov() treats it as a categorical predictor.
  lhs <- pull(data, {{ continuous_var }})
  rhs <- as.factor(pull(data, {{ group_var }}))
  # NB: the predictor must be named `rhs` -- multcomp::mcp() below refers to
  # the model term by that name.
  fitted_anova <- aov(lhs ~ rhs)
  multcomp::glht(model = fitted_anova,
                 linfct = multcomp::mcp(rhs = "Tukey"))
}
# function to grab the letters from the multiple comparisons result
# Extract compact-letter-display (CLD) groupings from a glht object and
# return them as a tibble keyed by the original treatment group.
grab_letters_df <- function(glht_obj, group_var, level = 0.05) {
  # multcomp::cld routinely emits "Completion with ..." warnings that can be
  # safely ignored; run it quietly and re-raise only the unexpected ones.
  quiet_cld <- quietly(multcomp::cld)
  cld_result <- quiet_cld(glht_obj, level = level)
  warns <- cld_result$warnings
  if (length(warns) > 0) {
    unexpected <- warns[!str_starts(warns, "Completion with")]
    if (length(unexpected) > 0) warning(unexpected)
  }
  # The warnings were benign -- continue with the analysis result.
  mcletters <- pluck(cld_result, "result", "mcletters")
  # Keep only the letter assignments, with one column per representation,
  # plus a column holding the original treatment group names.
  tibble({{group_var}} := names(mcletters$Letters),
         letters = mcletters$Letters,
         letters_mono = mcletters$monospacedLetters)
}
# and a function to run the whole thing:
# take an input dataset, run the multiple comparisons, and grab the letters as a data frame
# End-to-end helper: run the Tukey multiple-comparison ANOVA on `data` and
# return the compact-letter-display groupings as a tibble, annotated with a
# human-readable significance summary column.
make_multicomp_letters <- function(data, group_var, continuous_var, level = 0.05) {
  # Step 1: fit the ANOVA and compute all pairwise (Tukey) contrasts.
  comparisons <- run_multicomp(data,
                               group_var = {{ group_var }},
                               continuous_var = {{ continuous_var }})
  # Step 2: convert the comparison letters to a data frame and classify
  # the overall pattern of significance.
  letters_df <- grab_letters_df(comparisons,
                                group_var = {{ group_var }},
                                level = level)
  mutate(letters_df, significance = sig_check(letters))
}
# Classify a compact-letter-display result by how many pairwise differences
# are significant.
#
# @param letters character vector of CLD letter codes, one per group
#   (e.g. c("a", "ab", "b")); groups sharing a letter do not differ
#   significantly from each other.
# @return one of three summary strings.
sig_check <- function(letters) {
  uniques <- unique(letters)
  # Every group carries the same letter code: nothing differs.
  if (length(uniques) == 1) {
    return("No pairwise differences are significant")
  }
  # Each group has its own single letter: every pair differs.
  # (&& rather than & -- this is a scalar condition, not a vectorised one;
  # nchar() replaces stringr::str_length(), which it matches for character
  # input, removing an unnecessary dependency.)
  if (length(letters) == length(uniques) && all(nchar(letters) == 1)) {
    "All pairwise differences are significant"
  } else {
    "Some pairwise differences are significant"
  }
}
| /99_letters-functions.R | no_license | davisadamw/buncha-tukeys | R | false | false | 2,884 | r | library(tidyverse)
# function to fit the aov and run the multiple comparisons
run_multicomp <- function(data, group_var, continuous_var) {
# to make this work in a pipe / make my life more difficult, I'm using quasiquotation here
lhs <- pull(data, {{ continuous_var }})
rhs <- as.factor(pull(data, {{ group_var }}))
# first, fit an anova
anova_result <- aov(lhs ~ rhs)
# run multiple comparisons test, return the result
multcomp::glht(model = anova_result,
linfct = multcomp::mcp(rhs = "Tukey"))
}
# function to grab the letters from the multiple comparisons result
grab_letters_df <- function(glht_obj, group_var, level = 0.05) {
# multcomp::cld generates lots of warnings that can be safely ignored
# a quietly function will capture the warnings and let us decide which we want to pass forwards
quiet_cld <- quietly(multcomp::cld)
cld_result <- glht_obj %>%
quiet_cld(level = level)
# check warnings, send along any that don't match our criteria to ignore
cld_warnings <- cld_result$warnings
if (length(cld_warnings) > 0) {
cld_warnings_to_raise <- cld_warnings[!str_starts(cld_warnings, "Completion with")]
if (length(cld_warnings_to_raise) > 0) warning(cld_warnings_to_raise)
}
# if the warnings weren't bad, keep the analysis going
mcletters <- cld_result %>%
pluck("result", "mcletters")
# we only really want a couple items out of this thing,
# make a data frame out of those including a column for the original treatment group
tibble({{group_var}} := names(mcletters$Letters),
letters = mcletters$Letters,
letters_mono = mcletters$monospacedLetters)
}
# and a function to run the whole thing:
# take an input dataset, run the multiple comparisons, and grab the letters as a data frame
make_multicomp_letters <- function(data, group_var, continuous_var, level = 0.05) {
# first, run the multiple comparisons anova
glht_result <- run_multicomp(data,
group_var = {{ group_var }},
continuous_var = {{ continuous_var }})
# then grab the letters and identify if
grab_letters_df(glht_result,
group_var = {{ group_var }},
level = level) %>%
mutate(significance = sig_check(letters))
}
# Classify a compact-letter-display result by how many pairwise differences
# are significant.
#
# @param letters character vector of CLD letter codes, one per group
#   (e.g. c("a", "ab", "b")); groups sharing a letter do not differ
#   significantly from each other.
# @return one of three summary strings.
sig_check <- function(letters) {
  uniques <- unique(letters)
  # Every group carries the same letter code: nothing differs.
  if (length(uniques) == 1) {
    return("No pairwise differences are significant")
  }
  # Each group has its own single letter: every pair differs.
  # (&& rather than & -- this is a scalar condition, not a vectorised one;
  # nchar() replaces stringr::str_length(), which it matches for character
  # input, removing an unnecessary dependency.)
  if (length(letters) == length(uniques) && all(nchar(letters) == 1)) {
    "All pairwise differences are significant"
  } else {
    "Some pairwise differences are significant"
  }
}
|
## ff_schedule (MFL) ##
#' Get a dataframe detailing every game for every franchise
#'
#' @param conn a conn object created by `ff_connect()`
#' @param ... for other platforms
#'
#' @examples
#' \donttest{
#' try({ # try only shown here because sometimes CRAN checks are weird
#' ssb_conn <- ff_connect(platform = "mfl", league_id = 54040, season = 2020)
#' ff_schedule(ssb_conn)
#' }) # end try
#' }
#'
#' @describeIn ff_schedule MFL: returns schedule data, one row for every franchise for every week. Completed games have result data.
#'
#' @export
ff_schedule.mfl_conn <- function(conn, ...) {
  # Fetch the raw weekly schedule from the MFL "schedule" endpoint and widen
  # it into one row per week.
  schedule_raw <- mfl_getendpoint(conn, "schedule") %>%
    purrr::pluck("content", "schedule", "weeklySchedule") %>%
    tibble::tibble() %>%
    tidyr::unnest_wider(1)
  # No matchup data at all (e.g. schedule not yet published) -> return NULL.
  if (is.null(schedule_raw[["matchup"]])) {
    return(NULL)
  }
  # Weeks with a single matchup arrive as a bare list rather than a list of
  # lists; wrap those so unnest_longer() treats every week uniformly, then
  # expand to one row per matchup with away/home columns split out.
  schedule <- schedule_raw %>%
    dplyr::mutate(
      matchup_length = purrr::map(.data$matchup, length),
      matchup = purrr::map_if(.data$matchup, .data$matchup_length == 1, ~ list(.x))
    ) %>%
    tidyr::unnest_longer("matchup") %>%
    tidyr::unnest_wider("matchup") %>%
    # NOTE(review): assumes the first franchise element is the away team and
    # the second the home team -- confirm against the MFL API ordering.
    tidyr::hoist("franchise", "away" = 1, "home" = 2) %>%
    tidyr::unnest_wider("away", names_sep = "_") %>%
    tidyr::unnest_wider("home", names_sep = "_") %>%
    dplyr::select(-dplyr::ends_with("isHome")) %>%
    dplyr::mutate_at(dplyr::vars(dplyr::contains("score"), "week", dplyr::contains("spread")), as.numeric)
  # Build one row per franchise per week: each matchup is emitted twice,
  # once from the home side's perspective...
  home <- schedule %>%
    dplyr::rename_at(dplyr::vars(dplyr::contains("home")), ~ stringr::str_remove(.x, "home_")) %>%
    dplyr::rename_at(dplyr::vars(dplyr::contains("away")), ~ stringr::str_replace(.x, "away_", "opponent_")) %>%
    dplyr::select(dplyr::any_of(c("week",
                                  "franchise_id" = "id",
                                  "franchise_score" = "score",
                                  "spread",
                                  "result",
                                  "opponent_id",
                                  "opponent_score"
    )))
  # ...and once from the away side's perspective.
  away <- schedule %>%
    dplyr::rename_at(dplyr::vars(dplyr::contains("away")), ~ stringr::str_remove(.x, "away_")) %>%
    dplyr::rename_at(dplyr::vars(dplyr::contains("home")), ~ stringr::str_replace(.x, "home_", "opponent_")) %>%
    dplyr::select(dplyr::any_of(c("week",
                                  "franchise_id" = "id",
                                  "franchise_score" = "score",
                                  "spread",
                                  "result",
                                  "opponent_id",
                                  "opponent_score"
    )))
  # Stack both perspectives and order by week, then franchise.
  full_schedule <- dplyr::bind_rows(home, away) %>%
    dplyr::arrange(.data$week, .data$franchise_id)
  return(full_schedule)
}
| /R/mfl_schedule.R | permissive | SCasanova/ffscrapr | R | false | false | 2,425 | r | ## ff_schedule (MFL) ##
#' Get a dataframe detailing every game for every franchise
#'
#' @param conn a conn object created by `ff_connect()`
#' @param ... for other platforms
#'
#' @examples
#' \donttest{
#' try({ # try only shown here because sometimes CRAN checks are weird
#' ssb_conn <- ff_connect(platform = "mfl", league_id = 54040, season = 2020)
#' ff_schedule(ssb_conn)
#' }) # end try
#' }
#'
#' @describeIn ff_schedule MFL: returns schedule data, one row for every franchise for every week. Completed games have result data.
#'
#' @export
ff_schedule.mfl_conn <- function(conn, ...) {
schedule_raw <- mfl_getendpoint(conn, "schedule") %>%
purrr::pluck("content", "schedule", "weeklySchedule") %>%
tibble::tibble() %>%
tidyr::unnest_wider(1)
if (is.null(schedule_raw[["matchup"]])) {
return(NULL)
}
schedule <- schedule_raw %>%
dplyr::mutate(
matchup_length = purrr::map(.data$matchup, length),
matchup = purrr::map_if(.data$matchup, .data$matchup_length == 1, ~ list(.x))
) %>%
tidyr::unnest_longer("matchup") %>%
tidyr::unnest_wider("matchup") %>%
tidyr::hoist("franchise", "away" = 1, "home" = 2) %>%
tidyr::unnest_wider("away", names_sep = "_") %>%
tidyr::unnest_wider("home", names_sep = "_") %>%
dplyr::select(-dplyr::ends_with("isHome")) %>%
dplyr::mutate_at(dplyr::vars(dplyr::contains("score"), "week", dplyr::contains("spread")), as.numeric)
home <- schedule %>%
dplyr::rename_at(dplyr::vars(dplyr::contains("home")), ~ stringr::str_remove(.x, "home_")) %>%
dplyr::rename_at(dplyr::vars(dplyr::contains("away")), ~ stringr::str_replace(.x, "away_", "opponent_")) %>%
dplyr::select(dplyr::any_of(c("week",
"franchise_id" = "id",
"franchise_score" = "score",
"spread",
"result",
"opponent_id",
"opponent_score"
)))
away <- schedule %>%
dplyr::rename_at(dplyr::vars(dplyr::contains("away")), ~ stringr::str_remove(.x, "away_")) %>%
dplyr::rename_at(dplyr::vars(dplyr::contains("home")), ~ stringr::str_replace(.x, "home_", "opponent_")) %>%
dplyr::select(dplyr::any_of(c("week",
"franchise_id" = "id",
"franchise_score" = "score",
"spread",
"result",
"opponent_id",
"opponent_score"
)))
full_schedule <- dplyr::bind_rows(home, away) %>%
dplyr::arrange(.data$week, .data$franchise_id)
return(full_schedule)
}
|
# titanic is avaliable in your workspace
# Check out the structure of titanic
str(titanic)
# Use ggplot() for the first instruction
ggplot(titanic, aes(x = factor(Pclass), fill = factor(Sex))) +
geom_bar(position = "dodge")
# Use ggplot() for the second instruction
ggplot(titanic, aes(x = factor(Pclass), fill = factor(Sex))) +
geom_bar(position = "dodge") +
facet_grid(". ~ Survived")
# Position jitter (use below)
posn.j <- position_jitter(0.5, 0)
# Use ggplot() for the last instruction
ggplot(titanic, aes(x = factor(Pclass), y = Age, col = factor(Sex))) +
geom_jitter(size = 3, alpha = 0.5, position = posn.j) +
facet_grid(. ~ Survived) | /Titanic.R | no_license | edmondmarv/Data-Visualization-with-ggplot2-part-1- | R | false | false | 682 | r | # titanic is avaliable in your workspace
# Inspect the structure of the `titanic` data (assumed to be in the workspace).
str(titanic)
# Bar chart: passenger counts by class, with bars split (dodged) by sex.
ggplot(titanic, aes(x = factor(Pclass), fill = factor(Sex))) +
  geom_bar(position = "dodge")
# Same chart, faceted by survival status.
# NOTE(review): the facet spec is given as the string ". ~ Survived" here but
# as a bare formula below -- both work; the formula form is conventional.
ggplot(titanic, aes(x = factor(Pclass), fill = factor(Sex))) +
  geom_bar(position = "dodge") +
  facet_grid(". ~ Survived")
# Jitter position used by the scatter plot below: horizontal jitter only.
posn.j <- position_jitter(0.5, 0)
# Age by class, coloured by sex, jittered to reduce overplotting,
# faceted by survival status.
ggplot(titanic, aes(x = factor(Pclass), y = Age, col = factor(Sex))) +
  geom_jitter(size = 3, alpha = 0.5, position = posn.j) +
  facet_grid(. ~ Survived)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/network_connectivity_permutation.R
\name{get_largest_component_from_edgelist}
\alias{get_largest_component_from_edgelist}
\title{Returns the size of the largest component from a list of edges}
\usage{
get_largest_component_from_edgelist(edgelist)
}
\arguments{
\item{edgelist}{a two-column matrix}
}
\value{
the size of the largest connected component (numeric)
}
\description{
Returns the size of the largest component from a list of edges
}
| /packages/bcPcaAnalysis/man/get_largest_component_from_edgelist.Rd | no_license | a3cel2/bc_pca_git | R | false | true | 523 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/network_connectivity_permutation.R
\name{get_largest_component_from_edgelist}
\alias{get_largest_component_from_edgelist}
\title{Returns the size of the largest component from a list of edges}
\usage{
get_largest_component_from_edgelist(edgelist)
}
\arguments{
\item{edgelist}{a two-column matrix}
}
\value{
the size of the largest connected component (numeric)
}
\description{
Returns the size of the largest component from a list of edges
}
|
#!/usr/bin/env Rscript
# STILT R Executable
# For documentation, see https://uataq.github.io/stilt/
# Ben Fasoli
# User inputs ------------------------------------------------------------------
project <- 'stilt_run'
stilt_wd <- file.path('/shared', project)
output_wd <- file.path(stilt_wd, 'out')
lib.loc <- .libPaths()[1]
# Parallel simulation settings
# n_cores is # of cores per node
n_cores <- 2
n_nodes <- 1
slurm <- T
slurm_options <- list(
time = '300:00:00',
partition = 'compute'
)
# Simulation timing, yyyy-mm-dd HH:MM:SS (UTC)
#t_start <- '2015-06-18 22:00:00'
#t_end <- '2015-06-18 22:00:00'
#run_times <- seq(from = as.POSIXct(t_start, tz = 'UTC'),
# to = as.POSIXct(t_end, tz = 'UTC'),
# by = 'hour')
# Receptor location(s)
lati <- 40.5
long <- -112.0
zagl <- 5
# Expand the run times, latitudes, and longitudes to form the unique receptors
# that are used for each simulation
# receptors <- expand.grid(run_time = run_times, lati = lati, long = long,
# zagl = zagl, KEEP.OUT.ATTRS = F, stringsAsFactors = F)
receptors <- readRDS('/shared/stilt-tutorials/02-train/receptors.rds')
receptors$run_time <- as.POSIXct('2015-12-10 23:00:00', tz = 'UTC')
receptors$zagl <- 5
# Footprint grid settings, must set at least xmn, xmx, ymn, ymx below
hnf_plume <- T
projection <- '+proj=longlat'
smooth_factor <- 1
time_integrate <- F
xmn <- -112.30
xmx <- -111.52
ymn <- 40.390
ymx <- 40.95
xres <- 0.002
yres <- xres
# Meteorological data input
met_directory <- '/shared/stilt-tutorials/02-train/met/'
met_file_format <- '%Y%m%d.%Hz.hrrra'
n_met_min <- 1
# Model control
n_hours <- -24
numpar <- 200
rm_dat <- T
run_foot <- T
run_trajec <- T
timeout <- 3600
varsiwant <- c('time', 'indx', 'long', 'lati', 'zagl', 'sigw', 'tlgr', 'zsfc',
'icdx', 'temp', 'samt', 'foot', 'shtf', 'tcld', 'dmas', 'dens',
'rhfr', 'sphu', 'solw', 'lcld', 'zloc', 'dswf', 'wout', 'mlht',
'rain', 'crai', 'pres')
# Transport and dispersion settings
conage <- 48
cpack <- 1
delt <- 0
dxf <- 1
dyf <- 1
dzf <- 0.1
emisshrs <- 0.01
frhmax <- 3
frhs <- 1
frme <- 0.1
frmr <- 0
frts <- 0.1
frvs <- 0.1
hscale <- 10800
ichem <- 0
iconvect <- 0
initd <- 0
isot <- 0
kbls <- 1
kblt <- 1
kdef <- 1
khmax <- 9999
kmix0 <- 250
kmixd <- 3
kmsl <- 0
kpuff <- 0
krnd <- 6
kspl <- 1
kzmix <- 1
maxdim <- 1
maxpar <- min(10000, numpar)
mgmin <- 2000
ncycl <- 0
# NOTE(review): the original assigned ndump and ninit twice in a row with
# identical values; the redundant second pair has been removed.
ndump <- 0
ninit <- 1
nturb <- 0
outdt <- 0
outfrac <- 0.9
p10f <- 1
qcycle <- 0
random <- 1
splitf <- 1
tkerd <- 0.18
tkern <- 0.18
tlfrac <- 0.1
tratio <- 0.9
tvmix <- 1
veght <- 0.5
vscale <- 200
w_option <- 0
zicontroltf <- 0
ziscale <- rep(list(rep(0.8, 24)), nrow(receptors))
z_top <- 25000
# Transport error settings
horcoruverr <- NA
siguverr <- NA
tluverr <- NA
zcoruverr <- NA
horcorzierr <- NA
sigzierr <- NA
tlzierr <- NA
# Interface to mutate the output object with user defined functions
before_trajec <- function() {output}
before_footprint <- function() {output}
# Startup messages -------------------------------------------------------------
message('Initializing STILT')
message('Number of receptors: ', nrow(receptors))
message('Number of parallel threads: ', n_nodes * n_cores)
# Source dependencies ----------------------------------------------------------
setwd(stilt_wd)
source('r/dependencies.r')
# Structure out directory ------------------------------------------------------
# Outputs are organized in three formats. by-id contains simulation files by
# unique simulation identifier. particles and footprints contain symbolic links
# to the particle trajectory and footprint files in by-id
system(paste0('rm -r ', output_wd, '/footprints'), ignore.stderr = T)
if (run_trajec) {
system(paste0('rm -r ', output_wd, '/by-id'), ignore.stderr = T)
system(paste0('rm -r ', output_wd, '/particles'), ignore.stderr = T)
}
for (d in c('by-id', 'particles', 'footprints')) {
d <- file.path(output_wd, d)
if (!file.exists(d))
dir.create(d, recursive = T)
}
# Met path symlink -------------------------------------------------------------
# Auto symlink the meteorological data path to the user's home directory to
# eliminate issues with long (>80 char) paths in fortran
if ((nchar(paste0(met_directory, met_file_format)) + 2) > 80) {
met_loc <- file.path(path.expand('~'), paste0('m', project))
if (!file.exists(met_loc)) invisible(file.symlink(met_directory, met_loc))
} else met_loc <- met_directory
# Run trajectory simulations ---------------------------------------------------
stilt_apply(FUN = simulation_step,
slurm = slurm,
slurm_options = slurm_options,
n_cores = n_cores,
n_nodes = n_nodes,
before_footprint = list(before_footprint),
before_trajec = list(before_trajec),
conage = conage,
cpack = cpack,
delt = delt,
emisshrs = emisshrs,
frhmax = frhmax,
frhs = frhs,
frme = frme,
frmr = frmr,
frts = frts,
frvs = frvs,
hnf_plume = hnf_plume,
horcoruverr = horcoruverr,
horcorzierr = horcorzierr,
ichem = ichem,
iconvect = iconvect,
initd = initd,
isot = isot,
kbls = kbls,
kblt = kblt,
kdef = kdef,
khmax = khmax,
kmix0 = kmix0,
kmixd = kmixd,
kmsl = kmsl,
kpuff = kpuff,
krnd = krnd,
kspl = kspl,
kzmix = kzmix,
maxdim = maxdim,
maxpar = maxpar,
lib.loc = lib.loc,
met_file_format = met_file_format,
met_loc = met_loc,
mgmin = mgmin,
n_hours = n_hours,
n_met_min = n_met_min,
ncycl = ncycl,
ndump = ndump,
ninit = ninit,
nturb = nturb,
numpar = numpar,
outdt = outdt,
outfrac = outfrac,
output_wd = output_wd,
p10f = p10f,
projection = projection,
qcycle = qcycle,
r_run_time = receptors$run_time,
r_lati = receptors$lati,
r_long = receptors$long,
r_zagl = receptors$zagl,
random = random,
rm_dat = rm_dat,
run_foot = run_foot,
run_trajec = run_trajec,
siguverr = siguverr,
sigzierr = sigzierr,
smooth_factor = smooth_factor,
splitf = splitf,
stilt_wd = stilt_wd,
time_integrate = time_integrate,
timeout = timeout,
tkerd = tkerd, tkern = tkern,
tlfrac = tlfrac,
tluverr = tluverr,
tlzierr = tlzierr,
tratio = tratio,
tvmix = tvmix,
varsiwant = list(varsiwant),
veght = veght,
vscale = vscale,
w_option = w_option,
xmn = xmn,
xmx = xmx,
xres = xres,
ymn = ymn,
ymx = ymx,
yres = yres,
zicontroltf = zicontroltf,
ziscale = ziscale,
z_top = z_top,
zcoruverr = zcoruverr) | /cases/memory-light/run_stilt_AWS.r | no_license | jimmielin/cs205-spring-20-final | R | false | false | 7,723 | r | #!/usr/bin/env Rscript
# STILT R Executable
# For documentation, see https://uataq.github.io/stilt/
# Ben Fasoli
# User inputs ------------------------------------------------------------------
project <- 'stilt_run'
stilt_wd <- file.path('/shared', project)
output_wd <- file.path(stilt_wd, 'out')
lib.loc <- .libPaths()[1]
# Parallel simulation settings
# n_cores is # of cores per node
n_cores <- 2
n_nodes <- 1
slurm <- T
slurm_options <- list(
time = '300:00:00',
partition = 'compute'
)
# Simulation timing, yyyy-mm-dd HH:MM:SS (UTC)
#t_start <- '2015-06-18 22:00:00'
#t_end <- '2015-06-18 22:00:00'
#run_times <- seq(from = as.POSIXct(t_start, tz = 'UTC'),
# to = as.POSIXct(t_end, tz = 'UTC'),
# by = 'hour')
# Receptor location(s)
lati <- 40.5
long <- -112.0
zagl <- 5
# Expand the run times, latitudes, and longitudes to form the unique receptors
# that are used for each simulation
# receptors <- expand.grid(run_time = run_times, lati = lati, long = long,
# zagl = zagl, KEEP.OUT.ATTRS = F, stringsAsFactors = F)
receptors <- readRDS('/shared/stilt-tutorials/02-train/receptors.rds')
receptors$run_time <- as.POSIXct('2015-12-10 23:00:00', tz = 'UTC')
receptors$zagl <- 5
# Footprint grid settings, must set at least xmn, xmx, ymn, ymx below
hnf_plume <- T
projection <- '+proj=longlat'
smooth_factor <- 1
time_integrate <- F
xmn <- -112.30
xmx <- -111.52
ymn <- 40.390
ymx <- 40.95
xres <- 0.002
yres <- xres
# Meteorological data input
met_directory <- '/shared/stilt-tutorials/02-train/met/'
met_file_format <- '%Y%m%d.%Hz.hrrra'
n_met_min <- 1
# Model control
n_hours <- -24
numpar <- 200
rm_dat <- T
run_foot <- T
run_trajec <- T
timeout <- 3600
varsiwant <- c('time', 'indx', 'long', 'lati', 'zagl', 'sigw', 'tlgr', 'zsfc',
'icdx', 'temp', 'samt', 'foot', 'shtf', 'tcld', 'dmas', 'dens',
'rhfr', 'sphu', 'solw', 'lcld', 'zloc', 'dswf', 'wout', 'mlht',
'rain', 'crai', 'pres')
# Transport and dispersion settings
conage <- 48
cpack <- 1
delt <- 0
dxf <- 1
dyf <- 1
dzf <- 0.1
emisshrs <- 0.01
frhmax <- 3
frhs <- 1
frme <- 0.1
frmr <- 0
frts <- 0.1
frvs <- 0.1
hscale <- 10800
ichem <- 0
iconvect <- 0
initd <- 0
isot <- 0
kbls <- 1
kblt <- 1
kdef <- 1
khmax <- 9999
kmix0 <- 250
kmixd <- 3
kmsl <- 0
kpuff <- 0
krnd <- 6
kspl <- 1
kzmix <- 1
maxdim <- 1
maxpar <- min(10000, numpar)
mgmin <- 2000
ncycl <- 0
# NOTE(review): the original assigned ndump and ninit twice in a row with
# identical values; the redundant second pair has been removed.
ndump <- 0
ninit <- 1
nturb <- 0
outdt <- 0
outfrac <- 0.9
p10f <- 1
qcycle <- 0
random <- 1
splitf <- 1
tkerd <- 0.18
tkern <- 0.18
tlfrac <- 0.1
tratio <- 0.9
tvmix <- 1
veght <- 0.5
vscale <- 200
w_option <- 0
zicontroltf <- 0
ziscale <- rep(list(rep(0.8, 24)), nrow(receptors))
z_top <- 25000
# Transport error settings
horcoruverr <- NA
siguverr <- NA
tluverr <- NA
zcoruverr <- NA
horcorzierr <- NA
sigzierr <- NA
tlzierr <- NA
# Interface to mutate the output object with user defined functions
before_trajec <- function() {output}
before_footprint <- function() {output}
# Startup messages -------------------------------------------------------------
message('Initializing STILT')
message('Number of receptors: ', nrow(receptors))
message('Number of parallel threads: ', n_nodes * n_cores)
# Source dependencies ----------------------------------------------------------
setwd(stilt_wd)
source('r/dependencies.r')
# Structure out directory ------------------------------------------------------
# Outputs are organized in three formats. by-id contains simulation files by
# unique simulation identifier. particles and footprints contain symbolic links
# to the particle trajectory and footprint files in by-id
system(paste0('rm -r ', output_wd, '/footprints'), ignore.stderr = T)
if (run_trajec) {
system(paste0('rm -r ', output_wd, '/by-id'), ignore.stderr = T)
system(paste0('rm -r ', output_wd, '/particles'), ignore.stderr = T)
}
for (d in c('by-id', 'particles', 'footprints')) {
d <- file.path(output_wd, d)
if (!file.exists(d))
dir.create(d, recursive = T)
}
# Met path symlink -------------------------------------------------------------
# Auto symlink the meteorological data path to the user's home directory to
# eliminate issues with long (>80 char) paths in fortran
if ((nchar(paste0(met_directory, met_file_format)) + 2) > 80) {
met_loc <- file.path(path.expand('~'), paste0('m', project))
if (!file.exists(met_loc)) invisible(file.symlink(met_directory, met_loc))
} else met_loc <- met_directory
# Run trajectory simulations ---------------------------------------------------
stilt_apply(FUN = simulation_step,
slurm = slurm,
slurm_options = slurm_options,
n_cores = n_cores,
n_nodes = n_nodes,
before_footprint = list(before_footprint),
before_trajec = list(before_trajec),
conage = conage,
cpack = cpack,
delt = delt,
emisshrs = emisshrs,
frhmax = frhmax,
frhs = frhs,
frme = frme,
frmr = frmr,
frts = frts,
frvs = frvs,
hnf_plume = hnf_plume,
horcoruverr = horcoruverr,
horcorzierr = horcorzierr,
ichem = ichem,
iconvect = iconvect,
initd = initd,
isot = isot,
kbls = kbls,
kblt = kblt,
kdef = kdef,
khmax = khmax,
kmix0 = kmix0,
kmixd = kmixd,
kmsl = kmsl,
kpuff = kpuff,
krnd = krnd,
kspl = kspl,
kzmix = kzmix,
maxdim = maxdim,
maxpar = maxpar,
lib.loc = lib.loc,
met_file_format = met_file_format,
met_loc = met_loc,
mgmin = mgmin,
n_hours = n_hours,
n_met_min = n_met_min,
ncycl = ncycl,
ndump = ndump,
ninit = ninit,
nturb = nturb,
numpar = numpar,
outdt = outdt,
outfrac = outfrac,
output_wd = output_wd,
p10f = p10f,
projection = projection,
qcycle = qcycle,
r_run_time = receptors$run_time,
r_lati = receptors$lati,
r_long = receptors$long,
r_zagl = receptors$zagl,
random = random,
rm_dat = rm_dat,
run_foot = run_foot,
run_trajec = run_trajec,
siguverr = siguverr,
sigzierr = sigzierr,
smooth_factor = smooth_factor,
splitf = splitf,
stilt_wd = stilt_wd,
time_integrate = time_integrate,
timeout = timeout,
tkerd = tkerd, tkern = tkern,
tlfrac = tlfrac,
tluverr = tluverr,
tlzierr = tlzierr,
tratio = tratio,
tvmix = tvmix,
varsiwant = list(varsiwant),
veght = veght,
vscale = vscale,
w_option = w_option,
xmn = xmn,
xmx = xmx,
xres = xres,
ymn = ymn,
ymx = ymx,
yres = yres,
zicontroltf = zicontroltf,
ziscale = ziscale,
z_top = z_top,
zcoruverr = zcoruverr) |
#' align plots on an axis
#'
#' given a list of plots, align them on plotting space
#' @name align_plot
#' @param plot_list list of ggplot objects
#' @param axis character string to specify the axis to align plotting space on, one of both, width, height
#' @return ggplotGrob object
#' @import gridExtra
#' @import gtable
align_plot <- function(plot_list, axis='both')
{
    # Align the plotting regions of a list of ggplot objects so their panels
    # line up when arranged together; returns a combined arrangeGrob.
    # convert all ggplot objects to grob objects
    plots <- lapply(plot_list, ggplotGrob)
    # if specified align the plot widths
    if(axis == 'width' | axis == 'both')
    {
        # Take the element-wise maximum width across all plots...
        maxwidth <- do.call(grid::unit.pmax, lapply(plots, extr_ggplotGrob_width))
        # ...and apply it to every plot in the list
        plots <- lapply(plots, assign_ggplotGrob_width, maxwidth)
    }
    # if specified align the plot heights
    if(axis == 'height' | axis == 'both')
    {
        # Take the element-wise maximum height across all plots...
        maxheight <- do.call(grid::unit.pmax, lapply(plots, extr_ggplotGrob_height))
        # BUG FIX: the original called assign_ggplotGrob_width() here, so the
        # computed max *height* was applied to the plot *widths*. Assumes an
        # assign_ggplotGrob_height() helper exists analogously to
        # extr_ggplotGrob_height() -- confirm against the package sources.
        plots <- lapply(plots, assign_ggplotGrob_height, maxheight)
    }
    # Combine the plots of now equal widths/heights
    plots <- do.call(arrangeGrob, plots)
    return(plots)
}
#'
#' given a list of plots, align them on plotting space
#' @name align_plot
#' @param plot_list list of ggplot objects
#' @param axis character string to specify the axis to align plotting space on, one of both, width, height
#' @return ggplotGrob object
#' @import gridExtra
#' @import gtable
align_plot <- function(plot_list, axis='both')
{
    # Align the plotting regions of a list of ggplot objects so their panels
    # line up when arranged together; returns a combined arrangeGrob.
    # convert all ggplot objects to grob objects
    plots <- lapply(plot_list, ggplotGrob)
    # if specified align the plot widths
    if(axis == 'width' | axis == 'both')
    {
        # Take the element-wise maximum width across all plots...
        maxwidth <- do.call(grid::unit.pmax, lapply(plots, extr_ggplotGrob_width))
        # ...and apply it to every plot in the list
        plots <- lapply(plots, assign_ggplotGrob_width, maxwidth)
    }
    # if specified align the plot heights
    if(axis == 'height' | axis == 'both')
    {
        # Take the element-wise maximum height across all plots...
        maxheight <- do.call(grid::unit.pmax, lapply(plots, extr_ggplotGrob_height))
        # BUG FIX: the original called assign_ggplotGrob_width() here, so the
        # computed max *height* was applied to the plot *widths*. Assumes an
        # assign_ggplotGrob_height() helper exists analogously to
        # extr_ggplotGrob_height() -- confirm against the package sources.
        plots <- lapply(plots, assign_ggplotGrob_height, maxheight)
    }
    # Combine the plots of now equal widths/heights
    plots <- do.call(arrangeGrob, plots)
    return(plots)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/r-functions.selection.R
\name{compute.spatial.stats}
\alias{compute.spatial.stats}
\title{Compute statistics related to mutational spatial distributions}
\usage{
compute.spatial.stats(output.folder, output.prefix, bw = 5, m = 3)
}
\arguments{
\item{output.folder:}{A folder containing a "mut.summary.txt" file produced by the parse.aggregated.mut() function.}
\item{output.prefix:}{A prefix string used label the various files. Spatial statistics is written to a file "prefix.spatial.txt".}
}
\description{
Compute statistics related to mutational spatial distributions
}
\examples{
compute.spatial.stats(output.folder = './examples/', output.prefix = 'TCGA.ACC')
}
| /man/compute.spatial.stats.Rd | no_license | liliulab/gust | R | false | true | 729 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/r-functions.selection.R
\name{compute.spatial.stats}
\alias{compute.spatial.stats}
\title{Compute statistics related to mutational spatial distributions}
\usage{
compute.spatial.stats(output.folder, output.prefix, bw = 5, m = 3)
}
\arguments{
\item{output.folder:}{A folder containing a "mut.summary.txt" file produced by the parse.aggregated.mut() function.}
\item{output.prefix:}{A prefix string used label the various files. Spatial statistics is written to a file "prefix.spatial.txt".}
}
\description{
Compute statistics related to mutational spatial distributions
}
\examples{
compute.spatial.stats(output.folder = './examples/', output.prefix = 'TCGA.ACC')
}
|
# Fuzz-test fixture: an argument list for DLMtool::LBSPRgen exercising
# degenerate/extreme inputs (denormal doubles, zero-length vectors,
# all-zero bins, very large integer counts).
testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479674L, rLens = numeric(0))
# Invoke the compiled routine with the fixture arguments.
result <- do.call(DLMtool::LBSPRgen,testlist)
# Print the structure of whatever comes back (crash check only).
str(result)
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
# #====================== calculating MI matrix of a dataset
# dataMat <- as.matrix(matA)
# dataMat <- scale(dataMat)
# dataMat <- discretize(dataMat)
# minetMIM1 <- build.mim(dataMat)
#
# minetMIM2 <- build.mim(matA)
#
# all(minetMIM2 == minetMIM3)
#
# View(minetMIM2)
# View(minetMIM3)
# MIMat <- matrix(0,nrow = ngenes, ncol = ngenes)
#
# rownames(MIMat) <- geneNames
# colnames(MIMat) <- geneNames
#
# #start.time <- Sys.time()
# for(a in 1:ngenes){
# for(b in 1:ngenes){
# MIMat[a,b] <- mutinformation(dataMat[,a],dataMat[,b])
#
# }
#
# }
# mim <- round(build.mim(matA), 5)
# df <- data.frame(mim)
# fname <- paste(rStoreDataPath,"mim.tsv",sep="")
# write.table(df, file = fname, sep = "\t")
#
# rm(list=ls())
# end.time <- Sys.time()
# time.taken <- end.time - start.time
# time.taken
#
# df <- data.frame(MIMat)
# write.table(df, file = "C:\\Users\\USER\\Desktop\\dream4 data\\Dream4-5MIMatrix.tsv", sep = "\t",row.names = F)
#
# # Dream4-1MIMatrix.tsv
#
# rm(list=ls())
#=================================================
# getting net 1 MI matrix
fname <- paste(rStoreDataPath,"mim.tsv",sep="")
# Mutual-information matrix (genes x genes), precomputed and stored on disk.
MIMat <- read.table(fname, header = T, sep = "\t")
MIMat <- as.matrix(MIMat)
# No-cluster DPI (data processing inequality) run starts here.
start.time <- as.numeric(Sys.time())
# Per-threshold confusion-matrix tallies and derived metrics, filled in below.
tp <- c()
fp <- c()
tn <- c()
fn <- c()
precision <- c()
recall <- c()
# resultNoClusterConnectoinMatrix.tsv
# Candidate gene-gene connection weights, to be thresholded and DPI-pruned.
fname <- paste(rStoreDataPath,"resultNoClusterConnectoinMatrix.tsv",sep="")
tmpgconnMat <- read.table(fname, header = T, sep = "\t")
geneConnectionMat <- as.matrix(tmpgconnMat)
# for each threshold find TPs,FPs......
for(n in 1:length(thresholdVctr)){
threshold <- thresholdVctr[n]
tempGeneConnMat <- geneConnectionMat
for(i in 1:ngenes){
for(j in 1:ngenes){
if(abs(tempGeneConnMat[i,j]) <= threshold){
if(abs(tempGeneConnMat[j,i]) <= threshold){
tempGeneConnMat[i,j] <- 0
tempGeneConnMat[j,i] <- 0
}else{
tempGeneConnMat[i,j] <- 1
tempGeneConnMat[j,i] <- 1
}
}else{
tempGeneConnMat[i,j] <- 1
tempGeneConnMat[j,i] <- 1
}
}
}
dpiTempGeneConnMat <- tempGeneConnMat
# now run DPI
for(a in 3:ngenes){
for(b in 2:(a-1)){
for(c in 1:(b-1)){
# do DPI only when a,b / b,c / a,c is 1 in geneConnectionMat
if(dpiTempGeneConnMat[a,b] == 1){
if(dpiTempGeneConnMat[b,c] == 1){
if(dpiTempGeneConnMat[a,c] == 1){
# all three genes have pair connection
ab <- MIMat[a,b]
bc <- MIMat[b,c]
ac <- MIMat[a,c]
if(ab <= min(bc,ac)){
# ab is indirect, remove it
tempGeneConnMat[a,b] = 0
tempGeneConnMat[b,a] = 0
}else if(bc <= min(ab,ac)){
# bc is indirect, remove it
tempGeneConnMat[b,c] = 0
tempGeneConnMat[c,b] = 0
}else if(ac <= min(ab,bc)){
# ac is indirect, remove it
tempGeneConnMat[a,c] = 0
tempGeneConnMat[c,a] = 0
}
}
}
}
}
}
}
#=============================== TP, FP ,TN, FN
ctp <- 0
cfp <- 0
cfn <- 0
ctn <- 0
for(e in 1:ngenes){
#for each row
for(f in 1:ngenes){
#for each col
if((tempGeneConnMat[e,f]==1) & (gsConnectionMat[e,f]==1)){
#TP
ctp <- ctp + 1
}else if((tempGeneConnMat[e,f]==1) & (gsConnectionMat[e,f]==0)){
#FP
cfp <- cfp + 1
}else if((tempGeneConnMat[e,f]==0) & (gsConnectionMat[e,f]==1)){
#FN
cfn <- cfn + 1
}else{
#TN
ctn <- ctn + 1
}
}
}
cpr <- ctp/(ctp+cfp)
crc <- ctp/(ctp+cfn)
tp[n] <- ctp
fp[n] <- cfp
tn[n] <- ctn
fn[n] <- cfn
precision[n] <- cpr
recall[n] <- crc
print(paste("done with threshold = ", threshold))
} # for each threshold ends
end.time <- as.numeric(Sys.time())
time.taken <- end.time - start.time
print(paste("DPI NO CLUSTER VERSION......time taken = ",time.taken," sec"))
df <- data.frame(THRESHOLD=thresholdVctr,TP=tp,FP=fp,TN=tn,FN=fn,PRECISION=precision,
RECALL=recall)
fname <- paste(rStoreDataPath ,"resultNoClusterTPFPsForAllThresholdDPI.tsv",sep="")
write.table(df, file = fname, sep = "\t", row.names = FALSE)
# store the time needed for each cluster
df <- data.frame(TIME=time.taken)
fname <- paste(rStoreDataPath ,"resultNoClusterTimeInSecDPI.tsv",sep="")
write.table(df, file = fname, sep = "\t", row.names = FALSE)
#============================================
# SELECTED MERGED FROM UNMERGED DPI
timeTaken <- c()
for(m in 1:length(clusterVCtr)){
start.time <- as.numeric(Sys.time())
tp <- c()
fp <- c()
tn <- c()
fn <- c()
precision <- c()
recall <- c()
ncluster <- clusterVCtr[m]
# resultSelectedMergedFromUnMergedCluster=2ConnectoinMatrix.tsv
fname <- paste(rStoreDataPath,"resultSelectedMergedFromUnMergedCluster=", ncluster,"ConnectoinMatrix.tsv",sep="")
tmpgconnMat <- read.table(fname, header = T, sep = "\t")
geneConnectionMat <- as.matrix(tmpgconnMat)
# for each threshold find TPs,FPs......
for(n in 1:length(thresholdVctr)){
threshold <- thresholdVctr[n]
tempGeneConnMat <- geneConnectionMat
for(i in 1:ngenes){
for(j in 1:ngenes){
if(abs(tempGeneConnMat[i,j]) <= threshold){
if(abs(tempGeneConnMat[j,i]) <= threshold){
tempGeneConnMat[i,j] <- 0
tempGeneConnMat[j,i] <- 0
}else{
tempGeneConnMat[i,j] <- 1
tempGeneConnMat[j,i] <- 1
}
}else{
tempGeneConnMat[i,j] <- 1
tempGeneConnMat[j,i] <- 1
}
}
}
dpiTempGeneConnMat <- tempGeneConnMat
# now run DPI
for(a in 3:ngenes){
for(b in 2:(a-1)){
for(c in 1:(b-1)){
# do DPI only when a,b / b,c / a,c is 1 in geneConnectionMat
if(dpiTempGeneConnMat[a,b] == 1){
if(dpiTempGeneConnMat[b,c] == 1){
if(dpiTempGeneConnMat[a,c] == 1){
# all three genes have pair connection
ab <- MIMat[a,b]
bc <- MIMat[b,c]
ac <- MIMat[a,c]
if(ab <= min(bc,ac)){
# ab is indirect, remove it
tempGeneConnMat[a,b] = 0
tempGeneConnMat[b,a] = 0
}else if(bc <= min(ab,ac)){
# bc is indirect, remove it
tempGeneConnMat[b,c] = 0
tempGeneConnMat[c,b] = 0
}else if(ac <= min(ab,bc)){
# ac is indirect, remove it
tempGeneConnMat[a,c] = 0
tempGeneConnMat[c,a] = 0
}
}
}
}
}
}
}
#=============================== TP, FP ,TN, FN
ctp <- 0
cfp <- 0
cfn <- 0
ctn <- 0
for(e in 1:ngenes){
#for each row
for(f in 1:ngenes){
#for each col
if((tempGeneConnMat[e,f]==1) & (gsConnectionMat[e,f]==1)){
#TP
ctp <- ctp + 1
}else if((tempGeneConnMat[e,f]==1) & (gsConnectionMat[e,f]==0)){
#FP
cfp <- cfp + 1
}else if((tempGeneConnMat[e,f]==0) & (gsConnectionMat[e,f]==1)){
#FN
cfn <- cfn + 1
}else{
#TN
ctn <- ctn + 1
}
}
}
cpr <- ctp/(ctp+cfp)
crc <- ctp/(ctp+cfn)
tp[n] <- ctp
fp[n] <- cfp
tn[n] <- ctn
fn[n] <- cfn
precision[n] <- cpr
recall[n] <- crc
print(paste("done for cluster = ",ncluster," with threshold = ", threshold))
} # for each threshold ends
end.time <- as.numeric(Sys.time())
time.taken <- end.time - start.time
timeTaken[m] <- time.taken
print(paste("DPI SELECTED MERGED VERSION............time taken for cluster = ",ncluster, " is ",time.taken," sec"))
df <- data.frame(THRESHOLD=thresholdVctr,TP=tp,FP=fp,TN=tn,FN=fn,PRECISION=precision,
RECALL=recall)
# resultSelectedMergedFromUnMergedCluster=2TPFPsForAllThreshold.tsv
fname <- paste(rStoreDataPath ,"resultSelectedMergedFromUnMergedCluster=",ncluster,"TPFPsForAllThresholdDPI.tsv",sep="")
write.table(df, file = fname, sep = "\t", row.names = FALSE)
} # for loops ends for diff cluster
# store the time needed for each cluster
df <- data.frame(CLUSTER=clusterVCtr ,TIME=timeTaken)
fname <- paste(rStoreDataPath ,"resultSelectedMergedFromUnMergedCluster=TimeInSecDPI.tsv",sep="")
write.table(df, file = fname, sep = "\t", row.names = FALSE)
#=======================================
# UNMERGED DPI
timeTaken <- c()
for(m in 1:length(clusterVCtr)){
start.time <- as.numeric(Sys.time())
tp <- c()
fp <- c()
tn <- c()
fn <- c()
precision <- c()
recall <- c()
ncluster <- clusterVCtr[m]
fname <- paste(rStoreDataPath,"resultUnMergedCluster=", ncluster,"ConnectoinMatrix.tsv",sep="")
tmpgconnMat <- read.table(fname, header = T, sep = "\t")
geneConnectionMat <- as.matrix(tmpgconnMat)
# for each threshold find TPs,FPs......
for(n in 1:length(thresholdVctr)){
threshold <- thresholdVctr[n]
tempGeneConnMat <- geneConnectionMat
for(i in 1:ngenes){
for(j in 1:ngenes){
if(abs(tempGeneConnMat[i,j]) <= threshold){
if(abs(tempGeneConnMat[j,i]) <= threshold){
tempGeneConnMat[i,j] <- 0
tempGeneConnMat[j,i] <- 0
}else{
tempGeneConnMat[i,j] <- 1
tempGeneConnMat[j,i] <- 1
}
}else{
tempGeneConnMat[i,j] <- 1
tempGeneConnMat[j,i] <- 1
}
}
}
dpiTempGeneConnMat <- tempGeneConnMat
# now run DPI
for(a in 3:ngenes){
for(b in 2:(a-1)){
for(c in 1:(b-1)){
# do DPI only when a,b / b,c / a,c is 1 in geneConnectionMat
if(dpiTempGeneConnMat[a,b] == 1){
if(dpiTempGeneConnMat[b,c] == 1){
if(dpiTempGeneConnMat[a,c] == 1){
# all three genes have pair connection
ab <- MIMat[a,b]
bc <- MIMat[b,c]
ac <- MIMat[a,c]
if(ab <= min(bc,ac)){
# ab is indirect, remove it
tempGeneConnMat[a,b] = 0
tempGeneConnMat[b,a] = 0
}else if(bc <= min(ab,ac)){
# bc is indirect, remove it
tempGeneConnMat[b,c] = 0
tempGeneConnMat[c,b] = 0
}else if(ac <= min(ab,bc)){
# ac is indirect, remove it
tempGeneConnMat[a,c] = 0
tempGeneConnMat[c,a] = 0
}
}
}
}
}
}
}
#=============================== TP, FP ,TN, FN
ctp <- 0
cfp <- 0
cfn <- 0
ctn <- 0
for(e in 1:ngenes){
#for each row
for(f in 1:ngenes){
#for each col
if((tempGeneConnMat[e,f]==1) & (gsConnectionMat[e,f]==1)){
#TP
ctp <- ctp + 1
}else if((tempGeneConnMat[e,f]==1) & (gsConnectionMat[e,f]==0)){
#FP
cfp <- cfp + 1
}else if((tempGeneConnMat[e,f]==0) & (gsConnectionMat[e,f]==1)){
#FN
cfn <- cfn + 1
}else{
#TN
ctn <- ctn + 1
}
}
}
cpr <- ctp/(ctp+cfp)
crc <- ctp/(ctp+cfn)
tp[n] <- ctp
fp[n] <- cfp
tn[n] <- ctn
fn[n] <- cfn
precision[n] <- cpr
recall[n] <- crc
print(paste("done for cluster = ",ncluster," after DPI with threshold = ", threshold))
} # for each threshold ends
end.time <- as.numeric(Sys.time())
time.taken <- end.time - start.time
timeTaken[m] <- time.taken
print(paste("UNMERGED VERSION............time taken for cluster = ",ncluster, " is ",time.taken," sec"))
df <- data.frame(THRESHOLD=thresholdVctr,TP=tp,FP=fp,TN=tn,FN=fn,PRECISION=precision,
RECALL=recall)
fname <- paste(rStoreDataPath ,"resultUnMergedCluster=",ncluster,"TPFPsForAllThresholdDPI.tsv",sep="")
write.table(df, file = fname, sep = "\t", row.names = FALSE)
} # for loops ends for diff cluster
# store the time needed for each cluster
df <- data.frame(CLUSTER=clusterVCtr ,TIME=timeTaken)
fname <- paste(rStoreDataPath ,"resultUnMergedCluster=TimeInSecDPI.tsv",sep="")
write.table(df, file = fname, sep = "\t", row.names = FALSE)
#==================================DPI END
| /Dream4DPI.R | no_license | aag13/thesis | R | false | false | 14,049 | r | # #====================== calculating MI matrix of a dataset
# dataMat <- as.matrix(matA)
# dataMat <- scale(dataMat)
# dataMat <- discretize(dataMat)
# minetMIM1 <- build.mim(dataMat)
#
# minetMIM2 <- build.mim(matA)
#
# all(minetMIM2 == minetMIM3)
#
# View(minetMIM2)
# View(minetMIM3)
# MIMat <- matrix(0,nrow = ngenes, ncol = ngenes)
#
# rownames(MIMat) <- geneNames
# colnames(MIMat) <- geneNames
#
# #start.time <- Sys.time()
# for(a in 1:ngenes){
# for(b in 1:ngenes){
# MIMat[a,b] <- mutinformation(dataMat[,a],dataMat[,b])
#
# }
#
# }
# mim <- round(build.mim(matA), 5)
# df <- data.frame(mim)
# fname <- paste(rStoreDataPath,"mim.tsv",sep="")
# write.table(df, file = fname, sep = "\t")
#
# rm(list=ls())
# end.time <- Sys.time()
# time.taken <- end.time - start.time
# time.taken
#
# df <- data.frame(MIMat)
# write.table(df, file = "C:\\Users\\USER\\Desktop\\dream4 data\\Dream4-5MIMatrix.tsv", sep = "\t",row.names = F)
#
# # Dream4-1MIMatrix.tsv
#
# rm(list=ls())
#=================================================
# getting net 1 MI matrix
fname <- paste(rStoreDataPath,"mim.tsv",sep="")
MIMat <- read.table(fname, header = T, sep = "\t")
MIMat <- as.matrix(MIMat)
# No CLuster DPI
start.time <- as.numeric(Sys.time())
tp <- c()
fp <- c()
tn <- c()
fn <- c()
precision <- c()
recall <- c()
# resultNoClusterConnectoinMatrix.tsv
fname <- paste(rStoreDataPath,"resultNoClusterConnectoinMatrix.tsv",sep="")
tmpgconnMat <- read.table(fname, header = T, sep = "\t")
geneConnectionMat <- as.matrix(tmpgconnMat)
# for each threshold find TPs,FPs......
for(n in 1:length(thresholdVctr)){
threshold <- thresholdVctr[n]
tempGeneConnMat <- geneConnectionMat
for(i in 1:ngenes){
for(j in 1:ngenes){
if(abs(tempGeneConnMat[i,j]) <= threshold){
if(abs(tempGeneConnMat[j,i]) <= threshold){
tempGeneConnMat[i,j] <- 0
tempGeneConnMat[j,i] <- 0
}else{
tempGeneConnMat[i,j] <- 1
tempGeneConnMat[j,i] <- 1
}
}else{
tempGeneConnMat[i,j] <- 1
tempGeneConnMat[j,i] <- 1
}
}
}
dpiTempGeneConnMat <- tempGeneConnMat
# now run DPI
for(a in 3:ngenes){
for(b in 2:(a-1)){
for(c in 1:(b-1)){
# do DPI only when a,b / b,c / a,c is 1 in geneConnectionMat
if(dpiTempGeneConnMat[a,b] == 1){
if(dpiTempGeneConnMat[b,c] == 1){
if(dpiTempGeneConnMat[a,c] == 1){
# all three genes have pair connection
ab <- MIMat[a,b]
bc <- MIMat[b,c]
ac <- MIMat[a,c]
if(ab <= min(bc,ac)){
# ab is indirect, remove it
tempGeneConnMat[a,b] = 0
tempGeneConnMat[b,a] = 0
}else if(bc <= min(ab,ac)){
# bc is indirect, remove it
tempGeneConnMat[b,c] = 0
tempGeneConnMat[c,b] = 0
}else if(ac <= min(ab,bc)){
# ac is indirect, remove it
tempGeneConnMat[a,c] = 0
tempGeneConnMat[c,a] = 0
}
}
}
}
}
}
}
#=============================== TP, FP ,TN, FN
ctp <- 0
cfp <- 0
cfn <- 0
ctn <- 0
for(e in 1:ngenes){
#for each row
for(f in 1:ngenes){
#for each col
if((tempGeneConnMat[e,f]==1) & (gsConnectionMat[e,f]==1)){
#TP
ctp <- ctp + 1
}else if((tempGeneConnMat[e,f]==1) & (gsConnectionMat[e,f]==0)){
#FP
cfp <- cfp + 1
}else if((tempGeneConnMat[e,f]==0) & (gsConnectionMat[e,f]==1)){
#FN
cfn <- cfn + 1
}else{
#TN
ctn <- ctn + 1
}
}
}
cpr <- ctp/(ctp+cfp)
crc <- ctp/(ctp+cfn)
tp[n] <- ctp
fp[n] <- cfp
tn[n] <- ctn
fn[n] <- cfn
precision[n] <- cpr
recall[n] <- crc
print(paste("done with threshold = ", threshold))
} # for each threshold ends
end.time <- as.numeric(Sys.time())
time.taken <- end.time - start.time
print(paste("DPI NO CLUSTER VERSION......time taken = ",time.taken," sec"))
df <- data.frame(THRESHOLD=thresholdVctr,TP=tp,FP=fp,TN=tn,FN=fn,PRECISION=precision,
RECALL=recall)
fname <- paste(rStoreDataPath ,"resultNoClusterTPFPsForAllThresholdDPI.tsv",sep="")
write.table(df, file = fname, sep = "\t", row.names = FALSE)
# store the time needed for each cluster
df <- data.frame(TIME=time.taken)
fname <- paste(rStoreDataPath ,"resultNoClusterTimeInSecDPI.tsv",sep="")
write.table(df, file = fname, sep = "\t", row.names = FALSE)
#============================================
# SELECTED MERGED FROM UNMERGED DPI
timeTaken <- c()
for(m in 1:length(clusterVCtr)){
start.time <- as.numeric(Sys.time())
tp <- c()
fp <- c()
tn <- c()
fn <- c()
precision <- c()
recall <- c()
ncluster <- clusterVCtr[m]
# resultSelectedMergedFromUnMergedCluster=2ConnectoinMatrix.tsv
fname <- paste(rStoreDataPath,"resultSelectedMergedFromUnMergedCluster=", ncluster,"ConnectoinMatrix.tsv",sep="")
tmpgconnMat <- read.table(fname, header = T, sep = "\t")
geneConnectionMat <- as.matrix(tmpgconnMat)
# for each threshold find TPs,FPs......
for(n in 1:length(thresholdVctr)){
threshold <- thresholdVctr[n]
tempGeneConnMat <- geneConnectionMat
for(i in 1:ngenes){
for(j in 1:ngenes){
if(abs(tempGeneConnMat[i,j]) <= threshold){
if(abs(tempGeneConnMat[j,i]) <= threshold){
tempGeneConnMat[i,j] <- 0
tempGeneConnMat[j,i] <- 0
}else{
tempGeneConnMat[i,j] <- 1
tempGeneConnMat[j,i] <- 1
}
}else{
tempGeneConnMat[i,j] <- 1
tempGeneConnMat[j,i] <- 1
}
}
}
dpiTempGeneConnMat <- tempGeneConnMat
# now run DPI
for(a in 3:ngenes){
for(b in 2:(a-1)){
for(c in 1:(b-1)){
# do DPI only when a,b / b,c / a,c is 1 in geneConnectionMat
if(dpiTempGeneConnMat[a,b] == 1){
if(dpiTempGeneConnMat[b,c] == 1){
if(dpiTempGeneConnMat[a,c] == 1){
# all three genes have pair connection
ab <- MIMat[a,b]
bc <- MIMat[b,c]
ac <- MIMat[a,c]
if(ab <= min(bc,ac)){
# ab is indirect, remove it
tempGeneConnMat[a,b] = 0
tempGeneConnMat[b,a] = 0
}else if(bc <= min(ab,ac)){
# bc is indirect, remove it
tempGeneConnMat[b,c] = 0
tempGeneConnMat[c,b] = 0
}else if(ac <= min(ab,bc)){
# ac is indirect, remove it
tempGeneConnMat[a,c] = 0
tempGeneConnMat[c,a] = 0
}
}
}
}
}
}
}
#=============================== TP, FP ,TN, FN
ctp <- 0
cfp <- 0
cfn <- 0
ctn <- 0
for(e in 1:ngenes){
#for each row
for(f in 1:ngenes){
#for each col
if((tempGeneConnMat[e,f]==1) & (gsConnectionMat[e,f]==1)){
#TP
ctp <- ctp + 1
}else if((tempGeneConnMat[e,f]==1) & (gsConnectionMat[e,f]==0)){
#FP
cfp <- cfp + 1
}else if((tempGeneConnMat[e,f]==0) & (gsConnectionMat[e,f]==1)){
#FN
cfn <- cfn + 1
}else{
#TN
ctn <- ctn + 1
}
}
}
cpr <- ctp/(ctp+cfp)
crc <- ctp/(ctp+cfn)
tp[n] <- ctp
fp[n] <- cfp
tn[n] <- ctn
fn[n] <- cfn
precision[n] <- cpr
recall[n] <- crc
print(paste("done for cluster = ",ncluster," with threshold = ", threshold))
} # for each threshold ends
end.time <- as.numeric(Sys.time())
time.taken <- end.time - start.time
timeTaken[m] <- time.taken
print(paste("DPI SELECTED MERGED VERSION............time taken for cluster = ",ncluster, " is ",time.taken," sec"))
df <- data.frame(THRESHOLD=thresholdVctr,TP=tp,FP=fp,TN=tn,FN=fn,PRECISION=precision,
RECALL=recall)
# resultSelectedMergedFromUnMergedCluster=2TPFPsForAllThreshold.tsv
fname <- paste(rStoreDataPath ,"resultSelectedMergedFromUnMergedCluster=",ncluster,"TPFPsForAllThresholdDPI.tsv",sep="")
write.table(df, file = fname, sep = "\t", row.names = FALSE)
} # for loops ends for diff cluster
# store the time needed for each cluster
df <- data.frame(CLUSTER=clusterVCtr ,TIME=timeTaken)
fname <- paste(rStoreDataPath ,"resultSelectedMergedFromUnMergedCluster=TimeInSecDPI.tsv",sep="")
write.table(df, file = fname, sep = "\t", row.names = FALSE)
#=======================================
# UNMERGED DPI
timeTaken <- c()
for(m in 1:length(clusterVCtr)){
start.time <- as.numeric(Sys.time())
tp <- c()
fp <- c()
tn <- c()
fn <- c()
precision <- c()
recall <- c()
ncluster <- clusterVCtr[m]
fname <- paste(rStoreDataPath,"resultUnMergedCluster=", ncluster,"ConnectoinMatrix.tsv",sep="")
tmpgconnMat <- read.table(fname, header = T, sep = "\t")
geneConnectionMat <- as.matrix(tmpgconnMat)
# for each threshold find TPs,FPs......
for(n in 1:length(thresholdVctr)){
threshold <- thresholdVctr[n]
tempGeneConnMat <- geneConnectionMat
for(i in 1:ngenes){
for(j in 1:ngenes){
if(abs(tempGeneConnMat[i,j]) <= threshold){
if(abs(tempGeneConnMat[j,i]) <= threshold){
tempGeneConnMat[i,j] <- 0
tempGeneConnMat[j,i] <- 0
}else{
tempGeneConnMat[i,j] <- 1
tempGeneConnMat[j,i] <- 1
}
}else{
tempGeneConnMat[i,j] <- 1
tempGeneConnMat[j,i] <- 1
}
}
}
dpiTempGeneConnMat <- tempGeneConnMat
# now run DPI
for(a in 3:ngenes){
for(b in 2:(a-1)){
for(c in 1:(b-1)){
# do DPI only when a,b / b,c / a,c is 1 in geneConnectionMat
if(dpiTempGeneConnMat[a,b] == 1){
if(dpiTempGeneConnMat[b,c] == 1){
if(dpiTempGeneConnMat[a,c] == 1){
# all three genes have pair connection
ab <- MIMat[a,b]
bc <- MIMat[b,c]
ac <- MIMat[a,c]
if(ab <= min(bc,ac)){
# ab is indirect, remove it
tempGeneConnMat[a,b] = 0
tempGeneConnMat[b,a] = 0
}else if(bc <= min(ab,ac)){
# bc is indirect, remove it
tempGeneConnMat[b,c] = 0
tempGeneConnMat[c,b] = 0
}else if(ac <= min(ab,bc)){
# ac is indirect, remove it
tempGeneConnMat[a,c] = 0
tempGeneConnMat[c,a] = 0
}
}
}
}
}
}
}
#=============================== TP, FP ,TN, FN
ctp <- 0
cfp <- 0
cfn <- 0
ctn <- 0
for(e in 1:ngenes){
#for each row
for(f in 1:ngenes){
#for each col
if((tempGeneConnMat[e,f]==1) & (gsConnectionMat[e,f]==1)){
#TP
ctp <- ctp + 1
}else if((tempGeneConnMat[e,f]==1) & (gsConnectionMat[e,f]==0)){
#FP
cfp <- cfp + 1
}else if((tempGeneConnMat[e,f]==0) & (gsConnectionMat[e,f]==1)){
#FN
cfn <- cfn + 1
}else{
#TN
ctn <- ctn + 1
}
}
}
cpr <- ctp/(ctp+cfp)
crc <- ctp/(ctp+cfn)
tp[n] <- ctp
fp[n] <- cfp
tn[n] <- ctn
fn[n] <- cfn
precision[n] <- cpr
recall[n] <- crc
print(paste("done for cluster = ",ncluster," after DPI with threshold = ", threshold))
} # for each threshold ends
end.time <- as.numeric(Sys.time())
time.taken <- end.time - start.time
timeTaken[m] <- time.taken
print(paste("UNMERGED VERSION............time taken for cluster = ",ncluster, " is ",time.taken," sec"))
df <- data.frame(THRESHOLD=thresholdVctr,TP=tp,FP=fp,TN=tn,FN=fn,PRECISION=precision,
RECALL=recall)
fname <- paste(rStoreDataPath ,"resultUnMergedCluster=",ncluster,"TPFPsForAllThresholdDPI.tsv",sep="")
write.table(df, file = fname, sep = "\t", row.names = FALSE)
} # for loops ends for diff cluster
# store the time needed for each cluster
df <- data.frame(CLUSTER=clusterVCtr ,TIME=timeTaken)
fname <- paste(rStoreDataPath ,"resultUnMergedCluster=TimeInSecDPI.tsv",sep="")
write.table(df, file = fname, sep = "\t", row.names = FALSE)
#==================================DPI END
|
library(MASS)
library(tidyverse)
library(modelr)
library(rethinking)
xentropy <- function(p,q) -sum(p*log(q))
entropy <- function(p) xentropy(p,p)
divergence <- function(p,q) xentropy(p,q) - entropy(p)
rd <- c(0.3, 0.7)
md1 <- c(0.5, 0.5)
md2 <- c(0.1, 0.99)
entropy(rd) #0.6108643
divergence(rd, md1) #0.08228288
####################################################
df <- tibble(
q1 = seq(0.01, 0.99, 0.01),
q2 = 1 - q1,
p1 = 0.3,
p2 = 0.7,
entropy_p = -( p1*log(p1) + p2*log(p2)),
xentropy_pq = -( p1*log(q1) + p2*log(q2)),
div_pq = xentropy_pq - entropy_p
)
df %>% ggplot() +
geom_line(aes(q1, div_pq)) +
geom_vline(aes(xintercept = p1))
####################################################
data(cars)
#visualize
cars %>% ggplot() + geom_point(aes(speed,dist))
m <- map(
alist(
dist ~ dnorm(mu,sigma),
mu <- a + b*speed,
a ~ dnorm(0,100),
b ~ dnorm(0,10),
sigma ~ dunif(0,30)
) , data=cars
)
cars %>% ggplot() + geom_point(aes(speed,dist)) +
geom_abline(aes(intercept=coef(m)["a"] , slope=coef(m)["b"]))
n_samples <- 1000
sampled_parameters <- as_tibble(
mvrnorm(n= n_samples,
mu=coef(m),
Sigma=vcov(m))
)
# 50 in cars$speed * 1000 samples
ll <- sapply(seq_len(n_samples), function(i) {
mu <- sampled_parameters$a[i] + sampled_parameters$b[i]*cars$speed
dnorm( cars$dist , mu , sampled_parameters$sigma[i] , log=TRUE )})
lppd <- sapply(seq_len(nrow(cars)), function(i) log_sum_exp(ll[i,]) - log(n_samples)
)
pWAIC <- sapply(seq_len(nrow(cars)), function(i) var(ll[i,])
)
#compute WAIC
-2*( sum(lppd) - sum(pWAIC) )
avD <- -2*rowMeans(ll)
posteriorMean <- coef(m)['a'] + coef(m)['b']*cars$speed
#what to use for sigma?
#D_at_mean <- dnorm (cars$dist, posteriorMean, ) | /chapter6/notes.R | no_license | nzxwang/statistical_rethinking | R | false | false | 1,751 | r | library(MASS)
library(tidyverse)
library(modelr)
library(rethinking)
xentropy <- function(p,q) -sum(p*log(q))
entropy <- function(p) xentropy(p,p)
divergence <- function(p,q) xentropy(p,q) - entropy(p)
rd <- c(0.3, 0.7)
md1 <- c(0.5, 0.5)
md2 <- c(0.1, 0.99)
entropy(rd) #0.6108643
divergence(rd, md1) #0.08228288
####################################################
df <- tibble(
q1 = seq(0.01, 0.99, 0.01),
q2 = 1 - q1,
p1 = 0.3,
p2 = 0.7,
entropy_p = -( p1*log(p1) + p2*log(p2)),
xentropy_pq = -( p1*log(q1) + p2*log(q2)),
div_pq = xentropy_pq - entropy_p
)
df %>% ggplot() +
geom_line(aes(q1, div_pq)) +
geom_vline(aes(xintercept = p1))
####################################################
data(cars)
#visualize
cars %>% ggplot() + geom_point(aes(speed,dist))
m <- map(
alist(
dist ~ dnorm(mu,sigma),
mu <- a + b*speed,
a ~ dnorm(0,100),
b ~ dnorm(0,10),
sigma ~ dunif(0,30)
) , data=cars
)
cars %>% ggplot() + geom_point(aes(speed,dist)) +
geom_abline(aes(intercept=coef(m)["a"] , slope=coef(m)["b"]))
n_samples <- 1000
sampled_parameters <- as_tibble(
mvrnorm(n= n_samples,
mu=coef(m),
Sigma=vcov(m))
)
# 50 in cars$speed * 1000 samples
ll <- sapply(seq_len(n_samples), function(i) {
mu <- sampled_parameters$a[i] + sampled_parameters$b[i]*cars$speed
dnorm( cars$dist , mu , sampled_parameters$sigma[i] , log=TRUE )})
lppd <- sapply(seq_len(nrow(cars)), function(i) log_sum_exp(ll[i,]) - log(n_samples)
)
pWAIC <- sapply(seq_len(nrow(cars)), function(i) var(ll[i,])
)
#compute WAIC
-2*( sum(lppd) - sum(pWAIC) )
avD <- -2*rowMeans(ll)
posteriorMean <- coef(m)['a'] + coef(m)['b']*cars$speed
#what to use for sigma?
#D_at_mean <- dnorm (cars$dist, posteriorMean, ) |
###############################################################
### BATCH RECLASSIFY AND CLUMP RASTERS ##########
###############################################################
## Reclassify SIC rasters so 1=ice and 0=else. Use clump fxn to determine pack ice ##
# ----------------- Load Data --------------------------------------#
rm(list = ls())
library(raster)
#------------------ Create directories for new folders -------------#
#load('ded_ids.RData')
#for(i in 1:length(ded)){
#dir.create(paste0('./SIC-TIFs/SIC_univ_Bremen/RCC/', ded[i]))
#}
#setwd("D:/Polar Bears/Data/SIC-TIFs/SIC_univ_Bremen/n3125/All/RCC/Remaining")
path = "D:/Polar Bears/Data/SIC-TIFs/SIC_univ_Bremen/n3125/All/RCC/Remaining"
rl <- dir(path = path, pattern='.tif', all.files=TRUE, recursive = TRUE, full.names=FALSE)
m <- c(1,15,0, 15,100,1, 100,255,0)
rclmat <- matrix(m, ncol=3, byrow=TRUE)
batch_reclass <- function(rl){
for (i in 1:length(rl)) {
r <- raster(paste(path, rl, sep = '/')) #read in raster
rc <- reclassify(r, rclmat) #reclassify such that SIC>15% = 1, else 0
rcc <- clump(rc, directions=8) # clumping algorithm
writeRaster(rcc, filename = paste0("D:/Polar Bears/Data/SIC-TIFs/SIC_univ_Bremen/n3125/All/RCC/Remaining/",
rl), format="GTiff", overwrite=TRUE)
}
}
#run the function
batch_reclass(rl)
# ------------------------- TEST ------------------------------------------------#
r <- raster('./All/asi-n3125-20110930-v5.4.tif')
plot(r)
rc <- reclassify(r, rclmat)
plot(rc)
#writeRaster(test, './Tests/bat_clump.tif')
| /bat_reclass.R | no_license | anniekellner/ch1_landing | R | false | false | 1,608 | r | ###############################################################
### BATCH RECLASSIFY AND CLUMP RASTERS ##########
###############################################################
## Reclassify SIC rasters so 1=ice and 0=else. Use clump fxn to determine pack ice ##
# ----------------- Load Data --------------------------------------#
rm(list = ls())
library(raster)
#------------------ Create directories for new folders -------------#
#load('ded_ids.RData')
#for(i in 1:length(ded)){
#dir.create(paste0('./SIC-TIFs/SIC_univ_Bremen/RCC/', ded[i]))
#}
#setwd("D:/Polar Bears/Data/SIC-TIFs/SIC_univ_Bremen/n3125/All/RCC/Remaining")
path = "D:/Polar Bears/Data/SIC-TIFs/SIC_univ_Bremen/n3125/All/RCC/Remaining"
rl <- dir(path = path, pattern='.tif', all.files=TRUE, recursive = TRUE, full.names=FALSE)
m <- c(1,15,0, 15,100,1, 100,255,0)
rclmat <- matrix(m, ncol=3, byrow=TRUE)
batch_reclass <- function(rl){
for (i in 1:length(rl)) {
r <- raster(paste(path, rl, sep = '/')) #read in raster
rc <- reclassify(r, rclmat) #reclassify such that SIC>15% = 1, else 0
rcc <- clump(rc, directions=8) # clumping algorithm
writeRaster(rcc, filename = paste0("D:/Polar Bears/Data/SIC-TIFs/SIC_univ_Bremen/n3125/All/RCC/Remaining/",
rl), format="GTiff", overwrite=TRUE)
}
}
#run the function
batch_reclass(rl)
# ------------------------- TEST ------------------------------------------------#
r <- raster('./All/asi-n3125-20110930-v5.4.tif')
plot(r)
rc <- reclassify(r, rclmat)
plot(rc)
#writeRaster(test, './Tests/bat_clump.tif')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataManipulation.R
\name{calcHybridForce}
\alias{calcHybridForce}
\title{Generate the hybrid force functions given peak, location of peak, and cutoff}
\usage{
calcHybridForce(x = seq(-0.3, 2.5, 0.01), max_F = 0.2, peak_location = 1.1)
}
\arguments{
\item{x}{the x locations}
\item{max_F}{the peak force}
\item{peak_location}{the peak location}
}
\description{
Generate the hybrid force functions given peak, location of peak, and cutoff
}
| /man/calcHybridForce.Rd | no_license | clairemiller/chasteR | R | false | true | 519 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataManipulation.R
\name{calcHybridForce}
\alias{calcHybridForce}
\title{Generate the hybrid force functions given peak, location of peak, and cutoff}
\usage{
calcHybridForce(x = seq(-0.3, 2.5, 0.01), max_F = 0.2, peak_location = 1.1)
}
\arguments{
\item{x}{the x locations}
\item{max_F}{the peak force}
\item{peak_location}{the peak location}
}
\description{
Generate the hybrid force functions given peak, location of peak, and cutoff
}
|
plot.cv.gcdclust <-
function(x, sign.lambda = 1, ...) {
cvobj <- x
xlab <- "log(Lambda)"
if (sign.lambda < 0)
xlab <- paste("-", xlab, sep = "")
plot.args <- list(x = sign.lambda * log(cvobj$lambda), y = cvobj$cvm,
ylim = range(cvobj$cvupper, cvobj$cvlo), xlab = xlab,
ylab = cvobj$name, type = "n")
new.args <- list(...)
if (length(new.args))
plot.args[names(new.args)] <- new.args
do.call("plot", plot.args)
error.bars(sign.lambda * log(cvobj$lambda), cvobj$cvupper,
cvobj$cvlo, width = 0.02, col = "darkgrey")
points(sign.lambda * log(cvobj$lambda), cvobj$cvm, pch = 20,
col = "red")
axis(side = 3, at = sign.lambda * log(cvobj$lambda), labels = paste(cvobj$nz),
tick = FALSE, line = 0)
abline(v = sign.lambda * log(cvobj$lambda.min), lty = 3)
abline(v = sign.lambda * log(cvobj$lambda.1se), lty = 3)
invisible()
}
| /R/plot.cv.gcdclust.R | no_license | KarimOualkacha/HHSVM-ClusterNet | R | false | false | 936 | r | plot.cv.gcdclust <-
function(x, sign.lambda = 1, ...) {
cvobj <- x
xlab <- "log(Lambda)"
if (sign.lambda < 0)
xlab <- paste("-", xlab, sep = "")
plot.args <- list(x = sign.lambda * log(cvobj$lambda), y = cvobj$cvm,
ylim = range(cvobj$cvupper, cvobj$cvlo), xlab = xlab,
ylab = cvobj$name, type = "n")
new.args <- list(...)
if (length(new.args))
plot.args[names(new.args)] <- new.args
do.call("plot", plot.args)
error.bars(sign.lambda * log(cvobj$lambda), cvobj$cvupper,
cvobj$cvlo, width = 0.02, col = "darkgrey")
points(sign.lambda * log(cvobj$lambda), cvobj$cvm, pch = 20,
col = "red")
axis(side = 3, at = sign.lambda * log(cvobj$lambda), labels = paste(cvobj$nz),
tick = FALSE, line = 0)
abline(v = sign.lambda * log(cvobj$lambda.min), lty = 3)
abline(v = sign.lambda * log(cvobj$lambda.1se), lty = 3)
invisible()
}
|
TCGAanalyze_ImmuneSubtypes <- function(ImmuneMW,
dataGE){
dataImmuneGroups_merged <- matrix(0,ncol(dataGE),ncol(ImmuneMW))
rownames(dataImmuneGroups_merged) <- colnames(dataGE)
colnames(dataImmuneGroups_merged) <- colnames(ImmuneMW)
dataImmuneGroups_merged <- as.data.frame(dataImmuneGroups_merged)
for( i in 1: ncol(ImmuneMW)){
cursubtype <- colnames(ImmuneMW)[i]
print(cursubtype)
ImmuneMW_cur <- ImmuneMW[,cursubtype]
reads <- dataGE
X <- reads
w <- ImmuneMW_cur
commonStemsigGenes <- intersect(names(w),rownames(X))
X <- X[commonStemsigGenes,]
w <- w[ rownames(X) ]
# Score the Matrix X using Spearman correlation.
s <- apply( X, 2, function(z) {cor( z, w, method = "sp", use = "complete.obs" )} )
## Scale the scores to be between 0 and 1
s <- s - min(s)
s <- s / max(s)
dataSce_immuneSubtypes <- cbind(s)
dataSce_immuneSubtypes <- as.data.frame(dataSce_immuneSubtypes)
colnames(dataSce_immuneSubtypes) <- cursubtype
dataImmuneGroups_merged[rownames(dataSce_immuneSubtypes),cursubtype] <- dataSce_immuneSubtypes[,1]
}
dataImmuneSubtypes <- matrix(0,nrow(dataImmuneGroups_merged),2)
rownames(dataImmuneSubtypes) <- rownames(dataImmuneGroups_merged)
colnames(dataImmuneSubtypes) <- c("Sample","ImmuneSubtype")
dataImmuneSubtypes <- as.data.frame(dataImmuneSubtypes)
dataImmuneSubtypes$Sample <- rownames(dataImmuneSubtypes)
for( j in 1:nrow(dataImmuneSubtypes)){
cursample <- dataImmuneSubtypes$Sample[j]
idx <- which(dataImmuneGroups_merged[cursample,] == max(dataImmuneGroups_merged[cursample,]))
if( length(idx)!=1) {
idx <- idx[1]
}
dataImmuneSubtypes[cursample,"ImmuneSubtype"] <- colnames(dataImmuneGroups_merged)[idx]
}
return(dataImmuneSubtypes)
}
| /inst/scripts/TCGAanalyze_ImmuneSubtypes.R | no_license | TransBioInfoLab/Bioc2019PanCancerStudy | R | false | false | 1,906 | r | TCGAanalyze_ImmuneSubtypes <- function(ImmuneMW,
dataGE){
dataImmuneGroups_merged <- matrix(0,ncol(dataGE),ncol(ImmuneMW))
rownames(dataImmuneGroups_merged) <- colnames(dataGE)
colnames(dataImmuneGroups_merged) <- colnames(ImmuneMW)
dataImmuneGroups_merged <- as.data.frame(dataImmuneGroups_merged)
for( i in 1: ncol(ImmuneMW)){
cursubtype <- colnames(ImmuneMW)[i]
print(cursubtype)
ImmuneMW_cur <- ImmuneMW[,cursubtype]
reads <- dataGE
X <- reads
w <- ImmuneMW_cur
commonStemsigGenes <- intersect(names(w),rownames(X))
X <- X[commonStemsigGenes,]
w <- w[ rownames(X) ]
# Score the Matrix X using Spearman correlation.
s <- apply( X, 2, function(z) {cor( z, w, method = "sp", use = "complete.obs" )} )
## Scale the scores to be between 0 and 1
s <- s - min(s)
s <- s / max(s)
dataSce_immuneSubtypes <- cbind(s)
dataSce_immuneSubtypes <- as.data.frame(dataSce_immuneSubtypes)
colnames(dataSce_immuneSubtypes) <- cursubtype
dataImmuneGroups_merged[rownames(dataSce_immuneSubtypes),cursubtype] <- dataSce_immuneSubtypes[,1]
}
dataImmuneSubtypes <- matrix(0,nrow(dataImmuneGroups_merged),2)
rownames(dataImmuneSubtypes) <- rownames(dataImmuneGroups_merged)
colnames(dataImmuneSubtypes) <- c("Sample","ImmuneSubtype")
dataImmuneSubtypes <- as.data.frame(dataImmuneSubtypes)
dataImmuneSubtypes$Sample <- rownames(dataImmuneSubtypes)
for( j in 1:nrow(dataImmuneSubtypes)){
cursample <- dataImmuneSubtypes$Sample[j]
idx <- which(dataImmuneGroups_merged[cursample,] == max(dataImmuneGroups_merged[cursample,]))
if( length(idx)!=1) {
idx <- idx[1]
}
dataImmuneSubtypes[cursample,"ImmuneSubtype"] <- colnames(dataImmuneGroups_merged)[idx]
}
return(dataImmuneSubtypes)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_timeline_label.R
\name{geom_timeline_label}
\alias{geom_timeline_label}
\title{geom_timeline_label}
\usage{
geom_timeline_label(
mapping = NULL,
data = NULL,
stat = "identity",
position = "identity",
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE,
...
)
}
\arguments{
\item{mapping}{ggplot item}
\item{data}{ggplot item}
\item{stat}{ggplot item}
\item{position}{ggplot item}
\item{na.rm}{ggplot item}
\item{show.legend}{ggplot item}
\item{inherit.aes}{ggplot item}
\item{...}{ggplot item}
}
\value{
geom timeline label function
}
\description{
geom_timeline_label
}
\examples{
\dontrun{geom_timeline_label()}
}
| /man/geom_timeline_label.Rd | permissive | A0791298/CourseraCapstone | R | false | true | 722 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_timeline_label.R
\name{geom_timeline_label}
\alias{geom_timeline_label}
\title{geom_timeline_label}
\usage{
geom_timeline_label(
mapping = NULL,
data = NULL,
stat = "identity",
position = "identity",
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE,
...
)
}
\arguments{
\item{mapping}{ggplot item}
\item{data}{ggplot item}
\item{stat}{ggplot item}
\item{position}{ggplot item}
\item{na.rm}{ggplot item}
\item{show.legend}{ggplot item}
\item{inherit.aes}{ggplot item}
\item{...}{ggplot item}
}
\value{
geom timeline label function
}
\description{
geom_timeline_label
}
\examples{
\dontrun{geom_timeline_label()}
}
|
#-----------------------------------------------------------------------------#
# #
# QUALITY CONTROL STATISTICS IN R #
# #
# An R package for statistical in-line quality control. #
# #
# Written by: Miguel A. Flores Sanchez #
# Professor of the Mathematics Department #
# Escuela Politecnica Nacional, Ecuador #
# miguel.flores@epn.edu.ec #
# #
#-----------------------------------------------------------------------------#
#-------------------------------------------------------------------------
# cusum chart
#-------------------------------------------------------------------------
##' Function to plot the cusum chart
##'
##' This function is used to compute statistics required by the cusum chart.
##'
##' @param x Object qcd (Quality Control Data)
##' @param ... arguments passed to or from methods.
##' @export
##' @examples
##'
##' library(qcr)
##' data(pistonrings)
##' attach(pistonrings)
##' res.qcd <- qcd(pistonrings, type.data = "dependence")
##' res.qcs <- qcs.cusum(res.qcd, type = "cusum")
##' summary(res.qcs)
##' plot(res.qcs)
##'
# S3 generic for cusum control charts: dispatches on class(x)
# (methods: qcs.cusum.default, qcs.cusum.qcd).
qcs.cusum <- function(x, ...) UseMethod("qcs.cusum")
##' @rdname qcs.cusum
##' @method qcs.cusum default
##' @inheritParams qcd
##' @param sizes a value or a vector of values specifying the sample sizes
##' associated with each group.
##' @param center a value specifying the center of group statistics or the
##' ''target'' value of the process.
##' @param std.dev a value or an available method specifying the within-group
##' standard deviation(s) of the process. \cr Several methods are available for
##' estimating the standard deviation.
##' @param decision.interval A numeric value specifying the number of standard
##' errors of the summary statistics at which the cumulative sum is out of
##' control.
##' @param se.shift The amount of shift to detect in the process, measured in
##' standard errors of the summary statistics.
##' @param plot a logical value indicating whether the chart should be plotted.
##' @export
##'
# Default method: wraps raw data in a quality-control-data ('qcd') object
# and delegates all cusum computation to the qcd method.
qcs.cusum.default <- function(x, var.index = 1, sample.index = 2,
                              covar.index = NULL, covar.names = NULL,
                              data.name = NULL,
                              sizes = NULL,
                              center = NULL, std.dev = NULL,
                              decision.interval = 5,
                              se.shift = 1, plot = FALSE, ...)
{
  # Build the qcd container (cusum charts use dependent-type data).
  qc.data <- qcd(data = x, var.index = var.index,
                 sample.index = sample.index,
                 covar.index = covar.index, covar.names = covar.names,
                 data.name = data.name, sizes = sizes,
                 type.data = "dependence")
  # Hand off to the workhorse method; its value is returned unchanged.
  qcs.cusum.qcd(x = qc.data, center = center, std.dev = std.dev,
                decision.interval = decision.interval,
                se.shift = se.shift, plot = plot)
}
##' @rdname qcs.cusum
##' @method qcs.cusum qcd
##' @inheritParams qcs.cusum.default
##' @export
##'
qcs.cusum.qcd <- function(x, center = NULL,
std.dev = NULL,
decision.interval = 5, se.shift = 1, plot = FALSE, ...) {
#.........................................................................
# Workhorse method: computes cusum-chart statistics for a 'qcd'
# (quality control data) object and returns a 'qcs.cusum' object.
# Validate input: must be (or extend) class 'qcd'.
# NOTE(review): the error message reads "an objects" -- runtime text,
# deliberately left unchanged here.
if(is.null(x) || !inherits(x, "qcd"))
stop("data must be an objects of class (or extending) 'qcd'")
sizes <- x$sizes
type.data <- "dependence"
# Any group of size 1 means individual observations -> "xbar.one" rules;
# otherwise grouped "xbar" rules.
std <- if(any(sizes==1)) "xbar.one" else "xbar"
# Default standard-deviation estimator depends on the chart flavour and
# on whether any group exceeds 25 observations.
if(is.null(std.dev))
{ std.dev <- switch(std,
"xbar" = { if(any(sizes > 25)) "RMSDF"
else "UWAVE-R" },
"xbar.one" = "MR")
}
# Delegate the cusum computation to the package's qcs() constructor.
# NOTE: the result is bound to a local named `qcs`, shadowing the qcs()
# function from this point on.
qcs<-qcs(x = x$x, sample.index = x$sample, sizes = sizes,
center = center, std.dev = std.dev, type = "cusum",
decision.interval = decision.interval, se.shift = se.shift, type.data = type.data)
# Unpack the fitted object's components.
center <- qcs$center
cusum <- qcs$statistics
std.dev <- qcs$std.dev
sizes <- qcs$sizes
limits <- qcs$limits
violations <- qcs$violations
pos <- qcs$pos
neg <- qcs$neg
decision.interval <- qcs$decision.interval
se.shift <- qcs$se.shift
statistics <- data.frame(cusum)
m <- length(x)
sample <- x$sample
# If the qcd object carries covariates (more than 3 columns -- presumably
# value/sample/sizes; confirm against qcd()), collapse each covariate to
# its unique value per sample and append it to the statistics table.
if (m > 3) {
new.x <- x[, -c(1, 2, length(x))]
cov <- apply(new.x, 2, function(x) unlist(lapply(split(x, sample), unique)))
statistics <- data.frame(cusum, cov)
}
row.names(statistics) <- unique(x$sample)
data.name <- attr(x, "data.name")
# Assemble the return value; class c("qcs.cusum", "qcs") drives dispatch
# of the plot/summary methods used elsewhere in the package.
result <- list(qcd = x, type = "cusum", statistics = statistics,
center = center, std.dev = std.dev,
limits = limits,
sizes = sizes, data.name = data.name,
violations = violations, pos = pos, neg = neg,
decision.interval = decision.interval,
se.shift = se.shift)
oldClass(result) <- c("qcs.cusum", "qcs")
# Optionally plot immediately; extra arguments are forwarded to plot().
if(plot) plot(result, ...)
return(result)
#.........................................................................
} # qcs.cusum.qcd | /R/qcs.cusum.r | no_license | cran/qcr | R | false | false | 5,659 | r | #-----------------------------------------------------------------------------#
# #
# QUALITY CONTROL STATISTICS IN R #
# #
# An R package for statistical in-line quality control. #
# #
# Written by: Miguel A. Flores Sanchez #
# Professor of the Mathematics Department #
# Escuela Politecnica Nacional, Ecuador #
# miguel.flores@epn.edu.ec #
# #
#-----------------------------------------------------------------------------#
#-------------------------------------------------------------------------
# cusum chart
#-------------------------------------------------------------------------
##' Function to plot the cusum chart
##'
##' This function is used to compute statistics required by the cusum chart.
##'
##' @param x Object qcd (Quality Control Data)
##' @param ... arguments passed to or from methods.
##' @export
##' @examples
##'
##' library(qcr)
##' data(pistonrings)
##' attach(pistonrings)
##' res.qcd <- qcd(pistonrings, type.data = "dependence")
##' res.qcs <- qcs.cusum(res.qcd, type = "cusum")
##' summary(res.qcs)
##' plot(res.qcs)
##'
# S3 generic for cusum control charts: dispatches on class(x)
# (methods: qcs.cusum.default, qcs.cusum.qcd).
qcs.cusum <- function(x, ...) UseMethod("qcs.cusum")
##' @rdname qcs.cusum
##' @method qcs.cusum default
##' @inheritParams qcd
##' @param sizes a value or a vector of values specifying the sample sizes
##' associated with each group.
##' @param center a value specifying the center of group statistics or the
##' ''target'' value of the process.
##' @param std.dev a value or an available method specifying the within-group
##' standard deviation(s) of the process. \cr Several methods are available for
##' estimating the standard deviation.
##' @param decision.interval A numeric value specifying the number of standard
##' errors of the summary statistics at which the cumulative sum is out of
##' control.
##' @param se.shift The amount of shift to detect in the process, measured in
##' standard errors of the summary statistics.
##' @param plot a logical value indicating whether the chart should be plotted.
##' @export
##'
# Default method: wraps raw data in a quality-control-data ('qcd') object
# and delegates all cusum computation to the qcd method.
qcs.cusum.default <- function(x, var.index = 1, sample.index = 2,
                              covar.index = NULL, covar.names = NULL,
                              data.name = NULL,
                              sizes = NULL,
                              center = NULL, std.dev = NULL,
                              decision.interval = 5,
                              se.shift = 1, plot = FALSE, ...)
{
  # Build the qcd container (cusum charts use dependent-type data).
  qc.data <- qcd(data = x, var.index = var.index,
                 sample.index = sample.index,
                 covar.index = covar.index, covar.names = covar.names,
                 data.name = data.name, sizes = sizes,
                 type.data = "dependence")
  # Hand off to the workhorse method; its value is returned unchanged.
  qcs.cusum.qcd(x = qc.data, center = center, std.dev = std.dev,
                decision.interval = decision.interval,
                se.shift = se.shift, plot = plot)
}
##' @rdname qcs.cusum
##' @method qcs.cusum qcd
##' @inheritParams qcs.cusum.default
##' @export
##'
qcs.cusum.qcd <- function(x, center = NULL,
std.dev = NULL,
decision.interval = 5, se.shift = 1, plot = FALSE, ...) {
#.........................................................................
# Workhorse method: computes cusum-chart statistics for a 'qcd'
# (quality control data) object and returns a 'qcs.cusum' object.
# Validate input: must be (or extend) class 'qcd'.
# NOTE(review): the error message reads "an objects" -- runtime text,
# deliberately left unchanged here.
if(is.null(x) || !inherits(x, "qcd"))
stop("data must be an objects of class (or extending) 'qcd'")
sizes <- x$sizes
type.data <- "dependence"
# Any group of size 1 means individual observations -> "xbar.one" rules;
# otherwise grouped "xbar" rules.
std <- if(any(sizes==1)) "xbar.one" else "xbar"
# Default standard-deviation estimator depends on the chart flavour and
# on whether any group exceeds 25 observations.
if(is.null(std.dev))
{ std.dev <- switch(std,
"xbar" = { if(any(sizes > 25)) "RMSDF"
else "UWAVE-R" },
"xbar.one" = "MR")
}
# Delegate the cusum computation to the package's qcs() constructor.
# NOTE: the result is bound to a local named `qcs`, shadowing the qcs()
# function from this point on.
qcs<-qcs(x = x$x, sample.index = x$sample, sizes = sizes,
center = center, std.dev = std.dev, type = "cusum",
decision.interval = decision.interval, se.shift = se.shift, type.data = type.data)
# Unpack the fitted object's components.
center <- qcs$center
cusum <- qcs$statistics
std.dev <- qcs$std.dev
sizes <- qcs$sizes
limits <- qcs$limits
violations <- qcs$violations
pos <- qcs$pos
neg <- qcs$neg
decision.interval <- qcs$decision.interval
se.shift <- qcs$se.shift
statistics <- data.frame(cusum)
m <- length(x)
sample <- x$sample
# If the qcd object carries covariates (more than 3 columns -- presumably
# value/sample/sizes; confirm against qcd()), collapse each covariate to
# its unique value per sample and append it to the statistics table.
if (m > 3) {
new.x <- x[, -c(1, 2, length(x))]
cov <- apply(new.x, 2, function(x) unlist(lapply(split(x, sample), unique)))
statistics <- data.frame(cusum, cov)
}
row.names(statistics) <- unique(x$sample)
data.name <- attr(x, "data.name")
# Assemble the return value; class c("qcs.cusum", "qcs") drives dispatch
# of the plot/summary methods used elsewhere in the package.
result <- list(qcd = x, type = "cusum", statistics = statistics,
center = center, std.dev = std.dev,
limits = limits,
sizes = sizes, data.name = data.name,
violations = violations, pos = pos, neg = neg,
decision.interval = decision.interval,
se.shift = se.shift)
oldClass(result) <- c("qcs.cusum", "qcs")
# Optionally plot immediately; extra arguments are forwarded to plot().
if(plot) plot(result, ...)
return(result)
#.........................................................................
} # qcs.cusum.qcd |
# plot3.R -- plot energy sub-metering for 2007-02-01/02 and save as plot3.png
# NOTE(review): hard-coded personal working directory; the data file
# "household_power_consumption.txt" is expected to live in this folder.
setwd("~/Personal Training")
# Read the full dataset ('?' marks missing values), then keep only the two
# days of interest.
data <- read.table("household_power_consumption.txt", na.strings = "?", sep = ";", header = TRUE)
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# Filter Dates for 2007-02-01 and 2007-02-02
data <- subset(data, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
# Store POSIXct rather than POSIXlt: POSIXlt columns are a data.frame
# footgun and previously forced an as.POSIXct() conversion at every use.
data$datetime <- as.POSIXct(strptime(paste(data$Date, data$Time), "%Y-%m-%d %H:%M:%S"))
## Plot 3: one line per sub-metering channel, against time
plot(data$Sub_metering_1 ~ data$datetime, type = "l", ylab = "Energy sub metering", xlab = "")
lines(data$Sub_metering_2 ~ data$datetime, col = "Red")
lines(data$Sub_metering_3 ~ data$datetime, col = "Blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Copy the on-screen plot to a 480x480 PNG and close the PNG device
dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off()
| /plot 3.R | no_license | Zucethy/Coursera-EDA | R | false | false | 938 | r | setwd("~/Personal Training")
# NOTE(review): setwd() above uses a hard-coded personal directory; the data
# file is expected to live there.
# Reads in data from file ('?' marks missing values) then subsets to the
# two days of interest.
data <- read.table("household_power_consumption.txt", na.strings="?", sep = ';', header = T)
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# Filter Dates for 2007-02-01 and 2007-02-02
data <- subset(data, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
# NOTE(review): strptime() returns POSIXlt, which is why every use below
# re-wraps the column in as.POSIXct(); storing POSIXct directly would be safer.
data$datetime <- strptime(paste(data$Date, data$Time), "%Y-%m-%d %H:%M:%S")
## Plot 3: one line per sub-metering channel, against time
plot(data$Sub_metering_1 ~ as.POSIXct(data$datetime), type = "l", ylab = "Energy sub metering", xlab = "")
lines(data$Sub_metering_2 ~ as.POSIXct(data$datetime), col = "Red")
lines(data$Sub_metering_3 ~ as.POSIXct(data$datetime), col = "Blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Save the current plot as a 480x480 PNG
dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{fcn.indirect}
\alias{fcn.indirect}
\title{fcn.indirect}
\usage{
fcn.indirect(fcn, x)
}
\arguments{
\item{fcn}{= a function to apply}
\item{x}{= vector of function names}
}
\description{
applies <fcn> recursively
}
\seealso{
Other fcn: \code{\link{fcn.all.canonical}},
\code{\link{fcn.all.roxygenize}},
\code{\link{fcn.all.sub}}, \code{\link{fcn.all.super}},
\code{\link{fcn.args.actual}},
\code{\link{fcn.canonical}}, \code{\link{fcn.clean}},
\code{\link{fcn.comments.parse}},
\code{\link{fcn.dates.parse}}, \code{\link{fcn.date}},
\code{\link{fcn.direct.sub}},
\code{\link{fcn.direct.super}}, \code{\link{fcn.dir}},
\code{\link{fcn.expressions.count}},
\code{\link{fcn.extract.args}},
\code{\link{fcn.extract.out}}, \code{\link{fcn.has}},
\code{\link{fcn.indent.decrease}},
\code{\link{fcn.indent.else}},
\code{\link{fcn.indent.ignore}},
\code{\link{fcn.indent.increase}},
\code{\link{fcn.indent.proper}},
\code{\link{fcn.lines.code}},
\code{\link{fcn.lines.count}}, \code{\link{fcn.list}},
\code{\link{fcn.lite}}, \code{\link{fcn.mat.col}},
\code{\link{fcn.mat.num}}, \code{\link{fcn.mat.vec}},
\code{\link{fcn.nonNA}}, \code{\link{fcn.num.nonNA}},
\code{\link{fcn.order}}, \code{\link{fcn.path}},
\code{\link{fcn.roxygenize}}, \code{\link{fcn.sho}},
\code{\link{fcn.simple}}, \code{\link{fcn.to.comments}},
\code{\link{fcn.to.txt}}, \code{\link{fcn.vec.grp}},
\code{\link{fcn.vec.num}}
}
\keyword{fcn.indirect}
| /man/fcn.indirect.Rd | no_license | vsrimurthy/EPFR | R | false | true | 1,558 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{fcn.indirect}
\alias{fcn.indirect}
\title{fcn.indirect}
\usage{
fcn.indirect(fcn, x)
}
\arguments{
\item{fcn}{= a function to apply}
\item{x}{= vector of function names}
}
\description{
applies <fcn> recursively
}
\seealso{
Other fcn: \code{\link{fcn.all.canonical}},
\code{\link{fcn.all.roxygenize}},
\code{\link{fcn.all.sub}}, \code{\link{fcn.all.super}},
\code{\link{fcn.args.actual}},
\code{\link{fcn.canonical}}, \code{\link{fcn.clean}},
\code{\link{fcn.comments.parse}},
\code{\link{fcn.dates.parse}}, \code{\link{fcn.date}},
\code{\link{fcn.direct.sub}},
\code{\link{fcn.direct.super}}, \code{\link{fcn.dir}},
\code{\link{fcn.expressions.count}},
\code{\link{fcn.extract.args}},
\code{\link{fcn.extract.out}}, \code{\link{fcn.has}},
\code{\link{fcn.indent.decrease}},
\code{\link{fcn.indent.else}},
\code{\link{fcn.indent.ignore}},
\code{\link{fcn.indent.increase}},
\code{\link{fcn.indent.proper}},
\code{\link{fcn.lines.code}},
\code{\link{fcn.lines.count}}, \code{\link{fcn.list}},
\code{\link{fcn.lite}}, \code{\link{fcn.mat.col}},
\code{\link{fcn.mat.num}}, \code{\link{fcn.mat.vec}},
\code{\link{fcn.nonNA}}, \code{\link{fcn.num.nonNA}},
\code{\link{fcn.order}}, \code{\link{fcn.path}},
\code{\link{fcn.roxygenize}}, \code{\link{fcn.sho}},
\code{\link{fcn.simple}}, \code{\link{fcn.to.comments}},
\code{\link{fcn.to.txt}}, \code{\link{fcn.vec.grp}},
\code{\link{fcn.vec.num}}
}
\keyword{fcn.indirect}
|
library(shinydashboard)
library(shinydashboardPlus)
library(DT)
library(plotly)
library(shinycssloaders)
# Static code is in the helper.R file. This includes reading in the initial data set and cleaning and also
# easily callable subsets of the variables and variable names. See helper.R for more information.
source("helper.R")
fluidPage(dashboardPage(
skin = "red",
dashboardHeader(title = "Video Game Trends"),
dashboardSidebar(
sidebarMenu(
menuItem("About", tabName = "About", icon = icon("question")),
menuItem("Data", tabName = "Data", icon = icon("th")),
menuItem("Data Exploration", tabName = "DataExploration", icon = icon("binoculars")),
menuItem("Modeling", tabName = "Modeling", icon = icon("chart-area"))
)
),
dashboardBody(
tabItems(
# About page content
tabItem(tabName = "About",
h2("About Page"),
mainPanel(
h3("About this App"),
p("This app will allow users to explore the sales and ratings data for different video games. The ratings are from Metacritic and the sales data is from vgchartz.",
"The goal is to see if there are any trends or correlations between ratings, sales and publisher/developer and platform."),
br(),
h3("About the Data"),
h4("This video game data set was taken from",
a(href="https://www.kaggle.com/rush4ratio/video-game-sales-with-ratings", "Kaggle"),
"and is called ", em("Video Game Sales with Ratings")),
img(src = "psvita.png", height = 300, width = 300),
p("Image of the Playstation Vita, from", a(href="https://kotaku.com/fans-are-finally-coming-to-terms-with-the-vitas-death-1833298145", "Kotaku.com.")),
p("There are 16 variables in this dataset: Name, Platform, Year of Release, Genre, Publisher, North American Sales (NA_Sales), European Sales (EU_Sales),",
"Japanese Sales (JP_Sales), Rest of World Sales (Other_Sales), Global Sales, Critic Score, Critic Count, User Score, User Count, and Rating.",
"All sales data is in millions of dollars and Global sales is the sum of all the rest of the sales data, critic scores are 1-100 while user scores are 1.0-10.0.",
"The user and critic counts are the number of scores given. The original dataset contained 16,719 observations. This number has been reduced by removing",
"observations that fall into categories with less than 10 observations total. Further, only the top 15 game publishers are included in this dataset. Through this",
                    "reduction there are now 4,622 different observations in this set for games that run the gamut of genres. Two tables below show",
"the short hand and long form of the variables Platform, which is the hardware the game was designed to run on, and the game's Rating."),
tableOutput("platTable"),
tableOutput("rateTable"),
br(),
h3("About the Other Pages"),
h4("Data"),
p("This page allows the user to look through and filter or subset the data as desired.",
"It also allows the user to download a .csv of either the full dataset or the filtered/subsetted data that they chose.",
"The full original dataset from Kaggle is also available in the GitHub repo for this shiny app."),
br(),
h4("Data Exploration"),
p("This page allows for exploratory analysis of the data, including the creation of different plots and summaries.",
"The summary statistics and correlation for differenet variables can be obtained. Three other plots are included:",
"barplot, violin plot and scatterplot. These plots are able to be filtered and have different variables selected.",
"There is also the ability to download the selected plot."),
br(),
h4("Modeling"),
p("The Modeling page has three different tabs: Information, Fitting and Prediction. The Information tab explains the models used, ",
"the Fitting tab allows the user to select different inputs for the models and the Prediction tab will predict the rating of a ",
                    "game based on the full model and selected user inputs."),
br(),
)
),
# Data page content
tabItem(tabName = "Data",
h2("Data"),
mainPanel(
selectizeInput("dataFilt", "Filter Table", choices =
list("No Filter" = " ", "Platform" = uPlat, "Year" = uYear, "Genre" = uGenr,
"Publisher" = uPubl,"Rating" = uRatg), multiple = FALSE),
selectizeInput("DataSel", "Select Desired Columns", choices = allVars, selected = allVars, multiple = TRUE,
options = list('plugins' = list('remove_button'),
'create' = TRUE,
'persist' = FALSE)),
downloadButton("saveData", "Save Current Data Set"),
dataTableOutput("allData", width = "1000px")
)
),
# Data Exploration page content
tabItem(tabName = "DataExploration",
h2("Data Exploration Fun"),
mainPanel(
# Drop down menu for the desired output
selectInput("plotSum", "Select the desired plot or summary", choices = dataInputs),
# Action button to save desired plots
conditionalPanel(
condition = "input.plotSum != 'Summary Statistics' & input.plotSum != 'Scatterplot'",
downloadButton("savePlotSum", "Download")
),
# Summary statistics of the data set
conditionalPanel(
condition = "input.plotSum == 'Summary Statistics'",
h3("Summary Statistics"),
fluidRow(
column(6,
selectizeInput("sumOpts", "Variables for the Summary Statistics",
choices = allVars[-c(1, 2, 4, 5, 15)], selected = allVars[-c(1, 2, 4, 5, 15)], multiple = TRUE,
options = list('plugins' = list('remove_button'),
'create' = TRUE,
'persist' = FALSE)),
),
column(6,
selectInput("pickSum", "Summary", choices = c("Minimum and Maximum", "Quantiles", "Interquartile Range", "Mean and Median"),
selected = "Minimum and Maximum")
)
),
verbatimTextOutput("sumData")
),
# Check boxes for user input and the corresponding correlation plot
conditionalPanel(
condition = "input.plotSum == 'Correlation Plot'",
h3("Correlation Plot"),
checkboxGroupInput("corOpts", "Variables for the Correlation Plot", choices = corVars,
selected = corVars, inline = TRUE),
plotOutput("corPlot"),
p("**Please note that Global Sales is the sum of all other sales and so it's expected to be highly correlated",
"with the other sales data.")
),
# Barplot
conditionalPanel(
condition = "input.plotSum == 'Barplot'",
h3("Barplot"),
# Options for filtering
selectInput("filtBar", "Filter Observations", choices =
list("No Filter" = "No Filter", "Platform" = uPlat, "Year" = uYear, "Genre" = uGenr, "Publisher" = uPubl,
"Rating" = uRatg)),
selectInput("facts", "Select the Variable of interest for the Barplot", choices = barVars, selected = barVars[2]),
plotOutput("bar", width = "100%")
),
# Violin Plot
conditionalPanel(
condition = "input.plotSum == 'Violin Plot'",
h3("Violin Plot"),
fluidRow(box(
# Options for filtering
selectInput("filtVio", "Filter Observations", choices =
list("No Filter" = "No Filter", "Platform" = uPlat, "Year" = uYear, "Genre" = uGenr, "Publisher" = uPubl,
"Rating" = uRatg)), width = 4),
box(selectInput("xVio", "Select the 'X' variable", choices = barVars, selected = barVars[4]), width = 4),
box(selectInput("yVio", "Select the 'Y' variable", choices = numVars, selected = numVars[7]), width = 4),
box(plotOutput("violin"), width = 12)
)
),
# Scatterplot
conditionalPanel(
condition = "input.plotSum == 'Scatterplot'",
p("Please use the plotly download button to save a png of the plot."),
h3("Scatterplot"),
fluidRow(box(
# Options for filtering
selectInput("filtSca", "Filter Observations", choices =
list("No Filter" = "No Filter", "Platform" = uPlat, "Year" = uYear, "Genre" = uGenr, "Publisher" = uPubl,
"Rating" = uRatg)), width = 4),
box(selectInput("xSca", "Select the 'X' variable", choices = numVars, selected = numVars[1]), width = 4),
box(selectInput("ySca", "Select the 'Y' variable", choices = numVars, selected = numVars[7]), width = 4),
),
# Scatterplot output
box(plotlyOutput("scatter"), width = 12)
)
)
),
# Modeling page content
tabItem(tabName = "Modeling",
tags$head(
tags$style(
HTML(".shiny-notification {
position:fixed;
top: calc(50%);
left: calc(25%);
}"
)
),
),
h2("Modeling Content"),
mainPanel(
# Output: Tabset with modeling information, fitting and prediction
tabsetPanel(type = "tabs",
tabPanel("Modeling Info",
br(),
p("This app uses the caret package to fit all models."),
h2("Multiple Linear Regression"),
p("Method in caret: lm"),
h3("Benefits"),
p("Multiple Linear Regression is used for data with a continuous response and two or more predictor variables.",
"In this app there is only a single response variable.",
"This type of model assumes that there is a linear relationship between the response and predictors."),
h3("Model"),
withMathJax(helpText("$$y_i=\\beta_{0}+\\beta_{1}\ x_{i1}+\\beta_{2}\ x_{i2}+\\cdots+\\beta_{p}\ x_{ip}+\\epsilon_{i}$$")),
withMathJax(helpText("$$i=1,\\cdots,n$$")),
p("Where the response variable:"),
withMathJax(helpText("$$y_i$$")),
p("Coefficients:"),
withMathJax(helpText("$$\\beta_p$$")),
p("Predictor variables:"),
withMathJax(helpText("$$x_{ip}$$")),
p("Error term:"),
withMathJax(helpText("$$\\epsilon_i$$")),
h3("Drawbacks"),
p("Linearity of the relationship between the predictors and response is a needed assumption and real world ",
                                        "data doesn't necessarily have this linear relationship. Because of this, the model may not be the best ",
"fit for the data, or may give misleading results."),
h2("Regression Tree Model"),
p("Method in caret: rpart"),
h3("Benefits"),
p("With tree based models, there is no assumption of linearity or other relationships. This allows for a ",
"wider application on different sets of data. Further, the interpretability is high with easy to ",
"understand graphics associated with these models."),
h3("Determining Split"),
p("The way that decision trees are able to regress or classify is by splitting the predictor space. The following ",
"equation is the way they do this:"),
withMathJax(helpText("$$\\sum_{j=1}^J\ \\sum_{i\\in{R_j}}\ (y_i - \\hat{y}_{R_j})^2$$")),
h3("Drawbacks"),
p("The tree models are greedy. This means that the model will make the best split at that moment, and not consider a worse split",
"now to have an even better outcome further down the line. This can keep the tree from giving the best model.",
"Trees like this are very vulnerable to the split of data. Here, the random seed is 13 and all splits use",
"this seed. This reduces variablility between the data splits for building the tree. It may also be a lot",
"easier to overfit, this is where pruning comes into play."),
h2("Random Forest"),
p("Method in caret: ranger"),
h3("Benefits"),
p("Random forest is a type of bootstrap aggregated tree model. Many, many trees are fit then aggregated. These trees will have a much lower ",
"correlation to one another because of how the splits are produced. Only m predictors are used per split."),
h3("m Predictors"),
p("To use random forest, the total number of predictors, p, is reduced to the number of candidate predictors per split, m. The method known as 'Bagging'",
"is a special case in that m equals the total number of p predictors. Below we can see the rule of thumb for m when doing regression with random forest."),
withMathJax(helpText("$$m \\approx\ \\frac{p}{3} $$")),
h3("Drawbacks"),
                                        p("The biggest drawback of any aggregated tree method is the loss of interpretability that the single regression or classification tree possesses.")
),
tabPanel("Model Fitting",
h2(),
fluidRow(column(4,
wellPanel(
sliderInput("split", "Percentage of Data for the Training Set", min = 50,
max = 85, value = 75, step = 1),
actionButton("run", "Run Models"),
selectInput("resp", "Response Variable", choices = numVars[-2], selected = numVars[1]),
checkboxGroupInput("pred", "Predictor Variables", choices = allVars[c(2:5, 7:15)],
selected = allVars[c(3, 4, 11)]),
selectInput("cv", "Please Select a Cross Validation Method", choices =
c("Cross Validation" = "cv", "Repeated Cross Validation" = "repeatedcv")),
sliderInput("cvNum", "Number of Folds", min = 3, max = 20, value = 10, step = 1),
selectInput("cvRep", "Number of Repeats for Repeated CV", choices = c(2, 3, 4, 5, 6)),
p("Random Forest Tuning"),
selectizeInput("mtryNum", "Number of Variables to Try", choices = 2:67, selected = 7, multiple = TRUE,
options = list('plugins' = list('remove_button'),
'create' = TRUE,
'persist' = FALSE)),
selectizeInput("sRule", "SplitRule", choices = c("variance", "extratrees"), selected = "variance", multiple = TRUE,
options = list('plugins' = list('remove_button'),
'create' = TRUE,
'persist' = FALSE)),
selectizeInput("minNode", "Minimum Node Size", choices = c(4, 5, 6), selected = 5, multiple = TRUE,
options = list('plugins' = list('remove_button'),
'create' = TRUE,
'persist' = FALSE))
)
),
column(8,
h3("Multiple Linear Regression Model Fit Summary"),
verbatimTextOutput("mlrModel"),
h3("Multiple Linear Regression Model Fit Error on Test Set"),
verbatimTextOutput("mlrErr"),
h3("Regression Tree Fit Summary"),
verbatimTextOutput("regTree"),
h3("Regression Tree Fit Error on Test Set"),
verbatimTextOutput("treeErr"),
h3("Random Forest Fit Summary"),
verbatimTextOutput("randForest"),
h3("Random Forest Fit Error On Test Set"),
withSpinner(verbatimTextOutput("rfErr"))
)
)
),
tabPanel("Prediction",
h3("Prediction for Video Game"),
p("To predict a response based on the model described in the Model Fitting Tab, please select that in the 'Model' dropdown below,",
"else the full model will be fit and predicted on. The default tuning caret uses will be used in the full models."),
fluidRow(
column(3,
selectInput("predMod", "Prediction Model", choices = c("Multiple Linear Regression", "Regression Tree", "Random Forest"))
),
column(3,
selectInput("modPref", "Model", choices = c("Model Fitting Tab", "Full Model"), selected = "Full Model")
),
column(3,
selectInput("regResp", "Response to Predict", choices = numVars[c(-7, -9)], selected = numVars[1])
),
column(3,
actionButton("predButton", "Predict!")
)
),
br(),
p("Use the options below to input various options for prediction."),
fluidRow(
column(4,
selectInput("plat", "Platform", choices = uPlat, selected = uPlat[9])
),
column(4,
selectInput("year", "Year_of_Release", choices = uYear, selected = uYear[1])
),
column(4,
selectInput("genre", "Genre", choices = uGenr, selected = uGenr[8])
)
),
fluidRow(
column(4,
selectInput("publ", "Publisher", choices = uPubl, selected = uPubl[8])
),
column(4,
selectInput("ratg", "Rating", choices = uRatg, selected = uRatg[3])
),
column(4,
numericInput("critS", "Critic Score", value = 50, min = 0, max = 100, step = 1)
),
),
fluidRow(
column(3,
numericInput("critC", "Critic Count", value = max(games$Critic_Count)/2, min = min(games$Critic_Count),
max = max(games$Critic_Count), step = 1)
),
column(3,
numericInput("useS", "User Score", value = 5, min = 0, max = 10, step = 0.1)
),
column(3,
numericInput("useC", "User Count", value = 5000, min = min(games$User_Count), max = max(games$User_Count),
step = 1)
),
column(3,
sliderInput("naSal", "North American Sales", value = max(games$NA_Sales)/2, min = 0,
max = max(games$NA_Sales), step = 0.01)
)
),
fluidRow(
column(3,
sliderInput("euSal", "European Sales", value = max(games$EU_Sales)/2, min = 0,
max = max(games$EU_Sales), step = 0.01)
),
column(3,
sliderInput("jpSal", "Japanese Sales", value = max(games$JP_Sales)/2, min = 0,
max = max(games$JP_Sales), step = 0.01)
),
column(3,
sliderInput("otSal", "Other Sales", value = max(games$Other_Sales)/2, min = 0,
max = max(games$Other_Sales), step = 0.01)
),
column(3,
sliderInput("glSal", "Global Sales", value = max(games$Global_Sales)/2, min = 0,
max = max(games$Global_Sales), step = 0.01)
)
),
conditionalPanel(
condition = "input.predMod == 'Multiple Linear Regression'",
withSpinner(verbatimTextOutput("mlrPred"),
proxy.height = "100px")
),
conditionalPanel(
condition = "input.predMod == 'Regression Tree'",
withSpinner(verbatimTextOutput("regPred"),
proxy.height = "100px")
),
conditionalPanel(
condition = "input.predMod == 'Random Forest'",
withSpinner(verbatimTextOutput("rfPred"),
proxy.height = "100px")
)
)
)
)
)
)
)
)
)
| /ui.R | permissive | kedumas/ST-558-Project-3 | R | false | false | 30,846 | r | library(shinydashboard)
library(shinydashboardPlus)
library(DT)
library(plotly)
library(shinycssloaders)
# Static code is in the helper.R file. This includes reading in the initial data set and cleaning and also
# easily callable subsets of the variables and variable names. See helper.R for more information.
source("helper.R")
# UI definition for the "Video Game Trends" dashboard.
# Layout: a shinydashboard page with four sidebar tabs (About, Data,
# Data Exploration, Modeling). All data objects referenced below (games,
# allVars, numVars, barVars, corVars, dataInputs, uPlat, uYear, uGenr,
# uPubl, uRatg) are created in helper.R, sourced above.
fluidPage(dashboardPage(
  skin = "red",
  dashboardHeader(title = "Video Game Trends"),

  # Sidebar navigation: one menu item per tab, keyed by tabName.
  dashboardSidebar(
    sidebarMenu(
      menuItem("About", tabName = "About", icon = icon("question")),
      menuItem("Data", tabName = "Data", icon = icon("th")),
      menuItem("Data Exploration", tabName = "DataExploration", icon = icon("binoculars")),
      menuItem("Modeling", tabName = "Modeling", icon = icon("chart-area"))
    )
  ),

  dashboardBody(
    tabItems(

      # About page content
      tabItem(tabName = "About",
        h2("About Page"),
        mainPanel(
          h3("About this App"),
          p("This app will allow users to explore the sales and ratings data for different video games. The ratings are from Metacritic and the sales data is from vgchartz.",
            "The goal is to see if there are any trends or correlations between ratings, sales and publisher/developer and platform."),
          br(),
          h3("About the Data"),
          h4("This video game data set was taken from",
             a(href="https://www.kaggle.com/rush4ratio/video-game-sales-with-ratings", "Kaggle"),
             "and is called ", em("Video Game Sales with Ratings")),
          img(src = "psvita.png", height = 300, width = 300),
          p("Image of the Playstation Vita, from", a(href="https://kotaku.com/fans-are-finally-coming-to-terms-with-the-vitas-death-1833298145", "Kotaku.com.")),
          p("There are 16 variables in this dataset: Name, Platform, Year of Release, Genre, Publisher, North American Sales (NA_Sales), European Sales (EU_Sales),",
            "Japanese Sales (JP_Sales), Rest of World Sales (Other_Sales), Global Sales, Critic Score, Critic Count, User Score, User Count, and Rating.",
            "All sales data is in millions of dollars and Global sales is the sum of all the rest of the sales data, critic scores are 1-100 while user scores are 1.0-10.0.",
            "The user and critic counts are the number of scores given. The original dataset contained 16,719 observations. This number has been reduced by removing",
            "observations that fall into categories with less than 10 observations total. Further, only the top 15 game publishers are included in this dataset. Through this",
            "reduction there are now 4,622 different observations in this set for games that run the gamut of genres. Two tables below show",
            "the short hand and long form of the variables Platform, which is the hardware the game was designed to run on, and the game's Rating."),
          tableOutput("platTable"),
          tableOutput("rateTable"),
          br(),
          h3("About the Other Pages"),
          h4("Data"),
          p("This page allows the user to look through and filter or subset the data as desired.",
            "It also allows the user to download a .csv of either the full dataset or the filtered/subsetted data that they chose.",
            "The full original dataset from Kaggle is also available in the GitHub repo for this shiny app."),
          br(),
          h4("Data Exploration"),
          p("This page allows for exploratory analysis of the data, including the creation of different plots and summaries.",
            "The summary statistics and correlation for different variables can be obtained. Three other plots are included:",
            "barplot, violin plot and scatterplot. These plots are able to be filtered and have different variables selected.",
            "There is also the ability to download the selected plot."),
          br(),
          h4("Modeling"),
          p("The Modeling page has three different tabs: Information, Fitting and Prediction. The Information tab explains the models used, ",
            "the Fitting tab allows the user to select different inputs for the models and the Prediction tab will predict the rating of a ",
            "game based on the full model and selected user inputs."),
          # Trailing comma removed here: an empty trailing argument errors in
          # base R calls and is only tolerated by recent rlang-based htmltools.
          br()
        )
      ),

      # Data page content: filterable, column-selectable table with download.
      tabItem(tabName = "Data",
        h2("Data"),
        mainPanel(
          selectizeInput("dataFilt", "Filter Table", choices =
                           list("No Filter" = " ", "Platform" = uPlat, "Year" = uYear, "Genre" = uGenr,
                                "Publisher" = uPubl, "Rating" = uRatg), multiple = FALSE),
          selectizeInput("DataSel", "Select Desired Columns", choices = allVars, selected = allVars, multiple = TRUE,
                         options = list('plugins' = list('remove_button'),
                                        'create' = TRUE,
                                        'persist' = FALSE)),
          downloadButton("saveData", "Save Current Data Set"),
          dataTableOutput("allData", width = "1000px")
        )
      ),

      # Data Exploration page content
      tabItem(tabName = "DataExploration",
        h2("Data Exploration Fun"),
        mainPanel(
          # Drop down menu for the desired output
          selectInput("plotSum", "Select the desired plot or summary", choices = dataInputs),
          # Download button for server-rendered plots; the scatterplot is
          # excluded because plotly provides its own download toolbar.
          conditionalPanel(
            condition = "input.plotSum != 'Summary Statistics' & input.plotSum != 'Scatterplot'",
            downloadButton("savePlotSum", "Download")
          ),
          # Summary statistics of the data set
          conditionalPanel(
            condition = "input.plotSum == 'Summary Statistics'",
            h3("Summary Statistics"),
            fluidRow(
              column(6,
                selectizeInput("sumOpts", "Variables for the Summary Statistics",
                               choices = allVars[-c(1, 2, 4, 5, 15)], selected = allVars[-c(1, 2, 4, 5, 15)], multiple = TRUE,
                               options = list('plugins' = list('remove_button'),
                                              'create' = TRUE,
                                              'persist' = FALSE))
              ),
              column(6,
                selectInput("pickSum", "Summary", choices = c("Minimum and Maximum", "Quantiles", "Interquartile Range", "Mean and Median"),
                            selected = "Minimum and Maximum")
              )
            ),
            verbatimTextOutput("sumData")
          ),
          # Check boxes for user input and the corresponding correlation plot
          conditionalPanel(
            condition = "input.plotSum == 'Correlation Plot'",
            h3("Correlation Plot"),
            checkboxGroupInput("corOpts", "Variables for the Correlation Plot", choices = corVars,
                               selected = corVars, inline = TRUE),
            plotOutput("corPlot"),
            p("**Please note that Global Sales is the sum of all other sales and so it's expected to be highly correlated",
              "with the other sales data.")
          ),
          # Barplot
          conditionalPanel(
            condition = "input.plotSum == 'Barplot'",
            h3("Barplot"),
            # Options for filtering
            selectInput("filtBar", "Filter Observations", choices =
                          list("No Filter" = "No Filter", "Platform" = uPlat, "Year" = uYear, "Genre" = uGenr, "Publisher" = uPubl,
                               "Rating" = uRatg)),
            selectInput("facts", "Select the Variable of interest for the Barplot", choices = barVars, selected = barVars[2]),
            plotOutput("bar", width = "100%")
          ),
          # Violin Plot
          conditionalPanel(
            condition = "input.plotSum == 'Violin Plot'",
            h3("Violin Plot"),
            fluidRow(
              box(
                # Options for filtering
                selectInput("filtVio", "Filter Observations", choices =
                              list("No Filter" = "No Filter", "Platform" = uPlat, "Year" = uYear, "Genre" = uGenr, "Publisher" = uPubl,
                                   "Rating" = uRatg)), width = 4),
              box(selectInput("xVio", "Select the 'X' variable", choices = barVars, selected = barVars[4]), width = 4),
              box(selectInput("yVio", "Select the 'Y' variable", choices = numVars, selected = numVars[7]), width = 4),
              box(plotOutput("violin"), width = 12)
            )
          ),
          # Scatterplot
          conditionalPanel(
            condition = "input.plotSum == 'Scatterplot'",
            p("Please use the plotly download button to save a png of the plot."),
            h3("Scatterplot"),
            fluidRow(
              box(
                # Options for filtering
                selectInput("filtSca", "Filter Observations", choices =
                              list("No Filter" = "No Filter", "Platform" = uPlat, "Year" = uYear, "Genre" = uGenr, "Publisher" = uPubl,
                                   "Rating" = uRatg)), width = 4),
              box(selectInput("xSca", "Select the 'X' variable", choices = numVars, selected = numVars[1]), width = 4),
              box(selectInput("ySca", "Select the 'Y' variable", choices = numVars, selected = numVars[7]), width = 4)
            ),
            # Scatterplot output
            box(plotlyOutput("scatter"), width = 12)
          )
        )
      ),

      # Modeling page content
      tabItem(tabName = "Modeling",
        # Center progress notifications on screen while models run.
        tags$head(
          tags$style(
            HTML(".shiny-notification {
position:fixed;
top: calc(50%);
left: calc(25%);
}"
            )
          )
        ),
        h2("Modeling Content"),
        mainPanel(
          # Output: tabset with modeling information, fitting and prediction
          tabsetPanel(type = "tabs",
            tabPanel("Modeling Info",
              br(),
              p("This app uses the caret package to fit all models."),
              h2("Multiple Linear Regression"),
              p("Method in caret: lm"),
              h3("Benefits"),
              p("Multiple Linear Regression is used for data with a continuous response and two or more predictor variables.",
                "In this app there is only a single response variable.",
                "This type of model assumes that there is a linear relationship between the response and predictors."),
              h3("Model"),
              # NOTE: the original strings used the single-backslash escape
              # '\ ', which R's parser rejects as an unrecognized escape;
              # '\\ ' yields the intended LaTeX explicit space.
              withMathJax(helpText("$$y_i=\\beta_{0}+\\beta_{1}\\ x_{i1}+\\beta_{2}\\ x_{i2}+\\cdots+\\beta_{p}\\ x_{ip}+\\epsilon_{i}$$")),
              withMathJax(helpText("$$i=1,\\cdots,n$$")),
              p("Where the response variable:"),
              withMathJax(helpText("$$y_i$$")),
              p("Coefficients:"),
              withMathJax(helpText("$$\\beta_p$$")),
              p("Predictor variables:"),
              withMathJax(helpText("$$x_{ip}$$")),
              p("Error term:"),
              withMathJax(helpText("$$\\epsilon_i$$")),
              h3("Drawbacks"),
              p("Linearity of the relationship between the predictors and response is a needed assumption and real world ",
                "data doesn't necessarily have this linear relationship. Because of this, the model may not be the best ",
                "fit for the data, or may give misleading results."),
              h2("Regression Tree Model"),
              p("Method in caret: rpart"),
              h3("Benefits"),
              p("With tree based models, there is no assumption of linearity or other relationships. This allows for a ",
                "wider application on different sets of data. Further, the interpretability is high with easy to ",
                "understand graphics associated with these models."),
              h3("Determining Split"),
              p("The way that decision trees are able to regress or classify is by splitting the predictor space. The following ",
                "equation is the way they do this:"),
              withMathJax(helpText("$$\\sum_{j=1}^J\\ \\sum_{i\\in{R_j}}\\ (y_i - \\hat{y}_{R_j})^2$$")),
              h3("Drawbacks"),
              p("The tree models are greedy. This means that the model will make the best split at that moment, and not consider a worse split",
                "now to have an even better outcome further down the line. This can keep the tree from giving the best model.",
                "Trees like this are very vulnerable to the split of data. Here, the random seed is 13 and all splits use",
                "this seed. This reduces variability between the data splits for building the tree. It may also be a lot",
                "easier to overfit, this is where pruning comes into play."),
              h2("Random Forest"),
              p("Method in caret: ranger"),
              h3("Benefits"),
              p("Random forest is a type of bootstrap aggregated tree model. Many, many trees are fit then aggregated. These trees will have a much lower ",
                "correlation to one another because of how the splits are produced. Only m predictors are used per split."),
              h3("m Predictors"),
              p("To use random forest, the total number of predictors, p, is reduced to the number of candidate predictors per split, m. The method known as 'Bagging'",
                "is a special case in that m equals the total number of p predictors. Below we can see the rule of thumb for m when doing regression with random forest."),
              withMathJax(helpText("$$m \\approx\\ \\frac{p}{3} $$")),
              h3("Drawbacks"),
              p("The biggest drawback of any aggregated tree method is the loss of interpretability that the single regression or classification tree possesses.")
            ),
            tabPanel("Model Fitting",
              h2(),
              fluidRow(
                # Left column: every control the server needs to fit the
                # three models (data split, CV scheme, ranger tuning grid).
                column(4,
                  wellPanel(
                    sliderInput("split", "Percentage of Data for the Training Set", min = 50,
                                max = 85, value = 75, step = 1),
                    actionButton("run", "Run Models"),
                    selectInput("resp", "Response Variable", choices = numVars[-2], selected = numVars[1]),
                    checkboxGroupInput("pred", "Predictor Variables", choices = allVars[c(2:5, 7:15)],
                                       selected = allVars[c(3, 4, 11)]),
                    selectInput("cv", "Please Select a Cross Validation Method", choices =
                                  c("Cross Validation" = "cv", "Repeated Cross Validation" = "repeatedcv")),
                    sliderInput("cvNum", "Number of Folds", min = 3, max = 20, value = 10, step = 1),
                    selectInput("cvRep", "Number of Repeats for Repeated CV", choices = c(2, 3, 4, 5, 6)),
                    p("Random Forest Tuning"),
                    selectizeInput("mtryNum", "Number of Variables to Try", choices = 2:67, selected = 7, multiple = TRUE,
                                   options = list('plugins' = list('remove_button'),
                                                  'create' = TRUE,
                                                  'persist' = FALSE)),
                    selectizeInput("sRule", "SplitRule", choices = c("variance", "extratrees"), selected = "variance", multiple = TRUE,
                                   options = list('plugins' = list('remove_button'),
                                                  'create' = TRUE,
                                                  'persist' = FALSE)),
                    selectizeInput("minNode", "Minimum Node Size", choices = c(4, 5, 6), selected = 5, multiple = TRUE,
                                   options = list('plugins' = list('remove_button'),
                                                  'create' = TRUE,
                                                  'persist' = FALSE))
                  )
                ),
                # Right column: fit summaries and test-set errors.
                column(8,
                  h3("Multiple Linear Regression Model Fit Summary"),
                  verbatimTextOutput("mlrModel"),
                  h3("Multiple Linear Regression Model Fit Error on Test Set"),
                  verbatimTextOutput("mlrErr"),
                  h3("Regression Tree Fit Summary"),
                  verbatimTextOutput("regTree"),
                  h3("Regression Tree Fit Error on Test Set"),
                  verbatimTextOutput("treeErr"),
                  h3("Random Forest Fit Summary"),
                  verbatimTextOutput("randForest"),
                  h3("Random Forest Fit Error On Test Set"),
                  withSpinner(verbatimTextOutput("rfErr"))
                )
              )
            ),
            tabPanel("Prediction",
              h3("Prediction for Video Game"),
              p("To predict a response based on the model described in the Model Fitting Tab, please select that in the 'Model' dropdown below,",
                "else the full model will be fit and predicted on. The default tuning caret uses will be used in the full models."),
              fluidRow(
                column(3,
                  selectInput("predMod", "Prediction Model", choices = c("Multiple Linear Regression", "Regression Tree", "Random Forest"))
                ),
                column(3,
                  selectInput("modPref", "Model", choices = c("Model Fitting Tab", "Full Model"), selected = "Full Model")
                ),
                column(3,
                  selectInput("regResp", "Response to Predict", choices = numVars[c(-7, -9)], selected = numVars[1])
                ),
                column(3,
                  actionButton("predButton", "Predict!")
                )
              ),
              br(),
              p("Use the options below to input various options for prediction."),
              # Predictor inputs; defaults and ranges are derived from the
              # observed ranges in the games data (see helper.R).
              fluidRow(
                column(4,
                  selectInput("plat", "Platform", choices = uPlat, selected = uPlat[9])
                ),
                column(4,
                  selectInput("year", "Year_of_Release", choices = uYear, selected = uYear[1])
                ),
                column(4,
                  selectInput("genre", "Genre", choices = uGenr, selected = uGenr[8])
                )
              ),
              fluidRow(
                column(4,
                  selectInput("publ", "Publisher", choices = uPubl, selected = uPubl[8])
                ),
                column(4,
                  selectInput("ratg", "Rating", choices = uRatg, selected = uRatg[3])
                ),
                column(4,
                  numericInput("critS", "Critic Score", value = 50, min = 0, max = 100, step = 1)
                )
              ),
              fluidRow(
                column(3,
                  numericInput("critC", "Critic Count", value = max(games$Critic_Count)/2, min = min(games$Critic_Count),
                               max = max(games$Critic_Count), step = 1)
                ),
                column(3,
                  numericInput("useS", "User Score", value = 5, min = 0, max = 10, step = 0.1)
                ),
                column(3,
                  numericInput("useC", "User Count", value = 5000, min = min(games$User_Count), max = max(games$User_Count),
                               step = 1)
                ),
                column(3,
                  sliderInput("naSal", "North American Sales", value = max(games$NA_Sales)/2, min = 0,
                              max = max(games$NA_Sales), step = 0.01)
                )
              ),
              fluidRow(
                column(3,
                  sliderInput("euSal", "European Sales", value = max(games$EU_Sales)/2, min = 0,
                              max = max(games$EU_Sales), step = 0.01)
                ),
                column(3,
                  sliderInput("jpSal", "Japanese Sales", value = max(games$JP_Sales)/2, min = 0,
                              max = max(games$JP_Sales), step = 0.01)
                ),
                column(3,
                  sliderInput("otSal", "Other Sales", value = max(games$Other_Sales)/2, min = 0,
                              max = max(games$Other_Sales), step = 0.01)
                ),
                column(3,
                  sliderInput("glSal", "Global Sales", value = max(games$Global_Sales)/2, min = 0,
                              max = max(games$Global_Sales), step = 0.01)
                )
              ),
              # Show only the prediction output that matches the chosen model,
              # with a loading spinner while the server computes it.
              conditionalPanel(
                condition = "input.predMod == 'Multiple Linear Regression'",
                withSpinner(verbatimTextOutput("mlrPred"),
                            proxy.height = "100px")
              ),
              conditionalPanel(
                condition = "input.predMod == 'Regression Tree'",
                withSpinner(verbatimTextOutput("regPred"),
                            proxy.height = "100px")
              ),
              conditionalPanel(
                condition = "input.predMod == 'Random Forest'",
                withSpinner(verbatimTextOutput("rfPred"),
                            proxy.height = "100px")
              )
            )
          )
        )
      )
    )
  )
))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/divisions_lords.R
\name{fetch_lords_divisions_all}
\alias{fetch_lords_divisions_all}
\title{Fetch key details on all Lords divisions}
\usage{
fetch_lords_divisions_all(from_date = NULL, to_date = NULL, on_date = NULL)
}
\arguments{
\item{from_date}{A string or Date representing a date. If a string is used
it should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is NULL, which means no records are excluded on the basis of
the from_date.}
\item{to_date}{A string or Date representing a date. If a string is used
it should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is NULL, which means no records are excluded on the basis of
the to_date.}
\item{on_date}{A string or Date representing a date. If a string is used
it should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is NULL, which means no records are excluded on the basis of
the on_date.}
}
\value{
A tibble of key details for each Lords division, with one row
per division.
}
\description{
\code{fetch_lords_divisions_all} fetches a dataframe from the Lords Votes
API showing key details about each division, with one row per division.
}
\details{
The from_date and to_date arguments can be used to filter divisions based
on the dates they occurred. The on_date argument is a convenience that sets
the from_date and to_date to the same given date. The on_date has priority:
if the on_date is set, the from_date and to_date are ignored.
}
| /man/fetch_lords_divisions_all.Rd | no_license | houseofcommonslibrary/clvotes | R | false | true | 1,574 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/divisions_lords.R
\name{fetch_lords_divisions_all}
\alias{fetch_lords_divisions_all}
\title{Fetch key details on all Lords divisions}
\usage{
fetch_lords_divisions_all(from_date = NULL, to_date = NULL, on_date = NULL)
}
\arguments{
\item{from_date}{A string or Date representing a date. If a string is used
it should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is NULL, which means no records are excluded on the basis of
the from_date.}
\item{to_date}{A string or Date representing a date. If a string is used
it should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is NULL, which means no records are excluded on the basis of
the to_date.}
\item{on_date}{A string or Date representing a date. If a string is used
it should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is NULL, which means no records are excluded on the basis of
the on_date.}
}
\value{
A tibble of key details for each Lords division, with one row
per division.
}
\description{
\code{fetch_lords_divisions_all} fetches a dataframe from the Lords Votes
API showing key details about each division, with one row per division.
}
\details{
The from_date and to_date arguments can be used to filter divisions based
on the dates they occurred. The on_date argument is a convenience that sets
the from_date and to_date to the same given date. The on_date has priority:
if the on_date is set, the from_date and to_date are ignored.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DigitalPicoTools.R
\docType{class}
\name{LFRdataframe-class}
\alias{LFRdataframe}
\alias{LFRdataframe-class}
\title{The LFRdataframe class}
\description{
An S4 base class. All other LFR class containing a data frame Table as main object inherit from this base class.
}
\section{Slots}{
\describe{
\item{\code{Table}}{Object of class \code{\link{data.frame}}, main container of the \code{\link{LFRdataframe}} class}
}}
\seealso{
\code{\link{LFRset}}, \code{\link{SampleCoverage}}, \code{\link{VariantAlleleInfo}}
}
| /man/LFRdataframe-class.Rd | no_license | chedonat/DigitalPicoTools | R | false | true | 594 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DigitalPicoTools.R
\docType{class}
\name{LFRdataframe-class}
\alias{LFRdataframe}
\alias{LFRdataframe-class}
\title{The LFRdataframe class}
\description{
An S4 base class. All other LFR class containing a data frame Table as main object inherit from this base class.
}
\section{Slots}{
\describe{
\item{\code{Table}}{Object of class \code{\link{data.frame}}, main container of the \code{\link{LFRdataframe}} class}
}}
\seealso{
\code{\link{LFRset}}, \code{\link{SampleCoverage}}, \code{\link{VariantAlleleInfo}}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.