blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ad6e86471a2482089763eb07c064c67b948c9715
|
afe9b94df6f6a3211ace68b127f57ca38a1965af
|
/R/createLink.R
|
1918f45da240e0beec47a3a9cb397c5498563fee
|
[] |
no_license
|
datastorm-open/antaresEditObject
|
d10e1f80cdcb4749a82b575ba037ddb642c183fb
|
49739939a8a4e4857db94031b5e76a81ddb03f7c
|
refs/heads/master
| 2021-07-21T14:38:29.878961
| 2017-10-31T08:41:54
| 2017-10-31T08:41:54
| 106,667,353
| 1
| 0
| null | 2017-10-12T08:42:54
| 2017-10-12T08:42:54
| null |
UTF-8
|
R
| false
| false
| 5,311
|
r
|
createLink.R
|
#' Create a link between two areas
#'
#' @param from The first area from which to create a link
#' @param to The second one
#' @param propertiesLink a named list containing the link properties, e.g. hurdles-cost
#' or transmission-capacities for example. See \code{propertiesLinkOptions}.
#' @param dataLink a matrix with five column corresponding to : trans. capacity (direct)
#' trans. capacity (indirect), impedances, hurdles cost (direct), hurdles cost (indirect).
#' If \code{NULL} (default), a matrix whose rows are equal to \code{1, 1, 0, 0, 0} is set. See Details
#' @param overwrite Logical, overwrite the previous between the two areas if exist
#' @param opts
#' List of simulation parameters returned by the function
#' \code{antaresRead::setSimulationPath}
#'
#' @note In Antares, areas are sorted in alphabetical order to establish links between.
#' For example, link between "fr" and "be" will appear under "be".
#' So the areas are sorted before creating the link between them, and \code{dataLink} is
#' rearranged to match the new order.
#'
#' @details The five times-series are:
#' \itemize{
#' \item{"NTC direct"}{the upstream-to-downstream capacity, in MW}
#' \item{"NTC indirect"}{the downstream-to-upstream capacity, in MW}
#' \item{"Impedances"}{virtual impedances that are used in economy simulations to give a physical meaning to raw outputs, when no binding constraints have been defined to enforce Kirchhoff's laws.}
#' \item{"Hurdle cost direct"}{an upstream-to-downstream transmission fee, in euro/MWh}
#' \item{"Hurdle cost indirect"}{a downstream-to-upstream transmission fee, in euro/MWh}
#' }
#'
#' @return An updated list containing various information about the simulation.
#' @export
#'
#' @importFrom assertthat assert_that
#' @importFrom stats setNames
#' @importFrom utils read.table write.table
#'
#' @examples
#' \dontrun{
#' createLink(from = "myarea", to = "myarea2")
#' }
createLink <- function(from, to, propertiesLink = propertiesLinkOptions(), dataLink = NULL, overwrite = FALSE, opts = antaresRead::simOptions()) {
  # inherits() rather than class(...) == "...": class() may return a
  # vector of length > 1, which would make the comparison unreliable.
  assertthat::assert_that(inherits(opts, "simOptions"))
  if (!is.null(dataLink))
    assertthat::assert_that(ncol(dataLink) == 5)
  # Control areas name: names can appear with upper case characters
  # (list.txt), but links are stored in lower case.
  from <- tolower(from)
  to <- tolower(to)
  # Antares stores a link under the alphabetically first of the two areas,
  # so swap 'from' and 'to' when they are not already in sorted order.
  areas <- c(from, to)
  if (!identical(areas, sort(areas))) {
    from <- areas[2]
    to <- areas[1]
  }
  # Input path of the study
  inputPath <- opts$inputPath
  assertthat::assert_that(!is.null(inputPath) && file.exists(inputPath))
  if (!from %in% opts$areaList)
    stop(paste(from, "is not a valid area"))
  if (!to %in% opts$areaList)
    stop(paste(to, "is not a valid area"))
  # Links previously defined from 'from'
  prev_links <- readIniFile(
    file = file.path(inputPath, "links", from, "properties.ini")
  )
  # Scalar conditions in if(): use the short-circuiting && instead of the
  # vectorized & (original code used &, which works here but is not idiomatic)
  if (to %in% names(prev_links) && !overwrite)
    stop(paste("Link to", to, "already exist"))
  if (to %in% names(prev_links) && overwrite) {
    # Remove the existing link first, then re-read the properties file
    opts <- removeLink(from = from, to = to, opts = opts)
    prev_links <- readIniFile(
      file = file.path(inputPath, "links", from, "properties.ini")
    )
  }
  prev_links[[to]] <- propertiesLink
  # Write INI file with the updated set of links
  writeIni(
    listData = prev_links,
    pathIni = file.path(inputPath, "links", from, "properties.ini"),
    overwrite = TRUE
  )
  # Initialization data: 8760 hourly rows; columns are NTC direct,
  # NTC indirect, impedances, hurdle cost direct, hurdle cost indirect.
  if (is.null(dataLink))
    dataLink <- matrix(data = c(rep(1, 8760 * 2), rep(0, 8760 * 3)), ncol = 5)
  # If the areas were swapped above, swap the direct/indirect columns so the
  # data still matches the original from -> to direction.
  if (!identical(areas, sort(areas))) {
    dataLink <- dataLink[, c(2, 1, 3, 5, 4)]
  }
  utils::write.table(
    x = dataLink, row.names = FALSE, col.names = FALSE, sep = "\t",
    file = file.path(inputPath, "links", from, paste0(to, ".txt"))
  )
  # Refresh the simulation options after modifying the study inputs
  suppressWarnings({
    res <- antaresRead::setSimulationPath(path = opts$studyPath, simulation = "input")
  })
  invisible(res)
}
#' Properties for creating a link
#'
#' Build the named list of link properties expected by \code{createLink}.
#'
#' @param hurdles_cost Logical, which is used to state whether (linear)
#' transmission fees should be taken into account or not in economy and adequacy simulations
#' @param transmission_capacities Character, one of \code{enabled}, \code{ignore} or \code{infinite}, which is used to state whether
#' the capacities to consider are those indicated in 8760-hour arrays or
#' if zero or infinite values should be used instead (actual values / set to zero / set to infinite)
#' @param display_comments Logical
#' @param filter_synthesis Output synthesis
#' @param filter_year_by_year Output year-by-year
#'
#' @return A named list
#' @export
#'
#' @examples
#' \dontrun{
#' propertiesLinkOptions()
#' }
propertiesLinkOptions <- function(hurdles_cost = FALSE,
                                  transmission_capacities = "enabled",
                                  display_comments = TRUE,
                                  filter_synthesis = c("hourly", "daily", "weekly", "monthly", "annual"),
                                  filter_year_by_year = c("hourly", "daily", "weekly", "monthly", "annual")) {
  # Assemble values first, then attach the INI-style hyphenated names.
  properties <- list(
    hurdles_cost,
    transmission_capacities,
    display_comments,
    filter_synthesis,
    filter_year_by_year
  )
  names(properties) <- c(
    "hurdles-cost",
    "transmission-capacities",
    "display-comments",
    "filter-synthesis",
    "filter-year-by-year"
  )
  properties
}
|
b8f1e24355dc46b154f85c686ea7984f64a68685
|
150ddbd54cf97ddf83f614e956f9f7133e9778c0
|
/tests/testthat/test-symmetrise.R
|
bb72b6a242945a2daee02d376d41e82c6b4224a4
|
[
"CC-BY-4.0"
] |
permissive
|
debruine/webmorphR
|
1119fd3bdca5be4049e8793075b409b7caa61aad
|
f46a9c8e1f1b5ecd89e8ca68bb6378f83f2e41cb
|
refs/heads/master
| 2023-04-14T22:37:58.281172
| 2022-08-14T12:26:57
| 2022-08-14T12:26:57
| 357,819,230
| 6
| 4
|
CC-BY-4.0
| 2023-02-23T04:56:01
| 2021-04-14T07:47:17
|
R
|
UTF-8
|
R
| false
| false
| 4,162
|
r
|
test-symmetrise.R
|
# Tests for symmetrize()/symmetrise(). For each supported template set we
# build four symmetrised versions of demo stimuli and check the algebraic
# relationships between their template points:
#   sym_both  - default call (shape and color symmetry)
#   sym_shape - shape symmetry only (color = 0)
#   sym_color - color symmetry only (shape = 0)
#   sym_anti  - anti-symmetric shape (shape = -1, color = 0)
#wm_opts(server = "https://webmorph.test")
# frl ----
test_that("frl", {
skip_on_cran()
stimuli <- demo_tems("frl")
sym_both <- symmetrize(stimuli)
sym_shape <- symmetrize(stimuli, color = 0)
sym_color <- symmetrize(stimuli, shape = 0)
sym_anti <- symmetrize(stimuli, shape = -1.0, color = 0)
# c(stimuli, sym_both, sym_shape, sym_color, sym_anti) |>
# plot(maxwidth = 600, nrow = 2)
o_pts <- stimuli[[1]]$points
b_pts <- sym_both[[1]]$points
s_pts <- sym_shape[[1]]$points
c_pts <- sym_color[[1]]$points
a_pts <- sym_anti[[1]]$points
# color-only symmetry keeps the original shape (up to integer truncation)
expect_equal(floor(o_pts), c_pts)
# shape+color and shape-only produce the same point positions
expect_equal(b_pts, s_pts)
# but shape-only and color-only must differ somewhere
expect_false(all(s_pts == c_pts))
# anti-symmetric points mirror the symmetric ones about the original shape
expect_equal(c_pts + (c_pts - s_pts), a_pts)
# alias: symmetrise() must behave identically to symmetrize()
sym_shape2 <- symmetrise(stimuli, color = 0)
expect_equal(sym_shape2[[1]]$points, s_pts)
expect_equivalent(compare(sym_shape, sym_shape2), 0)
})
# fpp106 ----
# Same relationship checks as the frl test, for the fpp106 template set.
test_that("fpp106", {
skip_on_cran()
tem_id <- "fpp106"
stimuli <- demo_tems(tem_id)
sym_both <- symmetrize(stimuli, tem_id = tem_id)
sym_shape <- symmetrize(stimuli, color = 0, tem_id = tem_id)
sym_color <- symmetrize(stimuli, shape = 0, tem_id = tem_id)
sym_anti <- symmetrize(stimuli, shape = -1.0, color = 0, tem_id = tem_id)
# c(stimuli, sym_both, sym_shape, sym_color, sym_anti) |>
# draw_tem() |>
# plot(maxwidth = 600, nrow = 2)
o_pts <- stimuli[[1]]$points
b_pts <- sym_both[[1]]$points
s_pts <- sym_shape[[1]]$points
c_pts <- sym_color[[1]]$points
a_pts <- sym_anti[[1]]$points
expect_equal(floor(o_pts), c_pts)
expect_equal(b_pts, s_pts)
expect_false(all(s_pts == c_pts))
expect_equal(c_pts + (c_pts - s_pts), a_pts)
})
# fpp83 ----
# Same relationship checks for the fpp83 template set.
test_that("fpp83", {
skip_on_cran()
tem_id <- "fpp83"
stimuli <- demo_tems(tem_id)
sym_both <- symmetrize(stimuli, tem_id = tem_id)
sym_shape <- symmetrize(stimuli, color = 0, tem_id = tem_id)
sym_color <- symmetrize(stimuli, shape = 0, tem_id = tem_id)
sym_anti <- symmetrize(stimuli, shape = -1.0, color = 0, tem_id = tem_id)
# c(stimuli, sym_both, sym_shape, sym_color, sym_anti) |>
# draw_tem() |>
# plot(maxwidth = 600, nrow = 2)
o_pts <- stimuli[[1]]$points
b_pts <- sym_both[[1]]$points
s_pts <- sym_shape[[1]]$points
c_pts <- sym_color[[1]]$points
a_pts <- sym_anti[[1]]$points
expect_equal(floor(o_pts), c_pts)
expect_equal(b_pts, s_pts)
expect_false(all(s_pts == c_pts))
expect_equal(c_pts + (c_pts - s_pts), a_pts)
})
# dlib70 ----
# Same relationship checks for dlib70; additionally skipped when offline.
test_that("dlib70", {
skip_on_cran()
skip_if_offline()
tem_id <- "dlib70"
stimuli <- demo_tems(tem_id)
sym_both <- symmetrize(stimuli, tem_id = tem_id)
sym_shape <- symmetrize(stimuli, color = 0, tem_id = tem_id)
sym_color <- symmetrize(stimuli, shape = 0, tem_id = tem_id)
sym_anti <- symmetrize(stimuli, shape = -1.0, color = 0, tem_id = tem_id)
# c(stimuli, sym_both, sym_shape, sym_color, sym_anti) |>
# draw_tem() |>
# plot(maxwidth = 600, nrow = 2)
o_pts <- stimuli[[1]]$points
b_pts <- sym_both[[1]]$points
s_pts <- sym_shape[[1]]$points
c_pts <- sym_color[[1]]$points
a_pts <- sym_anti[[1]]$points
expect_equal(floor(o_pts), c_pts)
expect_equal(b_pts, s_pts)
expect_false(all(s_pts == c_pts))
expect_equal(c_pts + (c_pts - s_pts), a_pts)
})
# dlib7 ----
# Same relationship checks for dlib7, using only the first demo stimulus.
test_that("dlib7", {
skip_on_cran()
tem_id <- "dlib7"
stimuli <- demo_tems(tem_id)[1]
sym_both <- symmetrize(stimuli, tem_id = tem_id)
sym_shape <- symmetrize(stimuli, color = 0, tem_id = tem_id)
sym_color <- symmetrize(stimuli, shape = 0, tem_id = tem_id)
sym_anti <- symmetrize(stimuli, shape = -1.0, color = 0, tem_id = tem_id)
# c(stimuli, sym_both, sym_shape, sym_color, sym_anti) |>
# draw_tem() |>
# plot(maxwidth = 600, nrow = 2)
o_pts <- stimuli[[1]]$points
b_pts <- sym_both[[1]]$points
s_pts <- sym_shape[[1]]$points
c_pts <- sym_color[[1]]$points
a_pts <- sym_anti[[1]]$points
expect_equal(floor(o_pts), c_pts)
expect_equal(b_pts, s_pts)
expect_false(all(s_pts == c_pts))
expect_equal(c_pts + (c_pts - s_pts), a_pts)
})
# Restore the production server (the test-server line at the top is
# commented out, but reset explicitly so later test files are unaffected).
wm_opts(server = "https://webmorph.org")
|
36fb3ae17533ad2b38942c78e7e2077171256e55
|
bd8e2bb20817f4f829db6bd308ba686812b20027
|
/man/neuro_install.Rd
|
c1121e1bd30e09250a3c0614971ba26b65fb1639
|
[] |
no_license
|
muschellij2/neurocInstall
|
3a1d9524cf8785589bb58ae3a072a1aee82ca4c8
|
c1792997fe2ce04679e25c1a2d3e05359cc652e8
|
refs/heads/master
| 2022-12-29T19:36:11.536992
| 2020-10-15T19:46:58
| 2020-10-15T19:46:58
| 66,025,383
| 0
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,294
|
rd
|
neuro_install.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/neuro_install.R
\name{neuro_install}
\alias{neuro_install}
\alias{neuroc_install}
\alias{neurocLite}
\title{Neuroconductor Installer}
\usage{
neuro_install(
repo,
release = c("stable", "current"),
release_repo = latest_neuroc_release(),
upgrade_dependencies = FALSE,
type = getOption("pkgType"),
...
)
neuroc_install(...)
neurocLite(...)
}
\arguments{
\item{repo}{Package name in neuroconductor}
\item{release}{Stable or current (development) versions/branches}
\item{release_repo}{Repository for release repository, passed to
\code{\link{install.packages}}. If \code{release_repo = "github"},
then it will install using GitHub. If you set this using
\code{\link{make_release_version}} or specify the URL directly,
it will override \code{release} option.}
\item{upgrade_dependencies}{Should dependencies be updated?
passed to \code{\link[devtools]{install}} if using
\code{release_repo = "github"}}
\item{type}{character, indicating the type of package to download and
install, passed to \code{\link{install.packages}}.}
\item{...}{additional arguments passed to
\code{\link{install.packages}}
or \code{\link[devtools]{install_github}} if
\code{release_repo = "github"}}
}
\value{
Result from \code{\link{install.packages}} or
\code{\link[devtools]{install_github}}
}
\description{
Install function for neuroconductor packages
}
\examples{
\donttest{
tlib = tempfile()
dir.create(tlib, showWarnings = FALSE)
system.time({
install.packages("oro.asl",
lib = tlib,
repos = "https://neuroconductor.org/releases/2019/12/",
verbose = TRUE)
})
repos = getOption("repos")
print(repos)
#if (repos["CRAN"] == "@CRAN@") {
# repos["CRAN"] = "https://cloud.r-project.org"
# options(repos = repos)
#}
options(repos = NULL)
print(getOption("repos"))
neuro_install("oro.asl", lib = tlib,
release_repo = "https://neuroconductor.org/releases/2019/12")
options(repos = repos)
}
\donttest{
options(repos = "http://cran.r-project.org")
neuro_install("cifti", type = "source", lib = tlib, verbose = TRUE)
neuro_install("cifti",
release_repo = latest_neuroc_release(),
lib = tlib)
neuro_install("cifti", release_repo = "github")
}
}
|
70fee9c0724d381e9f3dd3ee97ebe11291f91e28
|
98550ab8b21f1d86f5954886911fc01498ef7699
|
/R/packageSample.R
|
aa6980ecc29b13654fad3c1833c130c723eab576
|
[] |
no_license
|
lindbrook/packageRank
|
a68ee94e0ed3621e7f10239f1eb2d12dbb7c6530
|
a83ebfaa05f6ee82b7e5ae76cf0b8a4c296b4dfb
|
refs/heads/master
| 2023-08-04T21:18:01.261280
| 2023-08-01T22:00:29
| 2023-08-01T22:00:29
| 184,319,415
| 27
| 1
| null | 2023-08-01T22:00:20
| 2019-04-30T19:25:45
|
R
|
UTF-8
|
R
| false
| false
| 4,449
|
r
|
packageSample.R
|
#' Stratified random sample of packages.
#'
#' Logs from RStudio's CRAN Mirror http://cran-logs.rstudio.com/
#' Packages are ranked by their download-count percentile, binned into
#' 5-percent-wide strata, and sampled independently within each stratum.
#' @param cran_log Object. CRAN log.
#' @param sample.pct Numeric. Percentage of each stratum to sample (sample.pct / 100 of the stratum size).
#' @param multi.core Logical or Numeric. \code{TRUE} uses \code{parallel::detectCores()}. \code{FALSE} uses one, single core. You can also specify the number logical cores. Mac and Unix only.
#' @noRd
packageSample <- function(cran_log, sample.pct = 1, multi.core = TRUE) {
init.pkgs <- unique(cran_log$package) # remove duplicated pkgs (diff versions)
init.pkgs <- stats::na.omit(init.pkgs)
# keep only log rows whose package name is non-NA
pkgs <- cran_log[cran_log$package %in% init.pkgs, ]
freqtab <- table(pkgs$package)
cores <- multiCore(multi.core)
# percentile rank of each package: fraction of packages with a strictly
# smaller download count (O(n^2) overall, parallelized over packages)
rank.percentile <- parallel::mclapply(names(freqtab), function(nm) {
mean(freqtab < freqtab[nm])
}, mc.cores = cores)
rank.percentile <- unlist(rank.percentile)
pct <- data.frame(pkg = names(freqtab), percentile = rank.percentile,
stringsAsFactors = FALSE)
pct <- pct[order(pct$percentile, decreasing = TRUE), ]
row.names(pct) <- NULL
# bins: 5%-wide percentile strata, from (0.95, 1] down to (0, 0.05] #
breaks <- seq(1, 0, -0.05)
bin.id <- lapply(2:length(breaks), function(i) {
which(pct$percentile > breaks[i] & pct$percentile <= breaks[i - 1])
})
# set seed for random sampling (date-based, so results are stable within a day)
set.seed(as.numeric(Sys.Date()))
sample.id <- lapply(seq_along(bin.id), function(i) {
sample(bin.id[[i]], round(sample.pct / 100 * length(bin.id[[i]])))
})
names(sample.id) <- paste(round(breaks[-1], 2))
# return the sampled package names, ordered from highest stratum down
pct[unlist(sample.id), "pkg"]
}
#' Stratified random sample of packages for versionPlot().
#'
#' Logs from RStudio's CRAN Mirror http://cran-logs.rstudio.com/
#' Candidate packages are taken from the first Wednesday's log, restricted
#' to CRAN or Archive membership, ranked by download-count percentile,
#' binned into 5-percent-wide strata, and sampled.
#' @param lst Object. List of CRAN download logs data frames.
#' @param repository Character. "cran" or "archive".
#' @param strata.samples Numeric. Number of samples from each stratum.
#' @param package.samples Numeric. Number of packages to sample from across strata for use in versionPlot().
#' @param use.seed Logical. Use today's date as seed.
#' @param multi.core Logical or Numeric. \code{TRUE} uses \code{parallel::detectCores()}. \code{FALSE} uses one, single core. You can also specify the number logical cores. Mac and Unix only.
#' @note July benchmarks: cran = 61.684; archive = 35.597.
#' @noRd
packageSample2 <- function(lst, repository = "cran", strata.samples = 20,
package.samples = 100, use.seed = TRUE, multi.core = TRUE) {
cores <- multiCore(multi.core)
# NOTE(review): assumes names(lst) are parseable dates - confirm with callers
dts <- as.Date(names(lst))
# seq(as.Date("2020-07-01"), as.Date("2019-07-31"), by = "day")
first <- lst[[1]]
last <- lst[[length(lst)]]
# use the first Wednesday's log to define the candidate package set
first.wed <- which(weekdays(dts, abbreviate = TRUE) == "Wed")[1]
wed.pkgs <- unique(lst[[first.wed]]$package)
# estimate for packages based on current (now) CRAN and Archive
cran.pkgs <- cranPackages(multi.core = cores)
all.archive <- archivePackages(multi.core = cores)
# packages found only in the Archive (no longer on CRAN)
archive.pkgs <- all.archive[!all.archive %in% cran.pkgs$package]
wed.cran <- wed.pkgs[wed.pkgs %in% cran.pkgs$package]
wed.not_cran <- wed.pkgs[!wed.pkgs %in% cran.pkgs$package]
if (repository == "archive") {
tmp <- wed.not_cran[wed.not_cran %in% archive.pkgs]
} else if (repository == "cran") {
tmp <- wed.cran[wed.cran %in% cran.pkgs$package]
# NOTE(review): "respository" is a typo in the error message below; left
# unchanged here since it is a runtime string.
} else stop('"respository" must be "archive" or "cran".')
# keep only packages present in both the first and the last log
tmp <- tmp[tmp %in% unique(first$package)]
pkgs <- tmp[tmp %in% unique(last$package)]
p.data <- first[first$package %in% pkgs, ]
freqtab <- table(p.data$package)
# percentile rank by download count in the first log (see packageSample)
rank.percentile <- parallel::mclapply(names(freqtab), function(nm) {
mean(freqtab < freqtab[nm])
}, mc.cores = cores)
rank.percentile <- unlist(rank.percentile)
pct <- data.frame(pkg = names(freqtab), percentile = rank.percentile,
stringsAsFactors = FALSE)
pct <- pct[order(pct$percentile, decreasing = TRUE), ]
row.names(pct) <- NULL
# bins for stratification: 5%-wide percentile strata #
breaks <- seq(1, 0, -0.05)
bin.id <- lapply(2:length(breaks), function(i) {
which(pct$percentile > breaks[i] & pct$percentile <= breaks[i - 1])
})
# use seed for random sampling (date-based, reproducible within a day)
if (use.seed) set.seed(as.numeric(Sys.Date()))
# vapply(bin.id, length, integer(1L))
# strata smaller than strata.samples are flagged NA and dropped below
sample.id <- lapply(bin.id, function(x) {
if (length(x) == 0) NA
else sample(x, strata.samples)
})
names(sample.id) <- paste(round(breaks[-1], 2))
sel <- vapply(sample.id, function(x) all(!is.na(x)), logical(1L))
sample.id <- sample.id[sel]
# final down-sample across all strata to package.samples names
sel <- sample(unlist(sample.id), package.samples)
pct[sel, "pkg"]
}
|
c4e46b89cf13b64e4b470a21900fe6455602df18
|
5aff17c35e023029c0c7cec84f56404067826b60
|
/problem/day6/kimhokyeong_191204.R
|
4e8b556283d1f2946cd0a40daa1be408e2a9d39b
|
[] |
no_license
|
holaho-kim/workR
|
9a3bcf02ee4332b967a331f12398935930c026d2
|
7ce670de7369f90bc1beae17608e36bbe404876c
|
refs/heads/master
| 2022-05-30T23:16:33.100793
| 2020-01-16T04:44:33
| 2020-01-16T04:44:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,554
|
r
|
kimhokyeong_191204.R
|
# Homework script (original comments translated from Korean; plot titles and
# axis labels are runtime strings and are kept in Korean as in the original).
# Submission rules: submit as an R script named "name_date.R" with author
# name and date recorded at the top.
#
# Q1)
# Income and years of education for 10 employees. Draw a scatter plot,
# compute the correlation coefficient, and describe the relationship
# between income and education.
#
# income    121 99 41 35 40 29 35 24 50 60
# education 19 20 16 16 18 12 14 12 16 17
income <- c( 121, 99, 41, 35, 40, 29, 35, 24, 50, 60 )
period <- c( 19, 20, 16, 16, 18, 12, 14, 12, 16, 17 )
plot( period, income, main = "교육기간-수입", xlab = "교육기간", ylab = "수입" )
ds <- data.frame( period, income )
cor( ds )
# Income and education are strongly positively correlated: the longer the
# education, the higher the income.
#
# Q2)
# Grades and weekly TV-watching hours for 10 students. Draw a scatter plot,
# compute the correlation coefficient, and describe the relationship.
#
# grade   77.5 60 50 95 55 85 72.5 80 92.5 87.5
# tv time 14 10 20 7 25 9 15 13 4 21
grade <- c( 77.5, 60, 50, 95, 55, 85, 72.5, 80, 92.5, 87.5 )
tvTime <- c( 14, 10, 20, 7, 25, 9, 15, 13, 4, 21 )
plot( tvTime, grade, main = "주당 TV시청시간-성적", xlab = "시청시간", ylab = "성적" )
ds <- data.frame( tvTime, grade )
cor( ds )
# Grades and TV time are strongly (negatively) correlated: more TV time
# generally means lower grades.
#
# Q3)
# Using the built-in mtcars dataset, compute the correlation between mpg and
# the other variables; show which variable is most strongly correlated with
# mpg, together with a scatter plot.
str( mtcars )
# correlations of mpg with the other variables (mpg itself excluded)
cor_mpg <- cor( mtcars )[ -1, "mpg" ]
cor_mpg
# index of the variable with the largest correlation (in absolute value)
max_cor_mpg_idx <- which.max( abs( cor_mpg ) )
# BUG FIX: the original referenced the undefined variable 'min_cor_mpg_idx'
max_cor_mpg_nm <- names( cor_mpg )[ max_cor_mpg_idx ]
# scatter plot of mpg against the most correlated variable
# (generalized: the original hard-coded mtcars$wt on the y axis)
plot( mtcars$mpg, mtcars[[ max_cor_mpg_nm ]], xlab = "mpg", ylab = max_cor_mpg_nm )
# mpg correlates most strongly with wt.
#
# Q4)
# Projected total population (thousands) for 2015-2026; draw a line graph
# with the year on the x axis.
year <- 2015:2026
people <- c( 51014, 51245, 51446, 51635, 51811, 51973, 52123, 52261, 52388, 52504, 52609, 52704 )
plot( year, people, main = "2015년부터 2026년도까지의 예상 인구수", type = "b", lty = 1, lwd = 1, xlab = "연도", ylab = "총인구(천명)" )
#
# Q5)
# Using the built-in trees dataset:
# (1) scatter plot and correlation of girth (diameter) vs height
plot( trees$Girth, trees$Height, main = "나무의 지름 - 높이", xlab = "지름", ylab = "높이" )
cor( trees )[ 1:2, 1:2 ]
# (2) pairwise scatter plots and correlations of all three variables
pairs( trees, main = "나무의 지름 - 높이" )
cor( trees )
|
46cae5b38d2e765aa68edd80d0653167040ae02f
|
7fe6c7028fad18327bc5e4b8eef89686de80ac7b
|
/plot2.R
|
0c71ba271f5c2dd96defc1ad0a0b1b802befc80b
|
[] |
no_license
|
vinodsrin/RepData_PeerAssessment1
|
6012f2ce73be67d8b775970a8aefff9c83c7c78e
|
dd74d5d1c2e83374a63cba597896434a8f50c369
|
refs/heads/master
| 2021-01-21T16:49:04.587892
| 2016-01-13T17:11:11
| 2016-01-13T17:11:11
| 49,567,062
| 0
| 0
| null | 2016-01-13T10:32:01
| 2016-01-13T10:32:01
| null |
UTF-8
|
R
| false
| false
| 1,046
|
r
|
plot2.R
|
# plot2.R: plot Global Active Power over time for 1-2 Feb 2007 from the
# household power consumption dataset, saving the chart to plot2.png.
#Uncomment below lines to download data required is not downloaded
#download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "./powerconsumption.zip")
#if(!file.exists("./powerconsumption")) {dir.create("./powerconsumption")}
#unzip("powerconsumption.zip", exdir = "./powerconsumption")
#Load data (semicolon-separated text file with a header row)
powerconsumptionfile <- "./powerconsumption/household_power_consumption.txt"
powerconsumptiondata <- read.table(powerconsumptionfile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
#Filter data for 1st and 2nd Feb 2007 (dates are stored as d/m/Y strings)
PlotData <- powerconsumptiondata[powerconsumptiondata$Date %in% c("1/2/2007","2/2/2007") ,]
#Concatenate date and time into POSIXlt timestamps
datetime <- strptime(paste(PlotData$Date, PlotData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
#Convert data to numeric (the column is read as character - presumably due
#to non-numeric missing-value markers in the raw file; confirm against data)
globalActivePower <- as.numeric(PlotData$Global_active_power)
#Plot Global Active Power as a line chart into a 480x480 png
png("plot2.png", width=480, height=480)
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
8427b5411ae52937fd97edcbfa2a7e1d0822bb05
|
661a2e1bdd2eaf48c3ec7f93531821ee4e574292
|
/man/getAmendments.Rd
|
8f715c412030ff7e93e05ba58ad0e40970b5cbba
|
[] |
no_license
|
cran/washex
|
dc94cae67e9654d72184e7d37bb9c1c0ce763a27
|
561ac582539d94b46c3e1020386a0712ac4c4a5d
|
refs/heads/master
| 2023-09-04T21:38:52.121744
| 2021-11-17T15:00:02
| 2021-11-17T15:00:02
| 362,524,979
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,320
|
rd
|
getAmendments.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getAmendments.R
\name{getAmendments}
\alias{getAmendments}
\title{Get amendments to a bill}
\usage{
getAmendments(
biennium,
billNumber,
paired = TRUE,
type = c("df", "list", "xml")
)
}
\arguments{
\item{biennium}{Character vector representing the biennium(s) to be
searched. Each argument should take the form "XXXX-YY"}
\item{billNumber}{Character or numeric vector containing the bill number(s)
to be retrieved.}
\item{paired}{If TRUE, will assume that equal length vectors represent
paired data. Set to FALSE to generate an NxN grid of input
arguments. Applies to equal length vector inputs only.}
\item{type}{One of "df", "list", or "xml". Specifies the format for
the output.}
}
\value{
\code{getAmendments} returns an object of type equal to the
\code{type} argument (defaults to dataframe)
}
\description{
Get a list of all proposed amendments (accepted and rejected) on the bill,
including the URL to the amendment text
}
\examples{
## get amendments for a single bill
getAmendments("2007-08", "1001")
## get amendments for a specific set of bills
years <- c("2005-06","2007-08","2007-08")
bills <- c(1447,1219,1001)
getAmendments(years, bills, paired = TRUE, type = "df")
}
|
7473a5c309788517b8cb15745394b12c7bdb6e71
|
cc32077bcdf07924ad31e1cf0a75d13b9194ecd7
|
/testOUgeneration.R
|
14ff68f04b851a800847573dcd99c6e22c283111
|
[] |
no_license
|
AndrewLJackson/nodes-networks-energy
|
79ad34a08e0268f1e171d7cbc87e26342659f800
|
9770b6bde8af9aa983018c351ab07f8cd9243826
|
refs/heads/master
| 2020-12-24T19:27:18.059731
| 2016-03-11T12:15:39
| 2016-03-11T12:15:39
| 27,050,236
| 0
| 0
| null | 2015-10-12T20:14:11
| 2014-11-23T22:20:08
|
R
|
UTF-8
|
R
| false
| false
| 1,908
|
r
|
testOUgeneration.R
|
# Exploration of Ornstein-Uhlenbeck (OU) process generation with the sde
# package: simulate OU paths with different noise levels, inspect their
# variance, and test the correlation between paths sharing the same seed.
library(sde)
library(viridis)
palette(viridis(8))
# -----------------------------------------------------------------
# Ornstein-Uhlenbeck process
# -----------------------------------------------------------------
set.seed(1)
# drift dX = -20 x dt, diffusion sigma = 0.1 (sde.sim takes expressions)
d <- expression(0 - 20 * x)
s <- expression(0.1)
time.max <- 100
N <- 10^3
y <- sde.sim(X0=0, drift=d, sigma=s, T=1, N=N, M=1)
# NOTE(review): the simulation horizon is T = 1 but the x axis is rescaled
# to time.max - confirm this rescaling of the time axis is intended.
x <- seq(0, time.max, length = N+1)
par(mfrow=c(1,2))
plot(x, y, main="Ornstein-Uhlenbeck", type="l")
print(var(y))
# linear interpolation of the simulated path, subsampled at 100 points
yy <- approxfun(x, y)
times <- seq(0, time.max, length = 100)
points(times, yy(times), pch = 19)
# -----------------------------------------------------------------
# same Ornstein-Uhlenbeck process with larger s
# -----------------------------------------------------------------
set.seed(1)
d <- expression(0 - 20 * x)
s <- expression(0.2)
time.max <- 100
N <- 10^3
y2 <- sde.sim(X0=0, drift=d, sigma=s, T=1, N=N, M=1)
x <- seq(0, time.max, length = N+1)
plot(x, y2, main="Ornstein-Uhlenbeck", type="l")
# NOTE(review): this prints var(y), not var(y2) - possibly intentional for
# comparison with the first run; confirm.
print(var(y))
yy <- approxfun(x, y2)
times <- seq(0, time.max, length = 100)
points(times, yy(times), pch = 19)
# -----------------------------------------------------------------
# test correlation between the two processes
# (same seed, different sigma, so the paths are strongly related)
print(cov(cbind(y, y2)))
print(cor(y,y2))
# -----------------------------------------------------------------
# same Ornstein-Uhlenbeck process with larger s
# -----------------------------------------------------------------
# stronger mean reversion (rate 100), four noise levels compared in one plot
d <- expression(0 - 100 * x)
time.max <- 500
N <- 10^4
s.list <- c(expression(0.01) , expression(0.1) , expression(1) , expression(10))
results <- matrix(NA, N+1, length(s.list))
for (i in 1:length(s.list)){
set.seed(1)
y2 <- sde.sim(X0=0, drift=d, sigma=s.list[i], T=1, N=N, M=1)
results[, i] <- y2
}
x <- seq(0, time.max, length = N+1)
par(mfrow=c(1,1))
matplot(x, results, type="l", col = 1:ncol(results) , lty = 1)
|
ddd4eb25f10d8cd5f11a5fb92780181060598b9a
|
af5841763d8f0fdd5ca28114ff78324b5dbaa36b
|
/R/Loader.R
|
5ff671e1c98195da9aa86e20c23f7341a2968567
|
[] |
no_license
|
RJHKnight/TCALoader
|
fe8a56973c3303ef880450952bc9677b2074a49e
|
be8ea6a376c0730c6e9047df85c4ef0893a5fa48
|
refs/heads/master
| 2021-03-28T15:56:16.465896
| 2020-10-13T05:31:00
| 2020-10-13T05:31:00
| 247,876,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,076
|
r
|
Loader.R
|
# Default number of leading rows to skip for each supported file format.
DEFAULT_CSV_OFFSET <- 0
DEFAULT_EXCEL_OFFSET <- 4
# Separator used when splitting a path into its components.
FILE_SEP <- "/"
#' Load Multiple Post Trades
#'
#' Load multiple post trade files (csv and excel supported) returning a single
#' data.frame.
#'
#' @param path path containing the files to read
#' @param pattern pattern for files to match
#' @param add_filename should the file name be added to the returned data.frame
#' @param sheet_name sheet name to load (excel only)
#' @param row_offset offset to use when loading data
#' @param standardise should the combined result be passed through
#'   \code{standardise_post_trade} before being returned
#'
#' @return data.frame containing the loaded files.
#'
#' @export
load_multiple <- function(path, pattern = "*.xlsx", add_filename = FALSE, sheet_name = "Detail", row_offset = NA, standardise = TRUE)
{
# find all matching files and load each one, row-binding the results
files <- list.files(path = path,
pattern = pattern,
full.names = TRUE)
all_results <- purrr::map_dfr(files, handle_one, add_filename, sheet_name, row_offset)
if (standardise)
{
all_results <- standardise_post_trade(all_results)
}
return (all_results)
}
# Load a single post trade file, dispatching on its extension.
#
# file_name    - path to the file to load
# add_filename - if TRUE, add the stripped file name as a 'file_name' column
# sheet_name   - sheet to read (excel files only)
# row_offset   - rows to skip; NA means use the format-specific default
#
# Returns a data.frame with the file contents.
handle_one <- function(file_name, add_filename, sheet_name, row_offset)
{
  print(paste("Loading", file_name))
  res <- NULL
  if (stringr::str_detect(file_name, "\\.csv$"))
  {
    # BUG FIX: was DEFUALT_CSV_OFFSET (typo) - an undefined object, so the
    # csv branch errored whenever row_offset was NA.
    res <- load_from_csv(file_name, ifelse(is.na(row_offset), DEFAULT_CSV_OFFSET, row_offset))
  }
  else
  {
    res <- load_from_excel(file_name, sheet_name, ifelse(is.na(row_offset), DEFAULT_EXCEL_OFFSET, row_offset))
  }
  if (add_filename){
    # Base assignment instead of %>% mutate(): does not depend on
    # dplyr/magrittr being attached (neither is namespaced in this file).
    res[["file_name"]] <- get_stripped_file_name(file_name)
  }
  return (res)
}
# Strip the directory part from a file path, returning just the file name.
# Warning - not vectorised: intended for a single path at a time.
get_stripped_file_name <- function(file_name)
{
  parts <- unlist(strsplit(file_name, FILE_SEP, fixed = TRUE))
  parts[length(parts)]
}
# Read a semicolon-delimited post trade csv file.
#
# file_name  - path to the csv file
# row_offset - number of leading rows to skip before the header
#
# Returns the parsed data.frame/tibble.
load_from_csv <- function(file_name, row_offset = 0)
{
  results <- readr::read_delim(
    file_name,
    ";",
    escape_double = FALSE,
    trim_ws = TRUE,
    # BUG FIX: row_offset was accepted but never applied; pass it as 'skip'
    # (the default of 0 matches the previous effective behaviour).
    skip = row_offset)
  return (results)
}
# Read one sheet of a post trade excel file, skipping row_offset leading rows.
load_from_excel <- function(file_name, sheet_name, row_offset)
{
  readxl::read_excel(file_name,
                     sheet = sheet_name,
                     skip = row_offset)
}
|
ee9eddd07c91d898b83456828d2ea4a9991cae9d
|
0405bb266387a503a59b160688d8903e4bfc850f
|
/11_1_Tree_mask_updater_Nigeria_v7_1.R
|
939e7d43a59bea2f7ef48ecee29f891fdbd3485f
|
[] |
no_license
|
HKCaesar/RemoteSensing_automated_workflow_agriculture
|
6162ef0d27fc7701fc44e18b86770a112c147fca
|
671c9ba2b78f7157b1b2c3da1b03687c943df822
|
refs/heads/master
| 2020-12-03T05:08:53.357289
| 2017-04-30T15:55:45
| 2017-04-30T15:55:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31,961
|
r
|
11_1_Tree_mask_updater_Nigeria_v7_1.R
|
#======================================================================================
# Variable definitions, data import, preparation
#======================================================================================
#rm(list=ls(all=TRUE))
graphics.off()
require(rgdal)
require(Rcpp)
WriteLog <- TRUE
Show_graph <- FALSE
Server <- FALSE
Show_ms <- TRUE
if(Server){
Root <- "/home/tolpekin/stars/derived/DG_v8"
Path_lib <- "/home/tolpekin/stars/derived/scripts/v8"
} else{
Root <- "S:/derived/DG_v8"
Path_lib <- "S:/derived/scripts/v8"
}
setwd(Path_lib)
#source("scale_space_lib_v7.R")
source("tree_measurement_lib_v9.R")
sourceCpp("matcher_v5.cpp")
sourceCpp("blobs_lib_v4.cpp")
sourceCpp("tree_mask_from_blobs_v3.cpp")
sourceCpp("MRF_lib.cpp")
# For Nigeria
master_id <- "053734892380_01"
Path_images <- paste(Root,"4_categ","NG_Kofa",sep="/")
# Get image names from the input dir: filename contains tif but not aux
setwd(Path_images)
Files <- list.files(".",pattern=".tif",ignore.case = TRUE)
aux_files <- grep(".aux",Files,ignore.case = TRUE)
if(length(aux_files)>0) Files <- Files[-aux_files]
ind_master <- grep(master_id, Files,ignore.case = TRUE)
All_files <- Files
# Set input & output directories
Path_in <- paste(Root, "5_tree_measurement",sep="/")
# Two slave images used to update the mask. Run sequentially.
#image_id <- "054112895100_01"
image_id <- "054399067010_01"
Path_out <- paste(Root, "5_tree_measurement",image_id,sep="/")
if(!file.exists(Path_out))dir.create(Path_out,showWarnings=FALSE, recursive=TRUE)
Path_tmp <- paste(Path_out,"temp",sep="/")
if(!file.exists(Path_tmp))dir.create(Path_tmp,showWarnings=FALSE, recursive=TRUE)
# Open logfile
if(WriteLog){
setwd(Path_out)
sink(file=paste("update_tree_mask_with_im=",image_id,".txt",sep=""))
cat(paste(Sys.time(),"starting","\n",sep=" "))
}
#===========================================================================
# Read tree mask. True position on the ground
# (already corrected for topographic shift)
#===========================================================================
setwd(Path_in)
# The mask from master image. Resides in S:\derived\DG_v8\5_tree_measurement\053734892380_01
#datafile <- paste("corrected_trees_size_shape_top_100_perc_v6.RData",sep="")
# The mask updated with the first slave image
datafile <- paste("Nigeria_updated_","054112895100_01",".RData",sep="")
if(!file.exists(datafile)){
# FIX: the original used 'next' here, which is an error at the top level of
# a script ("no loop for break/next, jumping to top level"); abort cleanly.
if(WriteLog){
cat(paste(Sys.time(),"Error: blobs file for master image not found","\n", sep=" "))
sink()
}
stop("blobs file for master image not found: ", datafile)
}
# Loads the object 'Tree_mask' (tree positions at true ground locations).
load(file=datafile)
#Blobs_m <- Master_blobs
Blobs_m <- Tree_mask
# Locate the matched multispectral (MS) and panchromatic (pan) versions of
# the slave image; the first .tif (excluding .aux sidecars) in each dir.
Path_ms_images <- paste(Root,"6_image_matching_v2",sep="/")
Path_ms <- paste(Path_ms_images,image_id,sep="/")
Path_pan_images <- paste(Root,"6_matched_pan_v2",sep="/")
Path_pan <- paste(Path_pan_images,image_id,sep="/")
setwd(Path_ms)
Files <- list.files(".",pattern=".tif",ignore.case = TRUE)
aux_files <- grep(".aux",Files,ignore.case = TRUE)
if(length(aux_files)>0) Files <- Files[-aux_files]
if(length(Files)<1){
# FIX: 'next' replaced by stop() (see above).
if(WriteLog){
cat(paste(Sys.time(),"Image ",image_id,"Error: cannot identify ms image files","\n",sep=" "))
sink()
}
stop("cannot identify ms image files for image ", image_id)
}
ms.imagefn <- Files[1]
setwd(Path_pan)
Files <- list.files(".",pattern=".tif",ignore.case = TRUE)
aux_files <- grep(".aux",Files,ignore.case = TRUE)
if(length(aux_files)>0) Files <- Files[-aux_files]
if(length(Files)<1){
# FIX: 'next' replaced by stop() (see above).
if(WriteLog){
cat(paste(Sys.time(),"Image ",image_id,". Error: cannot identify pan image files","\n",sep=" "))
sink()
}
stop("cannot identify pan image files for image ", image_id)
}
pan.imagefn <- Files[1]
# Define tiles
# Tile size in MS pixels; the overlap prevents losing trees at tile margins.
Mtile <- 400
Ntile <- 400
# overlap of tiles; prevents losing points at the margins
tile_over <- 10
ms.imageinfo <- GDALinfo(paste(Path_ms,ms.imagefn,sep="/"), silent=TRUE)
# Pixel size (x, y) of the MS and pan images.
# FIX: the second component was res.x in the original (copy-paste slip);
# it must be res.y. For square pixels the numeric value is unchanged.
ps.ms <- c(ms.imageinfo[["res.x"]],ms.imageinfo[["res.y"]])
pan.imageinfo <- GDALinfo(paste(Path_pan,pan.imagefn,sep="/"), silent=TRUE)
ps.pan <- c(pan.imageinfo[["res.x"]],pan.imageinfo[["res.y"]])
# Define tiles on the basis of master image
# Image dimensions (rows/columns) of the MS and pan images.
N0.ms <- ms.imageinfo[["rows"]]
M0.ms <- ms.imageinfo[["columns"]]
N0.pan <- pan.imageinfo[["rows"]]
M0.pan <- pan.imageinfo[["columns"]]
#===========================================================================
# Read image geometry: sun and satellite position (determined from metadata)
#===========================================================================
#input <- args[1] #input="053613698020_01"
input <- image_id
image <- paste(input, "_P001_MUL", ".tif", sep = "")
imagebase <- unlist(strsplit(image, "[.]"))[1]
pathmeta <- paste(Root, "0_categ", input, imagebase, sep = "/")
# GET METADATA
setwd(pathmeta)
metadata <- read.table(paste("metadata_", input, ".txt", sep = ""), stringsAsFactors=FALSE)
# Rows 4..8 of column 2 hold the sun/satellite angles (degrees); row 11 holds
# the acquisition date.
sun_sat <- as.numeric(unlist(metadata[4:8,2]))
acq_date <- unlist(metadata[11,2])
# Convert azimuths to radians; theta_* are zenith angles (90 - elevation).
sun_az <- sun_sat[1] * pi/180
sat_az <- sun_sat[3] * pi/180
theta_sun <- (90-sun_sat[2]) * pi/180
theta_sat <- (90-sun_sat[4]) * pi/180
# alpha_* are the same azimuths measured counter-clockwise from east.
alpha_sun <- pi/2 - sun_az
alpha_sat <- pi/2 - sat_az
# psi: direction (image plane) along which the shadow search is performed;
# corr_factor: scaling of the apparent shadow extent. Both are derived from
# the combined sun/satellite geometry used by the shadow-projection routines.
psi <- atan2(tan(theta_sat)*sin(alpha_sat)-(1+tan(theta_sun))*sin(alpha_sun),tan(theta_sat)*cos(alpha_sat)-(1+tan(theta_sun))*cos(alpha_sun))
corr_factor <- sqrt((tan(theta_sat))^2+(1+tan(theta_sun))^2-2*tan(theta_sat)*(1+tan(theta_sun))*cos(alpha_sat-alpha_sun))
#===========================================================================
# Set up band combinations
#===========================================================================
Nb <- ms.imageinfo[["bands"]]
if(Nb==8){
# Set RGB composition
# WorldView-2 8-band image: display bands and NIR/red indices for NDVI.
nR <- 7
nG <- 5
nB <- 3
# WV2 8 band NDVI
nir <- 7
red <- 5
}else{
if(Nb==4){
nR <- 4
nG <- 3
nB <- 2
nir <- 4
red <- 2
}else{
# Fallback: single-band display. Note nir/red are not set in this branch,
# so the NDVI computation later would fail for such images.
nR <- 1
nG <- 1
nB <- 1
}
}
# Number of tiles in x and y, and the tile indices to process.
ntx <- ceiling(M0.ms/Mtile)
nty <- ceiling(N0.ms/Ntile)
ix_arr <- 1:ntx
iy_arr <- 1:nty
if(WriteLog) cat(paste(Sys.time()," Tile size ",Mtile," by ",Ntile,"; tile overlap ",tile_over,"\n",sep=""))
if(WriteLog) cat(paste(Sys.time()," Processing tiles ",min(ix_arr),":",max(ix_arr)," by ",min(iy_arr),":",max(iy_arr),"\n",sep=""))
# Accumulators: the full updated mask and the newly added trees only.
Tree_mask <- data.frame()
Blobs_update <- data.frame()
# process a single tile
#ix <- 13
#iy <- 1
# Main loop over image tiles. For each tile:
#  1. read the MS and pan subsets,
#  2. project the existing mask into this image's viewing geometry,
#  3. detect large and small tree candidates in the NDVI image,
#  4. keep only candidates not already present in the mask,
#  5. estimate tree height parameters (h, hf, n) by fitting shadow models,
#  6. shift new trees to true ground positions and append them to the mask.
for(ix in ix_arr)
for(iy in iy_arr){
# Tile pixel bounds in the MS image, extended by the overlap.
i1 <- max((ix-1)*Mtile + 1 - tile_over,1)
i2 <- min(ix*Mtile + tile_over,M0.ms)
j1 <- max((iy-1)*Ntile + 1 - tile_over,1)
j2 <- min(iy*Ntile + tile_over,N0.ms)
# read image ms subset
ijr <- c(i1,i2,j1,j2)
Path_in <- Path_ms
MS <- read_subset(ms.imagefn,ijr[1],ijr[2],ijr[3],ijr[4])
if(Show_graph){
MSdisp <- MS
for(k in 1:Nb)MSdisp@data[,k] <- histstretch(MS@data[,k])
}
# Skip tiles with no valid image data.
if(is.na(diff(range(MS@data))) || diff(range(MS@data))==0 || median(MS@data[,3])==0){
# FIX: the log message referenced the undefined object 'im'; use image_id.
if(WriteLog) cat(paste(Sys.time()," intersection of image ",image_id," and tile x=",ix,"_y=",iy," is empty. Skipping","\n",sep=""))
next
}
bb <- bbox(MS)
xrl <- bb[1,]
yrl <- bb[2,]
# read pan image subset
ijr_pan <- xy_to_rowcol(cbind(xrl,yrl),pan.imageinfo)
ijr_pan[ijr_pan<0] <- 0
ijr_pan[1] <- ijr_pan[1] + 1
ijr_pan[3] <- ijr_pan[3] + 1
#ijr_pan <- (ijr[]-1)*4 + 1
Path_in <- Path_pan
Pan <- read_subset(pan.imagefn,ijr_pan[1],ijr_pan[2],ijr_pan[3],ijr_pan[4])
# FIX: removed 'MS_arr[[im]] <- MS' and 'Pan_arr[[im]] <- Pan'. MS_arr,
# Pan_arr and im are never defined in this script (leftovers from a
# multi-image variant) and caused a runtime error on the first tile.
xy.pan <- coordinates(Pan)
if(Show_graph){
Pandisp <- Pan
k <- 1
Pandisp@data[,k] <- histstretch(Pan@data[,k])
}
# Subset tree mask blobs
ind <- which((Blobs_m$x>xrl[1])&(Blobs_m$x<xrl[2])&(Blobs_m$y>yrl[1])&(Blobs_m$y<yrl[2]))
Blobs_tile <- Blobs_m[ind,]
# Delete blobs with h=0
ind <- which((Blobs_tile$h>0)&(Blobs_tile$hf>0))
if(length(ind)>0){
Blobs_tile <- Blobs_tile[ind,]
# Project master blobs onto master image geometry
# (shift true ground positions by the topographic displacement seen from
# the satellite so they are comparable with detections in this image).
h <- Blobs_tile$h
hf <- Blobs_tile$hf
n <- Blobs_tile$n
x <- Blobs_tile$x
y <- Blobs_tile$y
R <- 0.5 * Blobs_tile$d
h1 <- h*hf
h2 <- h-h1
#start_time <- Sys.time()
topo_shift <- project_pollock_quantitative_matrixC(h1, h2, R, theta_sat, n)
#Sys.time() - start_time
x_proj <- x - topo_shift * cos(alpha_sat)
y_proj <- y - topo_shift * sin(alpha_sat)
Blobs_tile_proj <- Blobs_tile
Blobs_tile_proj$x <- x_proj
Blobs_tile_proj$y <- y_proj
} else{
Blobs_tile <- data.frame()
Blobs_tile_proj <- data.frame()
}
# Optional diagnostics: current mask over this tile (true and projected).
if(Show_graph){
windows(record=TRUE)
par(mfrow=c(1,2))
if(Show_ms){
image(MSdisp,red=nR,green=nG,blue=nB,axes=TRUE)
title(main=paste("mask_v1_",acq_date," true positions",sep=""))
display_all_blobs(Blobs_tile,"white")
# Add shadow contour to master image
if(nrow(Blobs_tile)>0) for(id in 1:nrow(Blobs_tile)){
#xtrue <- Blobs_tile_proj_m$x[id]
#ytrue <- Blobs_tile_proj_m$y[id]
xtrue <- Blobs_tile$x[id]
ytrue <- Blobs_tile$y[id]
shad_pol <- project_pollockC(h1[id],h2[id],R[id],xtrue,ytrue,theta_sun,alpha_sun,n[id],0.25)
#shad_pol <- project_pollock(h1[id],h2[id],R_m[id],xtrue,ytrue,theta_sun_m,n[id],alpha_sun_m)
lines(shad_pol,col="yellow")
}
image(MSdisp,red=nR,green=nG,blue=nB,axes=TRUE)
title(main=paste("tile x=",ix,"iy=",iy," and projected trees",sep=""))
display_all_blobs(Blobs_tile_proj,"green")
# Add shadow contour to master image
if(nrow(Blobs_tile)>0) for(id in 1:nrow(Blobs_tile)){
#xtrue <- Blobs_tile_proj_m$x[id]
#ytrue <- Blobs_tile_proj_m$y[id]
xtrue <- Blobs_tile$x[id]
ytrue <- Blobs_tile$y[id]
shad_pol <- project_pollockC(h1[id],h2[id],R[id],xtrue,ytrue,theta_sun,alpha_sun,n[id],0.25)
#shad_pol <- project_pollock(h1[id],h2[id],R_m[id],xtrue,ytrue,theta_sun_m,n[id],alpha_sun_m)
lines(shad_pol,col="yellow")
}
}
if(!Show_ms){
#if(TRUE){
windows()
par(mfrow=c(1,2))
image(Pandisp,col=gray((0:255)/255),axes=TRUE)
# FIX: plot title referenced the undefined object 'im'; use image_id.
title(main=paste("image",image_id,"=",acq_date," true positions",sep=""))
display_all_blobs(Blobs_tile,"white")
# Add shadow contour to master image
if(nrow(Blobs_tile)>0) for(id in 1:nrow(Blobs_tile)){
#xtrue <- Blobs_tile_proj_m$x[id]
#ytrue <- Blobs_tile_proj_m$y[id]
xtrue <- Blobs_tile$x[id]
ytrue <- Blobs_tile$y[id]
shad_pol <- project_pollockC(h1[id],h2[id],R[id],xtrue,ytrue,theta_sun,alpha_sun,n[id],0.25)
#shad_pol <- project_pollock(h1[id],h2[id],R_m[id],xtrue,ytrue,theta_sun_m,n[id],alpha_sun_m)
lines(shad_pol,col="yellow")
}
image(Pandisp,col=gray((0:255)/255),axes=TRUE)
title(main=paste("tile x=",ix," y=",iy," and projected trees",sep=""))
display_all_blobs(Blobs_tile_proj,"green")
# Add shadow contour to master image
if(nrow(Blobs_tile)>0) for(id in 1:nrow(Blobs_tile)){
#xtrue <- Blobs_tile_proj_m$x[id]
#ytrue <- Blobs_tile_proj_m$y[id]
xtrue <- Blobs_tile$x[id]
ytrue <- Blobs_tile$y[id]
shad_pol <- project_pollockC(h1[id],h2[id],R[id],xtrue,ytrue,theta_sun,alpha_sun,n[id],0.25)
#shad_pol <- project_pollock(h1[id],h2[id],R_m[id],xtrue,ytrue,theta_sun_m,n[id],alpha_sun_m)
lines(shad_pol,col="yellow")
}
}
}
# Detect new trees
#================================================================================
# Phase 1: detect larger trees
#================================================================================
# Run tree detection in the MS image
# define range of scale values
# t = sigma^2, in pixels
Dmin <- 0.0
#Dsmall <- 5.0
Dmax <- 40.0
Darr <- seq(from = Dmin, to = Dmax, by = 0.75*mean(ps.ms))
#Darr1 <- seq(from = Dmin, to = Dsmall-0.125*mean(ps.ms), by = 0.125*mean(ps.ms))
#Darr2 <- seq(from = Dsmall, to = Dmax, by = 0.25*mean(ps.ms))
#Darr <- c(Darr1,Darr2)
#tmin <- 0.5*(Dmin/sum(ps.ms))^2
#tmax <- 0.5*(Dmax/sum(ps.ms))^2
#tarr <- seq(from = tmin, to = tmax, length.out=100)
tarr <- 0.5*(Darr/sum(ps.ms))^2
Ns <- length(tarr)
xy.ms <- coordinates(MS)
xrl <- range(xy.ms[,1])
yrl <- range(xy.ms[,2])
# NDVI of the tile is the detection band for blob detection.
y <- data.matrix(MS@data, rownames.force = NA)
ndvi <- (y[,nir]-y[,red])/(y[,nir]+y[,red])
if(Show_graph){
MSdisp <- MS
for(k in 1:Nb)MSdisp@data[,k] <- histstretch(MS@data[,k])
MSdisp$ndvi <- histstretch(ndvi)
if(FALSE){
windows()
par(mfrow=c(1,2))
image(MSdisp,red=nR,green=nG,blue=nB,axes=TRUE)
title(main=paste("tile x=",ix,"y=",iy,sep=""))
#windows()
image(MSdisp,attr="ndvi",col=gray((0:255)/255),axes=TRUE)
title(main=paste("NDVI"," tile x=",ix,"y=",iy,sep=""))
}
}
MS$ndvi <- ndvi
#load(file=paste("allblobs_image_",im,"_tx_",ix,"_ty_",iy,".RData",sep=""))
#if((ix==1)&(iy==1)) Blobs_all <- Blobs else Blobs_all <- rbind(Blobs_all,Blobs)
M <- MS@grid@cells.dim[1]
N <- MS@grid@cells.dim[2]
P <- MS$ndvi
Debug <- FALSE
xTL <- xy.ms[1,1]
yTL <- xy.ms[1,2]
#Thresholds for magnitude and ndvi
magn_thresh <- 1.0e-04
ndvi_thresh <- 0.10
# Interest point detection
start_time <- Sys.time()
Blobs <- detect_blobs_v3(P, M, N, ps.ms, xTL, yTL, tarr, magn_thresh)
end_time <- Sys.time()
end_time - start_time
# Evaluate ndvi of blobs
ndvi_blobs <- measure_blob_ndvi(Blobs, M, N, ndvi, ps.ms, xTL,yTL)
Blobs$ndvi <- ndvi_blobs
ind <- which(Blobs$ndvi >= ndvi_thresh)
if(length(ind)>0){
Blobs <- Blobs[ind,]
} else Blobs <- data.frame(array(0,c(0,5)))
# Keep at most the 1000 strongest responses.
if(nrow(Blobs)>1000){
ind <- order(Blobs$magn, decreasing=TRUE)
ind <- ind[1:1000]
Blobs <- Blobs[ind,]
}
if(nrow(Blobs)>0){
# Delete objects near image margins
dx1 <- Blobs$x - xrl[1]
dx2 <- -Blobs$x + xrl[2]
dy1 <- Blobs$y - yrl[1]
dy2 <- -Blobs$y + yrl[2]
ind <- which(pmin(dx1,dx2,dy1,dy2) <= 0.5*Blobs$d + mean(ps.ms))
if(length(ind)>0) Blobs <- Blobs[-ind,]
}
if(nrow(Blobs)>0){
# Phase 1 keeps only blobs with diameter >= 5 (map units) and removes
# co-centric duplicates.
Blobs <- Blobs[Blobs$d>=5.0,]
if(nrow(Blobs)>0) Blobs <- clean_cocentric_blobs(Blobs)
}
if(FALSE) if(Show_graph){
windows()
par(mfrow=c(1,2))
image(MSdisp,red=nR,green=nG,blue=nB,axes=TRUE)
title(main=paste("tile x=",ix,"y=",iy,sep=""))
display_all_blobs(Blobs,"white")
#if(nrow(Blobs)>0)text(Blobs$x,Blobs$y,labels=1:nrow(Blobs),pos=4,col="green")
#if(nrow(Blobs)>0)text(Blobs$x,Blobs$y,labels=1:nrow(Blobs),pos=4)
#windows()
#image(MSdisp,attr="ndvi",col=gray((0:255)/255),axes=TRUE)
image(MSdisp,red=nR,green=nG,blue=nB,axes=TRUE)
title(main=paste("NDVI"," tile x=",ix,"y=",iy,sep=""))
display_all_blobs(Blobs_tile_proj,"green")
display_all_blobs(Blobs,"white")
#if(nrow(Blobs)>0)text(Blobs$x,Blobs$y,labels=1:nrow(Blobs),pos=4,col="green")
}
# Resolve overlaps among the large candidates.
if(nrow(Blobs)>0) Large_blobs <- clean_overlap_tree_mask(Blobs) else Large_blobs <- Blobs
if(FALSE) if(Show_graph){
windows()
par(mfrow=c(1,2))
image(MSdisp,red=nR,green=nG,blue=nB,axes=TRUE)
title(main=paste("Larger_trees"," tile x=",ix,"y=",iy,sep=""))
display_all_blobs(Large_blobs,"white")
#if(nrow(Blobs)>0)text(Blobs$x,Blobs$y,labels=1:nrow(Blobs),pos=4,col="green")
#if(nrow(Blobs)>0)text(Blobs$x,Blobs$y,labels=1:nrow(Blobs),pos=4)
#windows()
#image(MSdisp,attr="ndvi",col=gray((0:255)/255),axes=TRUE)
image(MSdisp,red=nR,green=nG,blue=nB,axes=TRUE)
title(main=paste("Larger_trees"," NDVI"," tile x=",ix,"y=",iy,sep=""))
display_all_blobs(Blobs_tile_proj,"green")
display_all_blobs(Large_blobs,"white")
#if(nrow(Blobs)>0)text(Blobs$x,Blobs$y,labels=1:nrow(Blobs),pos=4,col="green")
}
#================================================================================
# Phase 2: detect smaller trees
#================================================================================
if(TRUE){
# Run tree detection in the MS image
# define range of scale values
# t = sigma^2, in pixels
# Finer scale range and stricter thresholds than in phase 1.
Dmin <- 0.0
#Dsmall <- 5.0
Dmax <- 8.0
Darr <- seq(from = Dmin, to = Dmax, by = 0.1*mean(ps.ms))
#Darr1 <- seq(from = Dmin, to = Dsmall-0.125*mean(ps.ms), by = 0.125*mean(ps.ms))
#Darr2 <- seq(from = Dsmall, to = Dmax, by = 0.25*mean(ps.ms))
#Darr <- c(Darr1,Darr2)
#tmin <- 0.5*(Dmin/sum(ps.ms))^2
#tmax <- 0.5*(Dmax/sum(ps.ms))^2
#tarr <- seq(from = tmin, to = tmax, length.out=100)
tarr <- 0.5*(Darr/sum(ps.ms))^2
Ns <- length(tarr)
#Thresholds for magnitude and ndvi
magn_thresh <- 1.0e-03
ndvi_thresh <- 0.15
# Interest point detection
start_time <- Sys.time()
Blobs <- detect_blobs_v3(P, M,N,ps.ms, xTL, yTL, tarr, magn_thresh)
end_time <- Sys.time()
end_time - start_time
# Evaluate ndvi of blobs
ndvi_blobs <- measure_blob_ndvi(Blobs, M, N, ndvi, ps.ms, xTL,yTL)
Blobs$ndvi <- ndvi_blobs
ind <- which(Blobs$ndvi >= ndvi_thresh)
if(length(ind)>0){
Blobs <- Blobs[ind,]
} else Blobs <- data.frame(array(0,c(0,5)))
if(FALSE)if(nrow(Blobs)>0){
# Delete objects near image margins
dx1 <- Blobs$x - xrl[1]
dx2 <- -Blobs$x + xrl[2]
dy1 <- Blobs$y - yrl[1]
dy2 <- -Blobs$y + yrl[2]
ind <- which(pmin(dx1,dx2,dy1,dy2) <= 0.5*Blobs$d + mean(ps.ms))
if(length(ind)>0) Blobs <- Blobs[-ind,]
}
if(nrow(Blobs)>0){
# Keep only small blobs that are not contained in a large one.
Blobs <- clean_cocentric_blobs(Blobs)
Small_blobs <- blobs_contained(Blobs, Large_blobs)
}else Small_blobs <- Blobs
#Blobs <- Small_blobs
}
Blobs <- rbind(Large_blobs, Small_blobs)
#Blobs <- Large_blobs
if(nrow(Blobs)>1000){
ind <- order(Blobs$magn, decreasing=TRUE)
ind <- ind[1:1000]
Blobs <- Blobs[ind,]
}
# Check which of the newly detected trees are not yet present in the mask
# Identify non-redundant trees
x1 <- Blobs_tile_proj$x
y1 <- Blobs_tile_proj$y
R1 <- 0.5*Blobs_tile_proj$d
x2 <- Blobs$x
y2 <- Blobs$y
R2 <- 0.5*Blobs$d
# ind_red[k] stays 1 when candidate k overlaps an existing mask tree.
ind_red <- array(1,nrow(Blobs))
if(nrow(Blobs)>0) for(k in 1:nrow(Blobs)){
d_arr <- sqrt(((x1-x2[k])^2) + ((y1-y2[k])^2))
#if(all(d_arr > 0.5*(R1+R2[k]))) ind_red[k] <- 0
if(all(d_arr > pmax(R1,R2[k]))) ind_red[k] <- 0
}
if(Show_graph){
windows()
par(mfrow=c(1,2))
image(MSdisp,red=nR,green=nG,blue=nB,axes=TRUE)
title(main=paste("new candidates tile x=",ix,"y=",iy,sep=""))
display_all_blobs(Blobs,"white")
if(nrow(Blobs)>0)text(Blobs$x,Blobs$y,labels=1:nrow(Blobs),pos=4,col="white")
#if(nrow(Blobs)>0)text(Blobs$x,Blobs$y,labels=1:nrow(Blobs),pos=4)
#windows()
#image(MSdisp,attr="ndvi",col=gray((0:255)/255),axes=TRUE)
image(MSdisp,red=nR,green=nG,blue=nB,axes=TRUE)
title(main=paste("redundancy check",sep=""))
display_all_blobs(Blobs_tile_proj,"white")
display_all_blobs(Blobs[ind_red==0,],"green")
display_all_blobs(Blobs[ind_red==1,],"blue")
#if(nrow(Blobs)>0)text(Blobs$x,Blobs$y,labels=1:nrow(Blobs),pos=4,col="green")
}
if(WriteLog)cat(paste(Sys.time()," Tile ix=",ix," iy=",iy," candidates found: ",nrow(Blobs),"\n",sep=""))
# Measure tree size for the non-redundant trees
Blobs <- Blobs[ind_red==0,]
if(WriteLog)cat(paste(Sys.time()," non-redundant candidates: ",nrow(Blobs),"\n",sep=""))
if(nrow(Blobs)>0){
Blobs$h <- NA
Blobs$hf <- NA
Blobs$n <- NA
#if(Show_graph) image(Pan,col=gray((0:255)/255),axes=TRUE)
xyTL <- c(min(xy.pan[,1]),max(xy.pan[,2]))
M <- Pan@grid@cells.dim[1]
N <- Pan@grid@cells.dim[2]
#if(Show_graph) display_all_blobs(Blobs[id,],"white")
#Show_graph <- FALSE
if(Show_graph) windows()
#id <- 11
# For each candidate, segment its shadow in the pan image and fit the
# shadow model to estimate height h, crown fraction hf and exponent n.
for(id in 1:nrow(Blobs)){
#if(Show_graph) windows()
A <- Pan
pan <- A$band1
xy <- xy.pan
ps <- ps.pan
xy0 <- c(Blobs$x[id],Blobs$y[id]) # Centroid of observed blob
R <- as.numeric(0.5*Blobs$d[id]) # and its radius
i <- 1 + round((xy0[1]-xyTL[1])/ps.pan[1])
j <- 1 + round((xyTL[2]-xy0[2])/ps.pan[2])
pn <- i + (j-1)*M
#if(Show_graph) points(xy.pan[pn,1],xy.pan[pn,2],col="blue",pch=16)
# Identify relevant subset of the pan image
# (crown neighbourhood extended along the shadow direction psi).
S_max <- R * corr_factor * 2.0
i1 <- i - round(2*R/ps.pan[1])
i2 <- i + round(2*R/ps.pan[1])
j1 <- j - round(2*R/ps.pan[2])
j2 <- j + round(2*R/ps.pan[2])
if(cos(psi)>0){
i2 <- i2 + round(S_max*cos(psi)/ps.pan[1])
}else i1 <- i1 + round(S_max*cos(psi)/ps.pan[1])
if(sin(psi)>0){
j1 <- j1 - round(S_max*sin(psi)/ps.pan[2])
}else j2 <- j2 - round(S_max*sin(psi)/ps.pan[2])
j1 <- max(c(j1,1))
j2 <- min(c(j2),N)
i1 <- max(c(i1,1))
i2 <- min(c(i2,M))
Pan_sub <- Pan[j1:j2,i1:i2]
# Analyse shadow region
# Randomize segmentation
pan_sub <- Pan_sub@data$band1
dsub <- Pan_sub@grid@cells.dim
Msub <- dsub[1]
Nsub <- dsub[2]
xy <- coordinates(Pan_sub)
if(Show_graph){
#windows()
image(Pan_sub,col=gray((0:255)/255),axes=TRUE)
title(main=id)
display_all_blobs(Blobs[id,],"white")
}
# Seed pixel: darkest pixel inside an annular sector on the shadow side.
xyc <- xy
xyc[,1] <- xyc[,1] - xy0[1]
xyc[,2] <- xyc[,2] - xy0[2]
rc <- sqrt(rowSums(xyc^2))
phic <- atan2(xyc[,2],xyc[,1])
ind <- which((abs(phic-psi)<=pi/4)&(rc<=2*R)&(rc>0.75*R))
ind_seed <- ind[which.min(pan_sub[ind])]
if(length(ind_seed)==0) next
min_val <- pan_sub[ind_seed]+1
max_val <- max(pan_sub,na.rm=TRUE)-1
if(min_val>=max_val-5){
next
}
# Region-grow the shadow from the seed over a sweep of gray-level
# thresholds and accumulate a per-pixel coverage frequency.
shad_thr_arr <- seq(min_val,max_val,5)
#shad_thr_arr <- seq(pan_sub[ind_seed]+1,270,5)
Nobs <- length(shad_thr_arr)
cover_fun <- array(0,Msub*Nsub)
area_arr <- array(0,0)
for(i in 1:Nobs){
#f <- pan
f <- pan_sub
f[] <- 0
f[pan_sub<shad_thr_arr[i]] <- 1
#if(f[ind_seed]==0){
# # no shadow found
# next
#}
shad <- Grow_region_seedC(f,Msub,Nsub,ind_seed)
area_arr <- c(area_arr,length(shad))
cover_fun[shad] <- cover_fun[shad]+1
#if(Show_graph) points(xy[shad,,drop=F],col="green",cex=0.2,pch=16)
# Too large area, neglect
if(length(shad)*prod(ps.pan)> 0.5*pi*(R^2)){
if(i>5){
area_rate <- diff(area_arr)
if(area_rate[i-1] > 5*area_rate[i-2]) break
}
if(length(shad)*prod(ps.pan)> 3.75*pi*(R^2)/corr_factor) break
}
}
#windows()
#plot(shad_thr_arr[1:length(area_arr)],area_arr)
#windows()
#plot(shad_thr_arr[1:length(area_rate)],area_rate)
Nobs_act <- i-3
if(Nobs_act<2) next
cover_fun <- cover_fun/Nobs_act
#summary(cover_fun)
# p-level set, median
# Median shadow: pixels covered in at least half of the threshold runs.
ind_median <- which(cover_fun>=0.5)
#if(Show_graph) points(xy[ind_median,],col="green",cex=0.2,pch=16)
#points(xy[,],col="green",cex=cover_fun,pch=16)
shad <- ind_median
if(length(shad)<2) next
# delete pixels that are inside the apparent crown
xys <- xy[shad,,drop=FALSE]
xys[,1] <- xys[,1] - xy0[1]
xys[,2] <- xys[,2] - xy0[2]
r <- sqrt(rowSums(xys^2))
ind <- which(r>=R)
shad <- shad[ind]
if(Show_graph) points(xy[shad,],col="red",cex=0.2,pch=16)
if(length(shad)==0) next
xy_shad_pix <- xy[shad,,drop=FALSE]
# Initial estimate of h
# h is optimised in units of h0 (true height recorded later as h*h0).
h0 <- 30.0
hf <- 0.75
n <- 2.0
#h <- 0.5
h <- 2*R/h0
h_min <- 0
h_max <- 2.0
hf_min <- 0
hf_max <- 0.9
n_min <- 2.0
n_max <- 2.0
dh <- 0.1
dhf <- 0.1
dn <- 0.0
obj_fun <- eval_shad_v6
if(Debug){
params <- c(h,hf,n)
dpar <- c(dh,dhf,dn)
lower <- c(h_min,hf_min,n_min)
upper <-c(h_max,hf_max,n_max)
}
# Coarse grid search over (h, hf, n) followed by iterative refinement.
grid_fit <- grid_optim_v4(c(h,hf,n),obj_fun,dpar=c(dh,dhf,dn),lower=c(h_min,hf_min,n_min),upper=c(h_max,hf_max,n_max))
not_optim <- TRUE
iter <- 0
eps <- 0.001
err_arr <- array(0,0)
err_arr <- c(err_arr,grid_fit[4])
h <- grid_fit[1]
h <- max(c(h,0))
h <- min(c(h,2))
#h_min <- max(c(h-0.2,0))
#h_max <- min(c(h+0.2,2))
h_min <- grid_fit[5]
h_max <- grid_fit[6]
dh <- max(c((h_max-h_min)/10,0.01))
hf <- grid_fit[2]
hf <- max(c(hf,0))
hf <- min(c(hf,0.9))
dhf <- max(c((hf_max-hf_min)/5,0.01))
#hf_min <- max(c(hf-0.1,0))
#hf_max <- min(c(hf+0.1,0.9))
hf_min <- grid_fit[7]
hf_max <- grid_fit[8]
n <- grid_fit[3]
n <- max(c(n,1))
n <- min(c(n,3))
#n_min <- max(c(n-0.2,1))
#n_max <- min(c(n+0.2,3))
n_min <- grid_fit[9]
n_max <- grid_fit[10]
#dn <- max(c((n_max-n_min)/10,0.1))
dn <- 0
if(Debug) if(Show_graph) draw_shad_fit(paste("ix=",ix," iy=",iy," id=",id,sep=""))
while(not_optim){
if(Debug){
params <- c(h,hf,n)
dpar <- c(dh,dhf,dn)
lower <- c(h_min,hf_min,n_min)
upper <-c(h_max,hf_max,n_max)
}
grid_fit2 <- grid_optim_v4(c(h,hf,n),obj_fun,dpar=c(dh,dhf,dn),lower=c(h_min,hf_min,n_min),upper=c(h_max,hf_max,n_max))
iter <- iter+1
# NOTE(review): the updates below read from grid_fit (the previous
# iteration's result) rather than grid_fit2, so the parameters lag one
# iteration behind -- verify against grid_optim_v4's intended usage.
err_arr <- c(err_arr,grid_fit[4])
h <- grid_fit[1]
h <- max(h,h_min)
h <- min(h,h_max)
hf <- grid_fit[2]
hf <- max(hf,hf_min)
hf <- min(hf,hf_max)
n <- grid_fit[3]
n <- max(n,n_min)
n <- min(n,n_max)
if(Debug) draw_shad_fit(paste("ix=",ix," iy=",iy," id=",id,sep=""))
#converg <- sqrt(mean((grid_fit2-grid_fit)^2))
converg <- abs(grid_fit2[4]-grid_fit[4])
grid_fit <- grid_fit2
if((converg<=eps)|(iter>10)) not_optim <- FALSE
}
#if(Show_graph){
# #windows()
# image(Pan_sub,col=gray((0:255)/255),axes=TRUE)
# display_all_blobs(Blobs[id,],"green")
#}
if(Show_graph){
draw_shad_fit(paste("ix=",ix," iy=",iy," id=",id,sep=""))
#points(xy[ind_median,],col="green",cex=0.2,pch=16)
#points(xy[shad,],col="red",cex=0.1,pch=16)
}
# Add the min margin
#draw_shadow(xy0,R,h*h0,hf_min,n,theta_sat,alpha_sat,theta_sun,alpha_sun)
#draw_shadow(xy0,R,h*h0,hf_max,n,theta_sat,alpha_sat,theta_sun,alpha_sun)
#draw_shadow(xy0,R,h*h0,hf,3,theta_sat,alpha_sat,theta_sun,alpha_sun)
#draw_shadow(xy0,R,h*h0,0,3,theta_sat,alpha_sat,theta_sun,alpha_sun)
Blobs$h[id] <- h*h0
Blobs$hf[id] <- hf
Blobs$n[id] <- n
}
#ind <- which(!is.na(Blobs$h))
#ind <- which(!is.na(Blobs$h) & Blobs$h>0 & Blobs$hf>0)
ind <- which(!is.na(Blobs$h) & Blobs$h>0)
if(length(ind)>0) Blobs <- Blobs[ind,] else Blobs <- data.frame()
}
if(WriteLog)cat(paste(Sys.time()," add ",nrow(Blobs)," candidates to the mask","\n",sep=""))
#Show_graph <- TRUE
if(FALSE)if(Show_graph){
windows()
par(mfrow=c(1,2))
image(MSdisp,red=nR,green=nG,blue=nB,axes=TRUE)
title(main=paste("tile x=",ix,"y=",iy,sep=""))
display_all_blobs(Blobs,"white")
#if(nrow(Blobs)>0)text(Blobs$x,Blobs$y,labels=1:nrow(Blobs),pos=4,col="green")
#if(nrow(Blobs)>0)text(Blobs$x,Blobs$y,labels=1:nrow(Blobs),pos=4)
#windows()
#image(MSdisp,attr="ndvi",col=gray((0:255)/255),axes=TRUE)
image(MSdisp,red=nR,green=nG,blue=nB,axes=TRUE)
title(main=paste("NDVI"," tile x=",ix,"y=",iy,sep=""))
display_all_blobs(Blobs_tile_proj,"white")
display_all_blobs(Blobs,"green")
#if(nrow(Blobs)>0)text(Blobs$x,Blobs$y,labels=1:nrow(Blobs),pos=4,col="green")
}
# Shift new trees to the true position
# (inverse of the earlier projection: move detections from image geometry
# back to ground positions).
Blobs_add <- Blobs
if(nrow(Blobs)>0){
h <- Blobs$h
hf <- Blobs$hf
n <- Blobs$n
x <- Blobs$x
y <- Blobs$y
R <- 0.5 * Blobs$d
h1 <- h*hf
h2 <- h-h1
#start_time <- Sys.time()
topo_shift <- project_pollock_quantitative_matrixC(h1, h2, R, theta_sat, n)
#Sys.time() - start_time
x <- x + topo_shift * cos(alpha_sat)
y <- y + topo_shift * sin(alpha_sat)
Blobs_add$x <- x
Blobs_add$y <- y
}
# Add new trees to the mask
#Blobs_m <- rbind(Blobs_m,Blobs_add)
Blobs_update <- rbind(Blobs_update,Blobs_add)
Blobs_tile <- rbind(Blobs_tile,Blobs_add)
Tree_mask <- rbind(Tree_mask,Blobs_tile)
# Persist the updated tile so an interrupted run can be inspected/resumed.
setwd(Path_tmp)
save(Blobs_tile, file=paste("updated_trees_tile_","ix=",ix,"iy=",iy,".RData",sep=""))
if(WriteLog)cat(paste(Sys.time()," tile ix=",ix," iy=",iy," is updated and contains ",nrow(Blobs_tile)," trees","\n",sep=""))
# Display updated tree mask
# Project master blobs onto master image geometry
Blobs_tile_proj <- Blobs_tile
if(nrow(Blobs_tile)>0){
h <- Blobs_tile$h
hf <- Blobs_tile$hf
n <- Blobs_tile$n
x <- Blobs_tile$x
y <- Blobs_tile$y
R <- 0.5 * Blobs_tile$d
h1 <- h*hf
h2 <- h-h1
#start_time <- Sys.time()
topo_shift <- project_pollock_quantitative_matrixC(h1, h2, R, theta_sat, n)
#Sys.time() - start_time
x_proj <- x - topo_shift * cos(alpha_sat)
y_proj <- y - topo_shift * sin(alpha_sat)
Blobs_tile_proj$x <- x_proj
Blobs_tile_proj$y <- y_proj
}
#windows(record=TRUE)
#par(mfrow=c(1,2))
if(Show_graph){
if(Show_ms){
image(MSdisp,red=nR,green=nG,blue=nB,axes=TRUE)
# FIX: plot title referenced the undefined object 'im'; use image_id.
title(main=paste("updated mask"," image",image_id,"=",acq_date," true positions",sep=""))
display_all_blobs(Blobs_tile,"white")
# Add shadow contour to master image
if(nrow(Blobs_tile)>0) for(id in 1:nrow(Blobs_tile)){
#xtrue <- Blobs_tile_proj_m$x[id]
#ytrue <- Blobs_tile_proj_m$y[id]
xtrue <- Blobs_tile$x[id]
ytrue <- Blobs_tile$y[id]
shad_pol <- project_pollockC(h1[id],h2[id],R[id],xtrue,ytrue,theta_sun,alpha_sun,n[id],0.25)
#shad_pol <- project_pollock(h1[id],h2[id],R_m[id],xtrue,ytrue,theta_sun_m,n[id],alpha_sun_m)
lines(shad_pol,col="yellow")
}
image(MSdisp,red=nR,green=nG,blue=nB,axes=TRUE)
title(main=paste("tile x=",ix,"iy=",iy," and projected trees",sep=""))
display_all_blobs(Blobs_tile_proj,"green")
# Add shadow contour to master image
if(nrow(Blobs_tile)>0) for(id in 1:nrow(Blobs_tile)){
#xtrue <- Blobs_tile_proj_m$x[id]
#ytrue <- Blobs_tile_proj_m$y[id]
xtrue <- Blobs_tile$x[id]
ytrue <- Blobs_tile$y[id]
shad_pol <- project_pollockC(h1[id],h2[id],R[id],xtrue,ytrue,theta_sun,alpha_sun,n[id],0.25)
#shad_pol <- project_pollock(h1[id],h2[id],R_m[id],xtrue,ytrue,theta_sun_m,n[id],alpha_sun_m)
lines(shad_pol,col="yellow")
}
}
if(!Show_ms){
#if(TRUE){
windows()
par(mfrow=c(1,2))
image(Pandisp,col=gray((0:255)/255),axes=TRUE)
# FIX: plot title referenced the undefined object 'im'; use image_id.
title(main=paste("image",image_id,"=",acq_date," true positions",sep=""))
display_all_blobs(Blobs_tile,"white")
# Add shadow contour to master image
if(nrow(Blobs_tile)>0) for(id in 1:nrow(Blobs_tile)){
#xtrue <- Blobs_tile_proj_m$x[id]
#ytrue <- Blobs_tile_proj_m$y[id]
xtrue <- Blobs_tile$x[id]
ytrue <- Blobs_tile$y[id]
shad_pol <- project_pollockC(h1[id],h2[id],R[id],xtrue,ytrue,theta_sun,alpha_sun,n[id],0.25)
#shad_pol <- project_pollock(h1[id],h2[id],R_m[id],xtrue,ytrue,theta_sun_m,n[id],alpha_sun_m)
lines(shad_pol,col="yellow")
}
image(Pandisp,col=gray((0:255)/255),axes=TRUE)
title(main=paste("tile x=",ix," y=",iy," and projected trees",sep=""))
display_all_blobs(Blobs_tile_proj,"green")
# Add shadow contour to master image
if(nrow(Blobs_tile)>0) for(id in 1:nrow(Blobs_tile)){
#xtrue <- Blobs_tile_proj_m$x[id]
#ytrue <- Blobs_tile_proj_m$y[id]
xtrue <- Blobs_tile$x[id]
ytrue <- Blobs_tile$y[id]
shad_pol <- project_pollockC(h1[id],h2[id],R[id],xtrue,ytrue,theta_sun,alpha_sun,n[id],0.25)
#shad_pol <- project_pollock(h1[id],h2[id],R_m[id],xtrue,ytrue,theta_sun_m,n[id],alpha_sun_m)
lines(shad_pol,col="yellow")
}
}
}
}
# Save the full updated tree mask (per-tile copies were saved in Path_tmp).
setwd(Path_out)
save(Tree_mask, file=paste("updated_trees",".RData",sep=""))
# Close logfile
if(WriteLog){
cat(paste(Sys.time(),"Updated tree mask contains",nrow(Tree_mask),"trees","\n",sep=" "))
cat(paste(Sys.time(),"Process ended","\n",sep=" "))
sink()
}
#==================================================================================
# The End
#==================================================================================
|
a3664b6bfff27b4634d35dbc7998af94f03db081
|
0d725654ae06c6a2c09613789b8c46e8b7f39539
|
/man/getPath.Rd
|
222c13fdb2dad0e6717f058c08af30679c496bbc
|
[] |
no_license
|
guillemr/robust-fpop
|
904d2672b0cf9b5f70a280c75da0c4417e1e0b48
|
ce49c26aa5b5eee84a835e69cbb7ca572f669792
|
refs/heads/master
| 2020-12-26T03:23:28.382328
| 2019-07-02T13:11:37
| 2019-07-02T13:11:37
| 68,607,744
| 10
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 463
|
rd
|
getPath.Rd
|
\name{getPath}
\alias{getPath}
\title{getPath}
\description{This function is used by the Rob_seg function to recover the best segmentation of points 1:n from the C output.}
\usage{getPath(path, i)}
\arguments{
\item{path}{the path vector of the "Rob_seg" function}
\item{i}{the last position to consider in the path vector}
}
\value{Returns a vector with the best change-points w.r.t. L2 to go from point 1 to i.}
\author{Guillem Rigaill}
|
97f325e170c371a80f2d7e4822b1b08b0ed61670
|
af286c8e4688c1ca310605d33d74ac6bc6f0cf5e
|
/man/getReactableState.Rd
|
41c16c0bdd0f56641e8b3f61cb621748d2b50ab5
|
[
"MIT"
] |
permissive
|
glin/reactable
|
999d3385bad36c4273f9766d8a8663b42a88cef4
|
86bd27670eac8fb330a50413f462cf1fe0ff8e88
|
refs/heads/main
| 2023-08-29T11:15:04.340315
| 2023-07-14T20:33:39
| 2023-07-14T20:33:39
| 178,748,690
| 594
| 84
|
NOASSERTION
| 2023-01-08T17:30:20
| 2019-03-31T22:22:16
|
JavaScript
|
UTF-8
|
R
| false
| true
| 2,969
|
rd
|
getReactableState.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shiny.R
\name{getReactableState}
\alias{getReactableState}
\title{Get the state of a reactable instance}
\usage{
getReactableState(outputId, name = NULL, session = NULL)
}
\arguments{
\item{outputId}{The Shiny output ID of the \code{reactable} instance.}
\item{name}{Character vector of state value(s) to get. Values must be one of \code{"page"},
\code{"pageSize"}, \code{"pages"}, \code{sorted}, or \code{"selected"}. If unspecified, all values will
be returned.}
\item{session}{The Shiny session object. Defaults to the current Shiny session.}
}
\value{
If \code{name} is specified, one of the following values:
\itemize{
\item \code{page}: the current page
\item \code{pageSize}: the page size
\item \code{pages}: the number of pages
\item \code{sorted}: the sorted columns - a named list of columns with values of \code{"asc"} for
ascending order or \code{"desc"} for descending order, or \code{NULL} if no columns are sorted
\item \code{selected}: the selected rows - a numeric vector of row indices, or \code{NULL} if no rows are selected
}
If \code{name} contains more than one value, \code{getReactableState()} returns a named list of
the specified values.
If \code{name} is unspecified, \code{getReactableState()} returns a named list containing all values.
If the table has not been rendered yet, \code{getReactableState()} returns \code{NULL}.
}
\description{
\code{getReactableState()} gets the state of a reactable instance within a Shiny application.
}
\examples{
# Run in an interactive R session
if (interactive()) {
library(shiny)
library(reactable)
library(htmltools)
ui <- fluidPage(
actionButton("prev_page_btn", "Previous page"),
actionButton("next_page_btn", "Next page"),
reactableOutput("table"),
verbatimTextOutput("table_state"),
uiOutput("selected_row_details")
)
server <- function(input, output) {
output$table <- renderReactable({
reactable(
MASS::Cars93[, 1:5],
showPageSizeOptions = TRUE,
selection = "multiple",
onClick = "select"
)
})
output$table_state <- renderPrint({
state <- req(getReactableState("table"))
print(state)
})
observeEvent(input$prev_page_btn, {
# Change to the previous page
page <- getReactableState("table", "page")
if (page > 1) {
updateReactable("table", page = page - 1)
}
})
observeEvent(input$next_page_btn, {
# Change to the next page
state <- getReactableState("table")
if (state$page < state$pages) {
updateReactable("table", page = state$page + 1)
}
})
output$selected_row_details <- renderUI({
selected <- getReactableState("table", "selected")
req(selected)
details <- MASS::Cars93[selected, -c(1:5)]
tagList(
h2("Selected row details"),
tags$pre(
paste(capture.output(print(details, width = 1200)), collapse = "\n")
)
)
})
}
shinyApp(ui, server)
}
}
|
d65e10efbdba19e0c447f8ee4a607d30609f8e60
|
2ead1fef38f9c97740374896c591e690c9577c28
|
/Community/Code/ZipInTX.R
|
f8697372c6e152c80a2e20d4b3b4554e29816ded
|
[] |
no_license
|
aditinabar/MapTheGap
|
5424d025d18e3bf2ffc1c04aa10ae29899add1f9
|
3d919f30ea6bd3cc7c550ed99d68fa2922bcefdd
|
refs/heads/master
| 2016-09-06T17:45:44.086357
| 2015-06-05T17:33:40
| 2015-06-05T17:33:40
| 31,827,298
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,168
|
r
|
ZipInTX.R
|
# Subset the MapTheGap community table to rows whose zip code is a
# standard Texas zip code.
setwd("C:/Users/naditi/Projects/MapTheGap/Community/Data/")
US <- read.table("US Zipcodes.csv", sep = ",", header = TRUE, stringsAsFactor = TRUE)
# Drop columns not needed downstream.
US[,which(colnames(US) == "world_region")] <- NULL
US[,which(colnames(US) == "timezone")] <- NULL
US[,which(colnames(US) == "county")] <- NULL
US[,which(colnames(US) == "area_codes")] <- NULL
# Keep only rows of type STANDARD, then restrict to Texas.
zip <- US[US$type=="STANDARD",]
row.names(zip) <- NULL
tx <- zip[zip$state == "TX",]
row.names(tx) <- NULL
setwd("C:/Users/naditi/Projects/MapTheGap/Community/Data/Clean")
com <- read.table("Community.csv", header = TRUE, sep = ",", stringsAsFactors = TRUE)
## Winning Method
# Vectorised membership test instead of the commented-out loop below.
# NOTE(review): this reads com$zip while the commented-out draft below reads
# com$Zip -- confirm the actual column name in Community.csv.
texas2 <- com[com$zip %in% tx$zip,]
#
# ziptx <- as.vector(tx$zip)
# #as.vector(unlist(ziptx), mode = "numeric")
#
# zipcom <- as.vector(com$Zip)
# #as.vector(unlist(zipcom), mode = "numeric")
#
# Texas <- vector()
# ext <- vector()
# ## Method A
# for (i in 1:1849){
# if((zipcom[i] %in% ziptx) == "TRUE"){
# print(zipcom[i])
# Texas[i] <- zipcom[i]
# }
# else if(((as.numeric(zipcom[i]) %in% unlist(ziptx))) == "FALSE"){
# ext[i] <- as.numeric(zipcom[i])
# }
# }
# Testing
# Scratch check of %in% semantics.
v <- c("a", "b", "c", "d")
"d" %in% v
|
0c3fb26a5219278ef37726ea557c1b0978085dba
|
4344efed9e7b5b01134e2fab68587878fba8bf53
|
/CASP/CASPplotter.R
|
7789e63fbe15e5b3376dfe32ec268132c9607408
|
[] |
no_license
|
mrkeppler/WWU-Projects
|
1d7e92e7ab7a8d575dd844c2df07ca3d6aa36d43
|
4576683fe11a69517457b6a56bca605e876a2874
|
refs/heads/master
| 2023-05-03T08:53:44.597702
| 2021-05-21T22:00:41
| 2021-05-21T22:00:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,544
|
r
|
CASPplotter.R
|
# Produce grouped barplots (one per CASP target) of square-root-transformed
# scores per candidate structure, written to barplots.pdf.
setwd('D:/Bork/Desktop')
data = read.table('results.dat',header=T)
# Square-root transform; columns relabelled as structure indices 1..20.
mat = sqrt(as.matrix(data))
colnames(mat) = 1:20
# Per-target row subsets (row order chosen to match the legend order below).
T0949 = mat[1:3,]
T0950 = mat[4,]
T0951 = mat[c(6,5,7),]
T0953s1 = mat[c(9,8,10),]
T0953s2 = mat[c(11,13,12),]
pdf('barplots.pdf')
barplot(T0949, xlab = 'Structure', ylab = 'MSD', main = 'T0949',border = NA,
col = c('seagreen2', 'royalblue', 'sandybrown'),
legend = c('3X1E','4HPO','1SQB'), args.legend = c(bg = NA,bty = 'n'))
# Horizontal dashed reference line at the lowest stacked total.
abline(min(colSums(T0949)),0,lty = "dashed")
barplot(T0950, xlab = 'Structure', ylab = 'MSD', main = 'T0950',border = NA,
col = c('sandybrown'),
legend = c('6EK4'), args.legend = c(bg = NA,bty = 'n'))
# T0950 is a single row (plain vector), so min() is taken directly.
abline(min(T0950),0,lty = "dashed")
barplot(T0951, xlab = 'Structure', ylab = 'MSD', main = 'T0951',border = NA,
col = c('seagreen2', 'royalblue', 'sandybrown'),
legend = c('5CBK','3W06','5DNU'), args.legend = c(bg = NA,bty = 'n'))
abline(min(colSums(T0951)),0,lty = "dashed")
barplot(T0953s1, xlab = 'Structure', ylab = 'MSD', main = 'T0953s1',border = NA,
col = c('seagreen2', 'royalblue', 'sandybrown'),
legend = c('2VCY','2GMQ','4EBB'), args.legend = c(bg = NA,bty = 'n'))
abline(min(colSums(T0953s1)),0,lty = "dashed")
barplot(T0953s2, xlab = 'Structure', ylab = 'MSD', main = 'T0953s2',border = NA,
col = c('seagreen2', 'royalblue', 'sandybrown'),
legend = c('3EEH','6CN1','3JSA'), args.legend = c(bg = NA,bty = 'n'))
abline(min(colSums(T0953s2)),0,lty = "dashed")
dev.off()
|
c24b9a80fe882415f36c16f351e95562bb56ef9c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/highfrequency/examples/rCov.Rd.R
|
934f20c9b7049b7823066fc74ff239421fdb5fba
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 502
|
r
|
rCov.Rd.R
|
# Extracted example script for highfrequency::rCov (realized covariance).
library(highfrequency)
### Name: rCov
### Title: Realized Covariance
### Aliases: rCov
### Keywords: volatility
### ** Examples
# Realized Variance/Covariance for CTS aligned
# at 5 minutes.
data(sample_tdata);
data(sample_5minprices_jumps);
# Univariate:
# Realized variance of a single price series, aligned to a 5-minute grid.
rv = rCov( rdata = sample_tdata$PRICE, align.by ="minutes",
align.period =5, makeReturns=TRUE);
rv
# Multivariate:
# Realized covariance matrix of the multivariate 5-minute price series.
rc = rCov( rdata = sample_5minprices_jumps['2010-01-04'], makeReturns=TRUE);
rc
|
77143359120291b9d0530e277a4503f579f308d2
|
dbb6c3f594656f0c990b32821e572e352278741c
|
/run_analysis.R
|
9e4a2fdcbbd24ade713d388890e21575677188cf
|
[] |
no_license
|
amirzoev/ReadAndCleanData-Assignment
|
44d16eb3751b62bffb9da8df738446cc17cc2f5a
|
510bb891fe2491b2c4b61cbee25f0bdbbc7bac20
|
refs/heads/master
| 2020-06-02T07:03:46.074115
| 2014-04-27T22:34:57
| 2014-04-27T22:34:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,808
|
r
|
run_analysis.R
|
# Builds a tidy summary of the UCI HAR ("Human Activity Recognition") data.
# melt()/dcast() below come from reshape2; the original script never loaded
# it, so the final reshaping steps failed when run non-interactively.
library(reshape2)

# 1. Merge the training and the test sets to create one data set.
test_set <- read.table('./UCI HAR Dataset/test/X_test.txt')
train_set <- read.table('./UCI HAR Dataset/train/X_train.txt')
set <- rbind(test_set, train_set) # combined measurement set

act_testset <- read.table('./UCI HAR Dataset/test/y_test.txt')
act_trainset <- read.table('./UCI HAR Dataset/train/y_train.txt')
act <- rbind(act_testset, act_trainset) # activity codes, same row order as `set`

subj_test <- read.table('./UCI HAR Dataset/test/subject_test.txt')
subj_train <- read.table('./UCI HAR Dataset/train/subject_train.txt')
subj <- rbind(subj_test, subj_train)
names(subj) <- 'subject'

# 2. Extract only the mean and standard deviation measurements.
feat <- read.table("./UCI HAR Dataset/features.txt")
feat_fltr <- feat[grepl("mean()", feat$V2, fixed = TRUE) | grepl("std()", feat$V2, fixed = TRUE), ] # filter required features
set_fltr <- set[feat_fltr$V1] # filtered set: only mean() and std() columns

# 3. Appropriately label the data set with descriptive variable (column) names.
names(set_fltr) <- feat_fltr$V2

# 4. Use descriptive activity names to name the activities in the data set.
act_label <- read.table('./UCI HAR Dataset/activity_labels.txt')
act_named <- lapply(act, function(x) act_label[x, ]$V2)
names(act_named) <- c('activity')

# Join the three pieces column-wise (a single cbind suffices; the original
# nested cbind(cbind(...)) was redundant).
set_joined <- cbind(set_fltr, act_named, subj)

# 5. Create a second, independent tidy data set with the average of each
#    variable for each activity and each subject.
## First melt the dataset to long form...
set_melt <- melt(set_joined, id = c("subject", "activity"), measure.vars = names(set_joined)[1:66])
## ...then cast back, averaging over each activity of each specific person.
mytidyset <- dcast(set_melt, subject + activity ~ variable, mean)
mytidyset # the resulting tidy dataset
|
abde02fc3d9722d256cfffb5e790b1de027d93d6
|
7b74f00cd80694634e6925067aaeb6572b09aef8
|
/2019/Assignment/FE8828-Iman Taleb/Assignment 4/ex2. bookoptiontrades.R
|
57d5075c27e9aa1ebd645f640884ae2b8c38b899
|
[] |
no_license
|
leafyoung/fe8828
|
64c3c52f1587a8e55ef404e8cedacbb28dd10f3f
|
ccd569c1caed8baae8680731d4ff89699405b0f9
|
refs/heads/master
| 2023-01-13T00:08:13.213027
| 2020-11-08T14:08:10
| 2020-11-08T14:08:10
| 107,782,106
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 983
|
r
|
ex2. bookoptiontrades.R
|
# Exercise 2: valuation and implied-volatility smile for a book of option
# trades. Assumes a data frame `callputs` (columns: Type, Strike, Underlying,
# Bid, Ask, `Open Interest`, Expiry) is already loaded by the caller.
library(fOptions)
library(dplyr)
library(ggplot2)

# Mid-price valuation: open interest times the bid/ask midpoint.
callputs <- mutate(callputs, Value = `Open Interest` * (Bid + Ask) / 2)

# Total valuation of calls and puts separately.
group_by(callputs, Type) %>%
  summarise(`Total Valuation` = sum(Value))

# Total valuation of both calls and puts together.
summarise(callputs, Total = sum(Value))

# In-the-money options: calls with strike below the underlying,
# puts with strike above it.
atm <- callputs %>%
  filter((Type == "c" & (Strike < Underlying)) | (Type == "p" & (Strike > Underlying)))

# Total open interest of the in-the-money options.
summarise(atm, `At The Money` = sum(`Open Interest`))

# Out-of-the-money options, used to plot the volatility smile.
vol <- callputs %>%
  filter((Type == "c" & (Strike > Underlying)) | (Type == "p" & (Strike < Underlying)))

# GBSVolatility() is not vectorised, so the implied volatility must be
# computed one row at a time (this resolves the earlier "need to use
# rowwise" review note).
# NOTE(review): the argument order passes Strike as the spot and Underlying
# as the exercise price — confirm against GBSVolatility(price, TypeFlag, S, X, ...).
vol <- vol %>%
  rowwise() %>%
  mutate(Volatility = GBSVolatility(
    Value, Type, Strike, Underlying,
    as.numeric(as.Date("2019-12-20") - as.Date(Expiry)) / 365,
    r = 0.03, b = 0
  )) %>%
  ungroup()

# Volatility smile: strike vs implied volatility.
vol %>%
  ggplot(aes(x = Strike, y = Volatility)) +
  geom_point() +
  geom_smooth()
|
feb01a758b0446251e344462bb165a7f1c9646ad
|
5fc672d84618a45c16542dc8680fa3d41937ce22
|
/R/R2G2/man/Plots2GE.Rd
|
a55fd0e35e97dd4148948ebe36b5f12b83863c5c
|
[] |
no_license
|
arrigon/R2G2
|
4dccffe82d01660b13eee54598a21de561cc1928
|
f49f292c903879295ddb84c676f02f80883a9db5
|
refs/heads/master
| 2021-01-11T00:14:27.612052
| 2016-10-11T11:44:57
| 2016-10-11T11:44:57
| 70,573,434
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,452
|
rd
|
Plots2GE.Rd
|
\name{Plots2GE}
\alias{Plots2GE}
\title{
Georeferencing custom R plots into Google Earth
}
\description{
Plots2GE: Places PNG R plots on Google Earth, as KML files.
}
\usage{
Plots2GE(data, center, nesting = 0, customfun, goo = "Plots2GE.kml", testrun = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{
Dataset used for producing the plots (will be the input of your customfun, see below).
}
\item{center}{
Matrix including the longitude(s) and latitude(s) of point(s) where to locate plots (decimal degrees). Must correspond to "data", with same number and ordering of observations.
}
\item{nesting}{
Location-specific identifier, used to group the data into location-specific subsets and produce location specific plots. Must correspond to "data", with same number and ordering of observations.
}
\item{customfun}{
User-defined function to produce the plots, see details.
}
\item{goo}{
Name of the KML file to that will be saved into the working directory (use getwd() to find it).
}
\item{testrun}{
Diagnostic mode. T (will run only at the screen, for checking purposes) or F (will produce actual plots as png files for Google Earth).
}
}
\details{
The user needs to declare a function where the input is the "data" matrix, and the output is a plot.
Plots2GE will then apply this function to any location-specific subset (the locations being defined using the "nesting" parameter).
Any function is possible; just keep in mind that Plots2GE will apply it in a location-specific way.
}
\value{
A KML file is produced in the current working directory.
}
\author{
Nils Arrigo, nils.arrigo@gmail.com
2012 EEB, the University of Arizona, Tucson
}
\seealso{
\code{
\link{par}
\link{plot}
}
}
\examples{
## Preparing fake matrix
center = cbind(1:6, 1:6)
nesting = rep(1:3, each = 2)
fakeVar1 = rnorm(300, 0, 1)
fakeVar2 = rnorm(300, 0, 1)
fakematrix = data.frame(nesting, center, fakeVar1, fakeVar2)
fakematrix
## Preparing a user-defined function for producing the desired plots
myfun = function(input){
plot(input[, 4], input[, 5], xlab='Xlab label', ylab='Ylab label', type = 'n', bty = 'n')
points(input[, 4], input[, 5], col='red', pch = 16, cex = 2)
}
## Producing KML - the easy way
Plots2GE(data = fakematrix,
center = fakematrix[, 2:3],
nesting = fakematrix[, 1],
customfun = myfun,
goo = "Plots2GE_V1.kml",
testrun = FALSE)
}
|
6b0b49e5a97d2049f0e3106206c90dd74b216e6b
|
254f9b74808e643c0802d97b9d558664af583375
|
/consultas_joins/joins_varios_01.R
|
483a142c72e17c042b04dcc643fbae50d72ee3a6
|
[] |
no_license
|
davgutavi/rupolab
|
6ff269e6fffbfddc0beb57c0a2b1d8468ba649d8
|
9a386b694f16c53d20051d4c9856d10002b241d3
|
refs/heads/master
| 2022-03-30T03:29:30.480002
| 2020-01-20T13:05:10
| 2020-01-20T13:05:10
| 110,011,143
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,740
|
r
|
joins_varios_01.R
|
# Joins varios 01
# Exploratory Spark SQL joins between the contracts master (MaestroContratos),
# fraud/irregularity cases (Expedientes), meters (MaestroAparatos) and load
# curves (CurvasCarga). Assumes those temp views are registered elsewhere
# (presumably by paths.R or a companion loader) — TODO confirm.
library(SparkR)
source("paths.R")
# Local Spark session on all cores; scratch/spill directory redirected to a
# large disk.
sparkR.session(master = "local[*]", sparkConfig = list(spark.local.dir="/mnt/datos/tempSparkR"))
# Contracts joined with their cases on origin + estate (cfinca) +
# service point (cptoserv) + individual right (cderind). Registered below
# as view "MaestroContratosExpedientes" for the downstream queries.
conexp <- sql("
SELECT MaestroContratos.origen, MaestroContratos.cptocred, MaestroContratos.cfinca, MaestroContratos.cptoserv, MaestroContratos.cderind, MaestroContratos.cupsree,
MaestroContratos.ccounips,MaestroContratos.cupsree2, MaestroContratos.cpuntmed, MaestroContratos.tpuntmed, MaestroContratos.vparsist, MaestroContratos.cemptitu,
MaestroContratos.ccontrat, MaestroContratos.cnumscct, MaestroContratos.fpsercon, MaestroContratos.ffinvesu,
Expedientes.csecexpe, Expedientes.fapexpd, Expedientes.finifran, Expedientes.ffinfran, Expedientes.anomalia, Expedientes.irregularidad,
Expedientes.venacord, Expedientes.vennofai, Expedientes.torigexp, Expedientes.texpedie,Expedientes.expclass, Expedientes.testexpe,
Expedientes.fnormali, Expedientes.cplan, Expedientes.ccampa, Expedientes.cempresa, Expedientes.fciexped
FROM MaestroContratos JOIN Expedientes
ON MaestroContratos.origen=Expedientes.origen AND MaestroContratos.cfinca=Expedientes.cfinca AND
MaestroContratos.cptoserv=Expedientes.cptoserv AND MaestroContratos.cderind=Expedientes.cderind
")
# Expedientes.fapexpd >= MaestroContratos.fpsercon AND Expedientes.fapexpd <= MaestroContratos.ffinvesu
# NOTE(review): the date-window condition kept above as a comment was
# apparently removed from this join, yet it is still applied in the
# conirregularidad query further down — confirm which is intended.
createOrReplaceTempView(conexp,"MaestroContratosExpedientes")
# Add meter information: join the contracts+cases view with MaestroAparatos
# on origin + supply point (cupsree2) + measurement point (cpuntmed).
conexpapa <- sql("
SELECT MaestroContratosExpedientes.origen, MaestroContratosExpedientes.cptocred, MaestroContratosExpedientes.cfinca, MaestroContratosExpedientes.cptoserv, MaestroContratosExpedientes.cderind, MaestroContratosExpedientes.cupsree,
MaestroContratosExpedientes.ccounips,MaestroContratosExpedientes.cupsree2, MaestroContratosExpedientes.cpuntmed, MaestroContratosExpedientes.tpuntmed, MaestroContratosExpedientes.vparsist, MaestroContratosExpedientes.cemptitu,
MaestroContratosExpedientes.ccontrat, MaestroContratosExpedientes.cnumscct, MaestroContratosExpedientes.fpsercon, MaestroContratosExpedientes.ffinvesu,
MaestroContratosExpedientes.csecexpe, MaestroContratosExpedientes.fapexpd, MaestroContratosExpedientes.finifran, MaestroContratosExpedientes.ffinfran, MaestroContratosExpedientes.anomalia, MaestroContratosExpedientes.irregularidad,
MaestroContratosExpedientes.venacord, MaestroContratosExpedientes.vennofai, MaestroContratosExpedientes.torigexp, MaestroContratosExpedientes.texpedie,MaestroContratosExpedientes.expclass, MaestroContratosExpedientes.testexpe,
MaestroContratosExpedientes.fnormali, MaestroContratosExpedientes.cplan, MaestroContratosExpedientes.ccampa, MaestroContratosExpedientes.cempresa, MaestroContratosExpedientes.fciexped,
MaestroAparatos.csecptom, MaestroAparatos.fvigorpm, MaestroAparatos.fbajapm,MaestroAparatos.caparmed
FROM MaestroContratosExpedientes JOIN MaestroAparatos
ON MaestroContratosExpedientes.origen = MaestroAparatos.origen AND MaestroContratosExpedientes.cupsree2 = MaestroAparatos.cupsree2 AND MaestroContratosExpedientes.cpuntmed = MaestroAparatos.cpuntmed
")
createOrReplaceTempView(conexpapa,"MaestroContratosExpedientesMaestroAparatos")
# Add the quarter-hourly load curves: join contracts+cases+meters with
# CurvasCarga on origin + measurement point, keeping only active ('A'
# obiscode) corrected ('R' testcaco) readings whose date falls inside the
# contract's service window [fpsercon, ffinvesu]. The hora_01..hora_25 /
# *q_consumo_* columns are the 25 hourly slots (25 covers DST change days)
# with four quarter-hour consumptions and status/test flags each.
conexpapacur<- sql("
SELECT MaestroContratosExpedientesMaestroAparatos.origen, MaestroContratosExpedientesMaestroAparatos.cptocred, MaestroContratosExpedientesMaestroAparatos.cfinca, MaestroContratosExpedientesMaestroAparatos.cptoserv,
MaestroContratosExpedientesMaestroAparatos.cderind, MaestroContratosExpedientesMaestroAparatos.cupsree,MaestroContratosExpedientesMaestroAparatos.ccounips,MaestroContratosExpedientesMaestroAparatos.cupsree2,
MaestroContratosExpedientesMaestroAparatos.cpuntmed, MaestroContratosExpedientesMaestroAparatos.tpuntmed, MaestroContratosExpedientesMaestroAparatos.vparsist, MaestroContratosExpedientesMaestroAparatos.cemptitu,
MaestroContratosExpedientesMaestroAparatos.ccontrat, MaestroContratosExpedientesMaestroAparatos.cnumscct, MaestroContratosExpedientesMaestroAparatos.fpsercon, MaestroContratosExpedientesMaestroAparatos.ffinvesu,
MaestroContratosExpedientesMaestroAparatos.csecexpe, MaestroContratosExpedientesMaestroAparatos.fapexpd, MaestroContratosExpedientesMaestroAparatos.finifran, MaestroContratosExpedientesMaestroAparatos.ffinfran,
MaestroContratosExpedientesMaestroAparatos.anomalia, MaestroContratosExpedientesMaestroAparatos.irregularidad,MaestroContratosExpedientesMaestroAparatos.venacord, MaestroContratosExpedientesMaestroAparatos.vennofai,
MaestroContratosExpedientesMaestroAparatos.torigexp, MaestroContratosExpedientesMaestroAparatos.texpedie,MaestroContratosExpedientesMaestroAparatos.expclass, MaestroContratosExpedientesMaestroAparatos.testexpe,
MaestroContratosExpedientesMaestroAparatos.fnormali, MaestroContratosExpedientesMaestroAparatos.cplan, MaestroContratosExpedientesMaestroAparatos.ccampa, MaestroContratosExpedientesMaestroAparatos.cempresa,
MaestroContratosExpedientesMaestroAparatos.fciexped,MaestroContratosExpedientesMaestroAparatos.csecptom, MaestroContratosExpedientesMaestroAparatos.fvigorpm, MaestroContratosExpedientesMaestroAparatos.fbajapm,
MaestroContratosExpedientesMaestroAparatos.caparmed,
CurvasCarga.flectreg, CurvasCarga.testcaco, CurvasCarga.obiscode, CurvasCarga.vsecccar,
CurvasCarga.hora_01, CurvasCarga.1q_consumo_01, CurvasCarga.2q_consumo_01, CurvasCarga.3q_consumo_01, CurvasCarga.4q_consumo_01,CurvasCarga.substatus_01,CurvasCarga.testmenn_01,CurvasCarga.testmecnn_01,
CurvasCarga.hora_02, CurvasCarga.1q_consumo_02, CurvasCarga.2q_consumo_02, CurvasCarga.3q_consumo_02, CurvasCarga.4q_consumo_02,CurvasCarga.substatus_02,CurvasCarga.testmenn_02,CurvasCarga.testmecnn_02,
CurvasCarga.hora_03, CurvasCarga.1q_consumo_03, CurvasCarga.2q_consumo_03, CurvasCarga.3q_consumo_03, CurvasCarga.4q_consumo_03,CurvasCarga.substatus_03,CurvasCarga.testmenn_03,CurvasCarga.testmecnn_03,
CurvasCarga.hora_04, CurvasCarga.1q_consumo_04, CurvasCarga.2q_consumo_04, CurvasCarga.3q_consumo_04, CurvasCarga.4q_consumo_04,CurvasCarga.substatus_04,CurvasCarga.testmenn_04,CurvasCarga.testmecnn_04,
CurvasCarga.hora_05, CurvasCarga.1q_consumo_05, CurvasCarga.2q_consumo_05, CurvasCarga.3q_consumo_05, CurvasCarga.4q_consumo_05,CurvasCarga.substatus_05,CurvasCarga.testmenn_05,CurvasCarga.testmecnn_05,
CurvasCarga.hora_06, CurvasCarga.1q_consumo_06, CurvasCarga.2q_consumo_06, CurvasCarga.3q_consumo_06, CurvasCarga.4q_consumo_06,CurvasCarga.substatus_06,CurvasCarga.testmenn_06,CurvasCarga.testmecnn_06,
CurvasCarga.hora_07, CurvasCarga.1q_consumo_07, CurvasCarga.2q_consumo_07, CurvasCarga.3q_consumo_07, CurvasCarga.4q_consumo_07,CurvasCarga.substatus_07,CurvasCarga.testmenn_07,CurvasCarga.testmecnn_07,
CurvasCarga.hora_08, CurvasCarga.1q_consumo_08, CurvasCarga.2q_consumo_08, CurvasCarga.3q_consumo_08, CurvasCarga.4q_consumo_08,CurvasCarga.substatus_08,CurvasCarga.testmenn_08,CurvasCarga.testmecnn_08,
CurvasCarga.hora_09, CurvasCarga.1q_consumo_09, CurvasCarga.2q_consumo_09, CurvasCarga.3q_consumo_09, CurvasCarga.4q_consumo_09,CurvasCarga.substatus_09,CurvasCarga.testmenn_09,CurvasCarga.testmecnn_09,
CurvasCarga.hora_10, CurvasCarga.1q_consumo_10, CurvasCarga.2q_consumo_10, CurvasCarga.3q_consumo_10, CurvasCarga.4q_consumo_10,CurvasCarga.substatus_10,CurvasCarga.testmenn_10,CurvasCarga.testmecnn_10,
CurvasCarga.hora_11, CurvasCarga.1q_consumo_11, CurvasCarga.2q_consumo_11, CurvasCarga.3q_consumo_11, CurvasCarga.4q_consumo_11,CurvasCarga.substatus_11,CurvasCarga.testmenn_11,CurvasCarga.testmecnn_11,
CurvasCarga.hora_12, CurvasCarga.1q_consumo_12, CurvasCarga.2q_consumo_12, CurvasCarga.3q_consumo_12, CurvasCarga.4q_consumo_12,CurvasCarga.substatus_12,CurvasCarga.testmenn_12,CurvasCarga.testmecnn_12,
CurvasCarga.hora_13, CurvasCarga.1q_consumo_13, CurvasCarga.2q_consumo_13, CurvasCarga.3q_consumo_13, CurvasCarga.4q_consumo_13,CurvasCarga.substatus_13,CurvasCarga.testmenn_13,CurvasCarga.testmecnn_13,
CurvasCarga.hora_14, CurvasCarga.1q_consumo_14, CurvasCarga.2q_consumo_14, CurvasCarga.3q_consumo_14, CurvasCarga.4q_consumo_14,CurvasCarga.substatus_14,CurvasCarga.testmenn_14,CurvasCarga.testmecnn_14,
CurvasCarga.hora_15, CurvasCarga.1q_consumo_15, CurvasCarga.2q_consumo_15, CurvasCarga.3q_consumo_15, CurvasCarga.4q_consumo_15, CurvasCarga.substatus_15, CurvasCarga.testmenn_15, CurvasCarga.testmecnn_15,
CurvasCarga.hora_16, CurvasCarga.1q_consumo_16, CurvasCarga.2q_consumo_16, CurvasCarga.3q_consumo_16, CurvasCarga.4q_consumo_16, CurvasCarga.substatus_16, CurvasCarga.testmenn_16, CurvasCarga.testmecnn_16,
CurvasCarga.hora_17, CurvasCarga.1q_consumo_17, CurvasCarga.2q_consumo_17, CurvasCarga.3q_consumo_17, CurvasCarga.4q_consumo_17, CurvasCarga.substatus_17, CurvasCarga.testmenn_17, CurvasCarga.testmecnn_17,
CurvasCarga.hora_18, CurvasCarga.1q_consumo_18, CurvasCarga.2q_consumo_18, CurvasCarga.3q_consumo_18, CurvasCarga.4q_consumo_18, CurvasCarga.substatus_18, CurvasCarga.testmenn_18, CurvasCarga.testmecnn_18,
CurvasCarga.hora_19, CurvasCarga.1q_consumo_19, CurvasCarga.2q_consumo_19, CurvasCarga.3q_consumo_19, CurvasCarga.4q_consumo_19, CurvasCarga.substatus_19, CurvasCarga.testmenn_19, CurvasCarga.testmecnn_19,
CurvasCarga.hora_20, CurvasCarga.1q_consumo_20, CurvasCarga.2q_consumo_20, CurvasCarga.3q_consumo_20, CurvasCarga.4q_consumo_20, CurvasCarga.substatus_20, CurvasCarga.testmenn_20, CurvasCarga.testmecnn_20,
CurvasCarga.hora_21, CurvasCarga.1q_consumo_21, CurvasCarga.2q_consumo_21, CurvasCarga.3q_consumo_21, CurvasCarga.4q_consumo_21, CurvasCarga.substatus_21, CurvasCarga.testmenn_21, CurvasCarga.testmecnn_21,
CurvasCarga.hora_22, CurvasCarga.1q_consumo_22, CurvasCarga.2q_consumo_22, CurvasCarga.3q_consumo_22, CurvasCarga.4q_consumo_22, CurvasCarga.substatus_22, CurvasCarga.testmenn_22, CurvasCarga.testmecnn_22,
CurvasCarga.hora_23, CurvasCarga.1q_consumo_23, CurvasCarga.2q_consumo_23, CurvasCarga.3q_consumo_23, CurvasCarga.4q_consumo_23, CurvasCarga.substatus_23, CurvasCarga.testmenn_23, CurvasCarga.testmecnn_23,
CurvasCarga.hora_24, CurvasCarga.1q_consumo_24, CurvasCarga.2q_consumo_24, CurvasCarga.3q_consumo_24, CurvasCarga.4q_consumo_24, CurvasCarga.substatus_24, CurvasCarga.testmenn_24, CurvasCarga.testmecnn_24,
CurvasCarga.hora_25, CurvasCarga.1q_consumo_25, CurvasCarga.2q_consumo_25, CurvasCarga.3q_consumo_25, CurvasCarga.4q_consumo_25, CurvasCarga.substatus_25, CurvasCarga.testmenn_25, CurvasCarga.testmecnn_25
FROM MaestroContratosExpedientesMaestroAparatos JOIN CurvasCarga
ON MaestroContratosExpedientesMaestroAparatos.origen = CurvasCarga.origen AND MaestroContratosExpedientesMaestroAparatos.cpuntmed = CurvasCarga.cpuntmed
AND CurvasCarga.obiscode = 'A' AND CurvasCarga.testcaco = 'R' AND
CurvasCarga.flectreg >= MaestroContratosExpedientesMaestroAparatos.fpsercon AND CurvasCarga.flectreg <= MaestroContratosExpedientesMaestroAparatos.ffinvesu
")
createOrReplaceTempView(conexpapacur,"MaestroContratosExpedientesMaestroAparatosCurvasCarga")
# Cases flagged as irregularities ('S') whose opening date (fapexpd) falls
# within the contract's service window [fpsercon, ffinvesu].
conirregularidad<- sql("
SELECT MaestroContratos.origen, MaestroContratos.cptocred, MaestroContratos.cfinca, MaestroContratos.cptoserv, MaestroContratos.cderind, MaestroContratos.cupsree,
MaestroContratos.ccounips,MaestroContratos.cupsree2, MaestroContratos.cpuntmed, MaestroContratos.tpuntmed, MaestroContratos.vparsist, MaestroContratos.cemptitu,
MaestroContratos.ccontrat, MaestroContratos.cnumscct, MaestroContratos.fpsercon, MaestroContratos.ffinvesu,
Expedientes.csecexpe, Expedientes.fapexpd, Expedientes.finifran, Expedientes.ffinfran, Expedientes.anomalia, Expedientes.irregularidad,
Expedientes.venacord, Expedientes.vennofai, Expedientes.torigexp, Expedientes.texpedie,Expedientes.expclass, Expedientes.testexpe,
Expedientes.fnormali, Expedientes.cplan, Expedientes.ccampa, Expedientes.cempresa, Expedientes.fciexped
FROM MaestroContratos JOIN Expedientes
ON MaestroContratos.origen=Expedientes.origen AND MaestroContratos.cfinca=Expedientes.cfinca AND
MaestroContratos.cptoserv=Expedientes.cptoserv AND MaestroContratos.cderind=Expedientes.cderind AND
Expedientes.fapexpd >= MaestroContratos.fpsercon AND Expedientes.fapexpd <= MaestroContratos.ffinvesu AND irregularidad = 'S'
")
#persist(conirregularidad,"DISK_ONLY")
# Cases flagged as anomalies ('S').
# NOTE(review): here the first date condition is "fapexpd <= fpsercon"
# whereas conirregularidad uses "fapexpd >= fpsercon" — this asymmetry looks
# like a typo; confirm which window is intended.
conanomalia<- sql("
SELECT MaestroContratos.origen, MaestroContratos.cptocred, MaestroContratos.cfinca, MaestroContratos.cptoserv, MaestroContratos.cderind, MaestroContratos.cupsree,
MaestroContratos.ccounips,MaestroContratos.cupsree2, MaestroContratos.cpuntmed, MaestroContratos.tpuntmed, MaestroContratos.vparsist, MaestroContratos.cemptitu,
MaestroContratos.ccontrat, MaestroContratos.cnumscct, MaestroContratos.fpsercon, MaestroContratos.ffinvesu,
Expedientes.csecexpe, Expedientes.fapexpd, Expedientes.finifran, Expedientes.ffinfran, Expedientes.anomalia, Expedientes.irregularidad,
Expedientes.venacord, Expedientes.vennofai, Expedientes.torigexp, Expedientes.texpedie,Expedientes.expclass, Expedientes.testexpe,
Expedientes.fnormali, Expedientes.cplan, Expedientes.ccampa, Expedientes.cempresa, Expedientes.fciexped
FROM MaestroContratos JOIN Expedientes
ON MaestroContratos.origen=Expedientes.origen AND MaestroContratos.cfinca=Expedientes.cfinca AND
MaestroContratos.cptoserv=Expedientes.cptoserv AND MaestroContratos.cderind=Expedientes.cderind AND
Expedientes.fapexpd <= MaestroContratos.fpsercon AND Expedientes.fapexpd <= MaestroContratos.ffinvesu AND anomalia = 'S'
")
# All cases related to a single contract (ad-hoc sanity checks).
# NOTE(review): this comment originally referenced ccontrat = 180000836140,
# but t01/t02 below query 210016945200 (only t03 uses 180000836140) — confirm.
t01<- sql("
SELECT * FROM MaestroContratosExpedientes WHERE ccontrat = 210016945200 AND irregularidad = 'S'
")
t02<- sql("
SELECT * FROM MaestroContratosExpedientes WHERE ccontrat = 210016945200
")
df3<-take(t02,100)
t03<- sql("
SELECT * FROM MaestroContratos WHERE ccontrat = 180000836140
")
df4<-take(t03,20)
#DISTINCT
#persist(conanomalia,"DISK_ONLY")
# Quick inspection of the anomaly/irregularity result sets.
head(conanomalia)
count(conanomalia)
head(conirregularidad)
count(conirregularidad)
# Materialize small samples locally for manual inspection.
df1<-take(conirregularidad,40)
df<-take(conexp,30)
# Shut down the local Spark session.
sparkR.stop()
|
8d7b8977002870aa537b934c3a30aae878eec1b0
|
02f8a640669a34eec542458bc3dcd4502c34bead
|
/R/utils.R
|
edf776aafd1ee4ebdc5aaedf259f7a68a34d7860
|
[] |
no_license
|
Yue-Jiang/karyoploteR
|
ae0bde33f7ec226fb92be9072ab69a82a132f76e
|
867f7cc722d5f814806750b4fa39ce988d10b441
|
refs/heads/master
| 2020-03-25T22:42:59.661088
| 2018-08-10T05:23:40
| 2018-08-10T05:23:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,479
|
r
|
utils.R
|
#internal
#Utility functions used only within the package

#Recycle the arguments as needed so they all share the length of the longest
#one. Taken from:
# http://stackoverflow.com/questions/9335099/implementation-of-standard-recycling-rules
recycle <- function(...){
  dotList <- list(...)
  # vapply() instead of sapply() so the element lengths are always returned
  # as a plain integer vector, regardless of input shape.
  max.length <- max(vapply(dotList, length, integer(1L)))
  lapply(dotList, rep, length=max.length)
}
#Only recycles the first argument (to the length of the longest argument)
#and returns it
recycle.first <- function(...){
  dotList <- list(...)
  # vapply() instead of sapply() for a type-stable integer result.
  max.length <- max(vapply(dotList, length, integer(1L)))
  return(rep_len(dotList[[1]], length.out=max.length))
}
#' filterParams
#'
#' @description
#' Given a list, keep only the valid.elements from each member. Also works
#' when given a single vector instead of a list.
#'
#' @details
#' Used when plotting only a part of the genome to subset graphical
#' parameters. Each member (or the vector itself) is filtered with
#' 'valid.elements' only when its length is exactly 'orig.length';
#' anything else is left untouched.
#'
#' @usage filterParams(p, valid.elements, orig.length)
#'
#' @param p a list or a single vector
#' @param valid.elements a boolean vector with the elements to keep
#' @param orig.length the length of the elements on which to apply the filtering
#'
#' @return
#' p with some members filtered
#'
#'
#' @examples
#'
#' a <- 1:10
#' b <- 3:5
#' c <- 2
#'
#' filterParams(list(a,b,c), c(rep(TRUE,5), rep(FALSE,5)), 10)
#' filterParams(a, c(rep(TRUE,5), rep(FALSE,5)), 10)
#'
#' @export filterParams
#'
filterParams <- function(p, valid.elements, orig.length) {
  if(!methods::is(p, "list")) {
    # Single vector: subset it directly when its length matches.
    if(length(p) == orig.length) {
      p <- p[valid.elements]
    }
    return(p)
  }
  # List: filter each member independently; members whose length does not
  # match orig.length are preserved as-is.
  for(member.idx in seq_along(p)) {
    if(length(p[[member.idx]]) == orig.length) {
      p[[member.idx]] <- p[[member.idx]][valid.elements]
    }
  }
  return(p)
}
############ Colors ###############

#' lighter
#'
#' @description
#' Given a color (or a vector of colors), return a lighter one
#'
#' @details
#' Very simple utility function to create lighter colors. Given a color, it
#' transforms it to rgb space, adds a set amount to all channels, clamps the
#' result to the valid range and transforms it back to a color.
#'
#' @usage lighter(col, amount=150)
#'
#' @param col (color) The original color. May be a vector of colors.
#' @param amount (integer, [0-255]) The fixed amount to add to each RGB channel (Defaults to 150).
#'
#' @return
#' A lighter color
#'
#' @seealso \code{\link{darker}}
#'
#' @examples
#'
#' lighter("red")
#' lighter("#333333")
#'
#' @export lighter
#'
lighter <- function(col, amount=150) {
  # col2rgb() returns a 3 x n matrix (rows R,G,B; one column per color),
  # rescaled here to [0, 1].
  new.col <- ((grDevices::col2rgb(col))+amount)/255
  # Clamp every channel of every color to 1. (The previous code only clamped
  # the first column, so vectors of colors could carry values > 1 into rgb()
  # and fail.)
  new.col[new.col > 1] <- 1
  return(grDevices::rgb(t(new.col)))
}
#' darker
#'
#' @description
#' Given a color (or a vector of colors), return a darker one
#'
#' @details
#' Very simple utility function to create darker colors. Given a color, it
#' transforms it to rgb space, subtracts a set amount from all channels,
#' clamps the result to the valid range and transforms it back to a color.
#'
#' @usage darker(col, amount=150)
#'
#' @param col (color) The original color. May be a vector of colors.
#' @param amount (integer, [0-255]) The fixed amount to subtract from each RGB channel (Defaults to 150).
#'
#' @return
#' A darker color
#'
#' @seealso \code{\link{lighter}}
#'
#' @examples
#'
#' darker("red")
#' darker("#333333")
#'
#' @export darker
#'
#Given a color, returns a darker one
darker <- function(col, amount=150) {
  # col2rgb() returns a 3 x n matrix (rows R,G,B; one column per color),
  # rescaled here to [0, 1].
  new.col <- ((grDevices::col2rgb(col))-amount)/255
  # Clamp every channel of every color to 0. (The previous code only clamped
  # the first column, so vectors of colors could carry negative values into
  # rgb() and fail.)
  new.col[new.col < 0] <- 0
  return(grDevices::rgb(t(new.col)))
}
|
28e4b6987a831af25e52e136ee105b17868925d4
|
3c6f49d7d20a99b1ebd27acd426a3270192f3fa6
|
/code/01_build_dataset.R
|
b13a8defa2c6b118f505ebe8b9672385aa56e0ee
|
[] |
no_license
|
ryanschmidt03/econ346honoroption
|
046bb9ba4d158c7118039814f010dda65ff03c48
|
6cec20fed00e4bd1d1017f1d5ea015a04d24e7b4
|
refs/heads/main
| 2023-06-09T17:42:39.363334
| 2021-06-30T21:05:43
| 2021-06-30T21:05:43
| 308,104,361
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,017
|
r
|
01_build_dataset.R
|
# This script builds the analysis dataset.

library(pacman)
p_load(tidyverse, lubridate)

#####################################
# ---- Visitation ----
# Hourly cellphone-panel visitation for RMNP. `devices` is the total number
# of devices observed in the panel over time and is used to normalize the
# visits: we compute the fraction of the panel visiting RMNP and scale it by
# the US population, assuming the panel represents the population.
# (A daily alternative, data/visitation_with_policy_day_and_month_dummies.csv,
# was read here previously and is kept commented out for reference.)
#visit_raw <- read_csv(file = "data/visitation_with_policy_day_and_month_dummies.csv")
us_population <- 331000000
hourly_raw <- read_csv("data/rmnp_hourly.csv") %>%
  mutate(est_visits = round(visits / devices * us_population))

# ---- Weather ----
weather_raw <- read_csv("data/weather_rmnp.csv")

# ---- Merge ----
# analysis_ds <- inner_join(visit_raw,
#                           weather_raw,
#                           by="date")
analysis_ds <- inner_join(hourly_raw,
                          weather_raw,
                          by = c("measure_date" = "date"))

saveRDS(analysis_ds, file = "cache/analysis_ds.rds")
|
70394269ef3bba852e53456db67ac63e34b8b623
|
091211fc733515cbcd42ad63998fcf6184bf3e77
|
/man/predict.tprofile.Rd
|
3a607617a159287dacf40fe57c54f1d17253eff2
|
[] |
no_license
|
AndrewYRoyal/ebase
|
3560e2e4e717120357b066f27fbfa094d6bb34ec
|
7decc805dc80d26a77505c8c4fb87816c63a7a24
|
refs/heads/master
| 2022-12-22T17:23:30.440452
| 2020-09-30T12:31:43
| 2020-09-30T12:31:43
| 168,870,979
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 297
|
rd
|
predict.tprofile.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/temp_profile.R
\name{predict.tprofile}
\alias{predict.tprofile}
\title{Predict site-year heating and cooling load}
\usage{
\method{predict}{tprofile}(x, dat)
}
\description{
Predict site-year heating and cooling load
}
|
642e168993f75f9f9adf087df384ff9bae9b47e9
|
5c0f37d8908d2fbd234a0cd0dddb371f4c0f2f77
|
/check/rFreight.Rcheck/00_pkg_src/rFreight/man/progressStart.Rd
|
e647a8f0b2ec63eb48f7ca8dcd12903c9a78cfff
|
[] |
no_license
|
CMAP-REPOS/cmap_freight_model
|
e5a1515eaf0e1861eab6ec94ea797b95e97af456
|
580f3bda1df885b1c3e169642eb483c2d92d7e3d
|
refs/heads/master
| 2023-05-01T10:26:36.170941
| 2021-02-10T18:24:57
| 2021-02-10T18:24:57
| 73,124,375
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 733
|
rd
|
progressStart.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/progressStart.R
\name{progressStart}
\alias{progressStart}
\title{Starts a model step: progress bar, timing, loading inputs}
\usage{
progressStart(steplist, steps, modellist = model)
}
\arguments{
\item{steplist}{List object for the current model component}
\item{steps}{Number of progress bar steps, integer (>=1)}
\item{modellist}{List object for the model, defaults to model}
}
\description{
This function is called at the beginning of a model component to initiate the
progress bar, to start timing the model steps, and to load the inputs used during the model step.
}
\examples{
\dontrun{
progressStart(firm_Synthesis,9)
}
}
\keyword{Management}
|
4d83d6f40b209d41f09cb6c0b476ade4a8d9bbda
|
5d3d1b0916535dad8a83a9dad9e23ed77b982d8e
|
/man/dsquared.Rd
|
50b9dda8b3144574f898b22ac47fadb8181445ea
|
[] |
no_license
|
cran/agrmt
|
3d280f0d45e7dcc141556269548296131f2c43cc
|
849caf12caabffb97aba71b2b2a54d2d36d2ec4a
|
refs/heads/master
| 2021-11-25T02:48:56.040559
| 2021-11-17T21:20:02
| 2021-11-17T21:20:02
| 17,694,324
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 781
|
rd
|
dsquared.Rd
|
\name{dsquared}
\alias{dsquared}
\title{Calculate d-squared}
\description{Calculate Blair and Lacy's d-squared.}
\usage{dsquared(V)}
\arguments{
\item{V}{A frequency vector}
}
\details{This function calculates Blair and Lacy's d-squared, a measure of concentration based on squared Euclidean distances. This function follows the presentation by Blair and Lacy 2000. The measure l-squared normalizes the values and is implemented as \code{\link{lsquared}}.}
\value{The function returns the d-squared.}
\references{Blair, J., and M. Lacy. 2000. Statistics of Ordinal Variation. Sociological Methods \& Research 28 (3): 251-280.}
\author{Didier Ruedin}
\seealso{\code{\link{lsquared}}, \code{\link{BlairLacy}}}
\examples{
# Sample data
V <- c(30,40,210,130,530,50,10)
dsquared(V)
}
|
a76448a67586f90aa90b9bacb71866087d9a1569
|
34ff60d1b274e0c4cf41d9f548b44b9792766939
|
/Maria/SentimentAnalysis/sentiment_day6.R
|
69f5bceaacb2693570aaa838e2dc748023dd4be3
|
[] |
no_license
|
thepanacealab/Hurricane-Analysis
|
6581f04e71dd2e29986cb15ef45b262eae1901e3
|
87eca0475ce16f8ffb680fd696f2a838ff44bc03
|
refs/heads/master
| 2020-05-23T15:46:35.195506
| 2019-08-20T02:14:37
| 2019-08-20T02:14:37
| 186,833,715
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,781
|
r
|
sentiment_day6.R
|
# Day 6 (2017-09-21) sentiment analysis of geocoded resident tweets.
# Assumes `conn` (DB connection), `residentList`, syuzhet's
# get_nrc_sentiment() and ggplot2 are already provided by the surrounding
# scripts — TODO confirm the driving script loads them.

# Get all the tweets with geocoded information for 2017-09-21.
allTweetsDay6 <- dbGetQuery(conn,
            "SELECT 
            CAST(tweetuser AS varchar(1000)),
            date(tweetcreated),
            TRIM (LEADING '['
            FROM split_part(tweetgeocoord, ',', 1)
            ) as lat,
            TRIM (TRAILING ']'
            FROM split_part(tweetgeocoord, ',', 2)
            ) as long,
            tweetpname,
            tweettext
            FROM tweets_info
            WHERE tweetcreated BETWEEN '2017-09-21' AND '2017-09-22';
            ")

# Keep only tweets posted by known residents.
residentTweetsDay6 <- allTweetsDay6[allTweetsDay6$tweetuser %in% residentList, ]
tweet_day6 <- residentTweetsDay6$tweettext

# ---- Text cleaning ----
# Convert all text to lower case.
tweet_day6 <- tolower(tweet_day6)
# Remove the retweet marker: match "rt" only as a standalone word.
# (The previous pattern "rt" also deleted "rt" inside words such as
# "airport", corrupting the text fed to the sentiment scorer.)
tweet_day6 <- gsub("\\brt\\b", "", tweet_day6)
# Remove @UserName mentions.
tweet_day6 <- gsub("@\\w+", "", tweet_day6)
# Remove punctuation.
tweet_day6 <- gsub("[[:punct:]]", "", tweet_day6)
# Remove links.
tweet_day6 <- gsub("http\\w+", "", tweet_day6)
# Remove runs of spaces/tabs left behind by the removals above.
tweet_day6 <- gsub("[ |\t]{2,}", "", tweet_day6)
# Remove blank spaces at the beginning and the end.
tweet_day6 <- gsub("^ ", "", tweet_day6)
tweet_day6 <- gsub(" $", "", tweet_day6)

# Score the NRC emotions (plus positive/negative) per tweet.
sentiment_day6 <- get_nrc_sentiment((tweet_day6))
# Total score for each sentiment across all tweets.
Sentimentscores_day6 <- data.frame(colSums(sentiment_day6[,]))
names(Sentimentscores_day6) <- "Score"
Sentimentscores_day6 <- cbind("day" = rep(c(21), 10),
                              "sentiment" = rownames(Sentimentscores_day6),
                              Sentimentscores_day6)
rownames(Sentimentscores_day6) <- NULL
saveRDS(Sentimentscores_day6, "day6.Rds")

# Plot the aggregated sentiment scores.
ggplot(data = Sentimentscores_day6, aes(x = sentiment, y = Score, group = 1)) +
  geom_line() +
  geom_point() +
  theme(legend.position = "none") +
  xlab("Sentiments") + ylab("scores") +
  ggtitle("Sentiments of people on Sep 21st, 2017")
|
3405aec690bc55a9bad2609fd337d61178e3c4ff
|
3d59091d775a71d6bd645e0089be0c1dc4a7ff09
|
/cachematrix.R
|
dcdff5be4cf65a93fcfe6b0c030a3438bfd4340f
|
[] |
no_license
|
rajeevkmenon/ProgrammingAssignment2
|
044da26760b8fbde7b55c3bc2b96b86e6f6c3bdf
|
7edc4b86b9dfab4494556feb37d168ea3c4278ad
|
refs/heads/master
| 2021-01-09T07:03:39.125610
| 2017-12-10T19:54:04
| 2017-12-10T19:54:04
| 37,754,923
| 0
| 0
| null | 2015-06-20T02:24:50
| 2015-06-20T02:24:50
| null |
UTF-8
|
R
| false
| false
| 1,715
|
r
|
cachematrix.R
|
## Caching wrapper for a matrix and its (lazily computed) inverse.

## makeCacheMatrix creates and returns a list of four closures sharing one
## environment:
##   set        -- replace the stored matrix (and invalidate the cache)
##   get        -- return the stored matrix
##   setinverse -- store a computed inverse in the cache
##   getinverse -- return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new matrix: any previous inverse is stale
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }

  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve returns the inverse of the matrix wrapped by makeCacheMatrix.
## On the first call it computes the inverse with solve() and stores it via
## x$setinverse(); subsequent calls return the cached result without
## recomputing.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    message("first time calculation. no cache exists..")
    # Compute, cache, and return the inverse.
    cached <- solve(x$get())
    x$setinverse(cached)
    cached
  } else {
    message("no recalculation needed. returning cached values.")
    cached
  }
}
|
fb4711c31310cfe9ec6809394afff31c79716d86
|
8967080ed53683afe6783e51bce533c0e7d56e09
|
/problem3.R
|
34cb061fe811f39cf50c96526067d15149852389
|
[] |
no_license
|
niklaslang/UoE_MATH11174_Ass2
|
1598cce5e6dcda57efbfa5c105314e91cc503854
|
ff832e588d28ef7a02f4a68b67fabdafcd645776
|
refs/heads/master
| 2021-02-22T07:49:10.010477
| 2020-04-10T15:56:37
| 2020-04-10T15:56:37
| 245,372,421
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,351
|
r
|
problem3.R
|
### (a) ###
# Reading file `GDM.raw.txt` into a data table named `gdm.dt`,
# and storing rsID and coded allele in a separate data table (call it snp.allele.dt)
library(data.table)
gdm.dt <- fread("GDM.raw.txt")
# NOTE(review): installing a package at run time is a side effect and assumes
# internet access; consider declaring stringr as a documented dependency instead.
if(!"stringr" %in% rownames(installed.packages())) {
  install.packages("stringr") # functions for making regex stuff super intuitive
}
library(stringr)
# SNP column names look like "<rsID>_<allele>"; pull the two parts out with regexes
rsID.regex <- "rs\\d+"               # leading rs identifier, e.g. "rs12345"
reference.allele.regex <- "[ACGT]$"  # trailing single coded-allele letter
# columns 1-3 are ID/sex/pheno (see later code), hence the [-c(1,2,3)] drop
str_extract(colnames(gdm.dt)[-c(1,2,3)], rsID.regex)
str_extract(colnames(gdm.dt)[-c(1,2,3)], reference.allele.regex)
# lookup table: full column name, rsID, and coded (reference) allele per SNP
snp.allele.dt <- data.table( "snp.name" = colnames(gdm.dt)[-c(1,2,3)],
                             "rsID" = str_extract(colnames(gdm.dt)[-c(1,2,3)], rsID.regex),
                             "reference.allele" = str_extract(colnames(gdm.dt)[-c(1,2,3)], reference.allele.regex))
# Imputing missing values in `gdm.dt` according to SNP-wise average allele count
table(is.na(gdm.dt))  # before imputation: how many genotype calls are missing
for (colnm in colnames(gdm.dt[,-1])) {
  # replace NAs in each column by that column's mean allele count
  gdm.dt[[colnm]][is.na(gdm.dt[[colnm]])] <- mean(gdm.dt[[colnm]], na.rm = TRUE)
}
table(is.na(gdm.dt))  # after imputation: should report no TRUE entries
### (b) ###
# Writing a function
# univ.glm.test <- function(x, y, order=FALSE)
# where x is a data table of SNPs, y is a binary outcome vector,
# and order is either TRUE or FALSE:
# the function should fit a logistic regression model for each SNP in x,
# and return a data table containing SNP names,
# regression coefficients,
# odds ratios,
# standard errors
# and p-values.
# If order is set to TRUE, the output data table should be ordered by increasing p-value
## Fit a univariate logistic regression for each SNP column in `data`.
##
## Args:
##   data:    data.table/data.frame of SNP allele counts, one column per SNP.
##   outcome: binary outcome vector, one entry per row of `data`.
##   order:   if TRUE, sort the result by increasing p-value.
##
## Returns a data.table with columns: snp, beta, std.error, p.value, odds.ratio.
univ.glm.test <- function( data, outcome, order=FALSE){
  output <- NULL
  # loop over all SNP columns
  for (snp in seq_along(data)){        # seq_along is safe for zero columns
    # assertion check: outcome must align with the genotype vector
    stopifnot(length(outcome) == length(data[[snp]]))
    # fit logistic regression model for this SNP alone
    log.regr <- glm(outcome ~ data[[snp]], family = "binomial")
    # regression model summary with beta, std.error and p.value
    log.regr.summary <- data.table(signif(coef(summary(log.regr)),3))[-1,-3] # exclude intercept and z-value
    # add SNP summary to output table
    output <- rbind(output, log.regr.summary)
  }
  # label rows with the input's own column names
  # (previously this read the global snp.allele.dt, silently assuming it
  #  existed and was in exactly the same order as `data`)
  output <- cbind(colnames(data), output)
  # add colnames to output table
  colnames(output) <- c("snp","beta", "std.error", "p.value")
  # compute odds ratio from the log-odds coefficient
  output[, odds.ratio := signif(exp(beta),3)]
  if (isTRUE(order)){
    # sort output by increasing p-value
    setorder(output, p.value)
  }
  return(output)
}
### (c) ###
# Using function `univ.glm.test()`,
# running an association study for all the SNPs in `gdm.dt` against having gestational diabetes
# (column “pheno”).
gdm.snp.dt <- univ.glm.test(gdm.dt[,!c("ID","sex","pheno")], gdm.dt$pheno)
# For the SNP that is most strongly associated to increased risk of gestational diabetes
# and the one with most significant protective effect,
# reporting the summary statistics from the GWAS
# as well as the 95% and 99% confidence intervals on the odds ratio
# SNP most strongly associated with gestational diabetes
gdm.snp.dt[p.value == min(p.value)]
# NOTE(review): the printed row above is selected by smallest p-value, but
# beta1/se1 below are taken from the row with the LARGEST beta -- these need
# not be the same SNP; confirm which selection the exercise intends.
beta1 <- gdm.snp.dt[beta == max(beta), beta]
se1 <- gdm.snp.dt[beta == max(beta), std.error]
# 95% CI on the odds ratio: exp(beta +/- 1.96 * SE)
round(exp(beta1 + 1.96 * se1 * c(-1, 1)), 3)
# 99% CI: exp(beta +/- 2.58 * SE)
round(exp(beta1 + 2.58 * se1 * c(-1, 1)), 3)
# SNP with most significant protective effect (smallest odds ratio)
gdm.snp.dt[odds.ratio == min(odds.ratio)]
beta2 <- gdm.snp.dt[odds.ratio == min(odds.ratio), beta]
se2 <- gdm.snp.dt[odds.ratio == min(odds.ratio), std.error]
# 95% CI
round(exp(beta2 + 1.96 * se2 * c(-1, 1)), 3)
# 99% CI
round(exp(beta2 + 2.58 * se2 * c(-1, 1)), 3)
### (d) ###
# merging the GWAS results with the table of gene names provided in file `GDM.annot.txt`
gdm.annot.dt <- fread("GDM.annot.txt")
gdm.gwas.dt <- merge(snp.allele.dt, gdm.annot.dt,
                     by.x = "rsID", by.y = "snp")
gdm.gwas.dt <- merge(gdm.gwas.dt, gdm.snp.dt,
                     by.x = "snp.name", by.y = "snp")
# positions must be numeric for the window arithmetic below
gdm.gwas.dt[, pos := as.numeric(pos)]
# reporting SNP name, effect allele, chromosome number and corresponding gene name
# for all SNPs that have p-value < 10^-4
hit.snp.dt <- gdm.gwas.dt[p.value < 1e-4]
hit.snp.dt[,c("snp.name","reference.allele","chrom","gene")]
# for all hit SNPs reporting all gene names that are within a 1Mb window from the SNP position on the same chromosome
# NOTE(review): the two lines below hard-code exactly two hits; they will fail
# or silently mislead if hit.snp.dt has a different number of rows.
# hit no.1
gdm.gwas.dt[chrom == hit.snp.dt$chrom[1]][pos >= hit.snp.dt$pos[1] - 1000000 & pos <= hit.snp.dt$pos[1] + 1000000]$gene
# hit.no.2
gdm.gwas.dt[chrom == hit.snp.dt$chrom[2]][pos >= hit.snp.dt$pos[2] - 1000000 & pos <= hit.snp.dt$pos[2] + 1000000]$gene
### (e) ###
# Building a weighted genetic risk score that includes all SNPs with p-value < 10^-4,
# a second score with all SNPs with p-value < 10^-3,
# and a third score that only includes SNPs on the FTO gene
# ensure that the ordering of SNPs is respected: reorder the GWAS table to
# exactly match the SNP column order of gdm.dt
gdm.gwas.dt <- gdm.gwas.dt[match(colnames(gdm.dt)[-c(1,2,3)], gdm.gwas.dt$snp.name),]
# assertion check that the ordering of SNPs is respected
stopifnot(colnames(gdm.dt)[-c(1,2,3)] == gdm.gwas.dt$snp.name)
# Each score is a genotype-matrix %*% beta-weights product over the selected SNPs.
# score 1: p.value < 10^-4
gdm1.snp <- gdm.gwas.dt[p.value < 1e-4]
gdm1.grs <- gdm.dt[, .SD, .SDcols = gdm.gwas.dt[p.value < 1e-4]$snp.name]
gdm1.weighted.grs <- as.matrix(gdm1.grs) %*% gdm1.snp$beta
# score 2: p.value < 10^-3
gdm2.snp <- gdm.gwas.dt[p.value < 1e-3]
gdm2.grs <- gdm.dt[, .SD, .SDcols = gdm.gwas.dt[p.value < 1e-3]$snp.name]
gdm2.weighted.grs <- as.matrix(gdm2.grs) %*% gdm2.snp$beta
# score 3: SNP on the FTO gene
gdm3.snp <- gdm.gwas.dt[gene == "FTO"]
gdm3.grs <- gdm.dt[, .SD, .SDcols = gdm.gwas.dt[gene == "FTO"]$snp.name]
gdm3.weighted.grs <- as.matrix(gdm3.grs) %*% gdm3.snp$beta
# adding the three scores as columns to the `gdm.dt` data table
gdm.dt$p4.score <- gdm1.weighted.grs
gdm.dt$p3.score <- gdm2.weighted.grs
gdm.dt$FTO.score <- gdm3.weighted.grs
# fitting the three scores in separate logistic regression models to test their association
# with gestational diabetes:
# score 1: SNPs with p.value < 10^-4
p4.score.log.regr <- glm(pheno ~ p4.score, data = gdm.dt, family = "binomial")
# score 2: SNPs with p.value < 10^-3
p3.score.log.regr <- glm(pheno ~ p3.score, data = gdm.dt, family = "binomial")
# score 3: SNPs on the FTO gene
FTO.score.log.regr <- glm(pheno ~ FTO.score, data = gdm.dt, family = "binomial")
# function to calculate odds ratio, 95% CI and p-value for a logistic regression model
## Summarise a fitted univariate logistic regression model.
##
## Args:
##   model: a glm fit with the covariate of interest as the second coefficient.
##
## Returns a one-row data.table: odds.ratio, 2.5%, 97.5%, p.value, where the
## CI bounds are exponentiated profile-likelihood limits for the covariate.
model.stats <- function(model){
  # compute odds ratio, 95% CI and p-value for the second coefficient
  coefs <- coef(summary(model))
  ci <- confint(model)  # profile CI is expensive: compute it once, not twice
  odds.ratio <- exp(coefs[2,1])
  CI.lower <- exp(ci[2,1])
  CI.upper <- exp(ci[2,2])
  p.value <- coefs[2,4]
  # brief summary table of the summary statistics
  gdm.grs.dt <- data.table(rbind(NULL, c(round(odds.ratio,3), round(CI.lower,3), round(CI.upper,3), signif(p.value,3))))
  colnames(gdm.grs.dt) <- c("odds.ratio","2.5%","97.5%","p.value")
  return(gdm.grs.dt)
}
# reporting odds ratio, 95% confidence interval and p-value for each model
# score 1: SNPs with p.value < 10^-4
model.stats(p4.score.log.regr)
# score 2: SNPs with p.value < 10^-3
model.stats(p3.score.log.regr)
# score 3: SNPs on the FTO gene
model.stats(FTO.score.log.regr)
### (f) ###
# Reading the file `GDM.test.txt` into variable `gdm.test.dt`
gdm.test.dt <- fread("GDM.test.txt", stringsAsFactors = TRUE)
# For the set of patients in `gdm.test.dt`,
# computing the three genetic risk scores as defined at point (e) using the same set of SNPs and corresponding weights
# ensure that the ordering of SNPs is respected -- note the test file names its
# SNP columns by rsID, so matching is on rsID here (vs snp.name at point (e))
gdm.gwas.dt <- gdm.gwas.dt[match(colnames(gdm.test.dt)[-c(1,2,3)], gdm.gwas.dt$rsID),]
# assertion check that the ordering of SNPs is respected
stopifnot(colnames(gdm.test.dt)[-c(1,2,3)] == gdm.gwas.dt$rsID)
# score 1: p.value < 10^-4 (re-using the training betas gdm1.snp$beta as weights)
gdm.test1.grs <- gdm.test.dt[, .SD, .SDcols = gdm.gwas.dt[p.value < 1e-4]$rsID]
gdm.test1.weighted.grs <- as.matrix(gdm.test1.grs) %*% gdm1.snp$beta
# score 2: p.value < 10^-3
gdm.test2.grs <- gdm.test.dt[, .SD, .SDcols = gdm.gwas.dt[p.value < 1e-3]$rsID]
gdm.test2.weighted.grs <- as.matrix(gdm.test2.grs) %*% gdm2.snp$beta
# score 3: SNP on the FTO gene
gdm.test3.grs <- gdm.test.dt[, .SD, .SDcols = gdm.gwas.dt[gene == "FTO"]$rsID]
gdm.test3.weighted.grs <- as.matrix(gdm.test3.grs) %*% gdm3.snp$beta
# Adding the three scores as columns to `gdm.test.dt` (same column names as before,
# so the fitted models can be applied directly via predict())
gdm.test.dt$p4.score <- gdm.test1.weighted.grs
gdm.test.dt$p3.score <- gdm.test2.weighted.grs
gdm.test.dt$FTO.score <- gdm.test3.weighted.grs
### (g) ###
# Using the logistic regression models fitted at point (e) to predict the outcome of patients in gdm.test.dt
p4.score.pred <- predict(p4.score.log.regr, gdm.test.dt, type="response")
p3.score.pred <- predict(p3.score.log.regr, gdm.test.dt, type="response")
FTO.score.pred <- predict(FTO.score.log.regr, gdm.test.dt, type="response")
# Computing the test log-likelihood for the predicted probabilities from the three genetic risk score models
# with the binomial likelihood function sum(log(all prediction values where the observed result was 1)) + sum(log( 1 - all prediction values where the observed result was 0))
# for score 1: p.value < 10^-4
p4.score.pred.loglik <- sum(log(p4.score.pred[gdm.test.dt$pheno == 1])) + sum(log(1-p4.score.pred[gdm.test.dt$pheno == 0]))
p4.score.pred.loglik
# for score 2: p.value < 10^-3
p3.score.pred.loglik <- sum(log(p3.score.pred[gdm.test.dt$pheno == 1])) + sum(log(1-p3.score.pred[gdm.test.dt$pheno == 0]))
p3.score.pred.loglik
# for score 3: FTO gene
FTO.score.pred.loglik <- sum(log(FTO.score.pred[gdm.test.dt$pheno == 1])) + sum(log(1-FTO.score.pred[gdm.test.dt$pheno == 0]))
FTO.score.pred.loglik
# compute the (training) log-likelihoods of the three models
logLik(p4.score.log.regr)
logLik(p3.score.log.regr)
logLik(FTO.score.log.regr)
# perform likelihood-ratio test of each model against its null (intercept-only)
# model: deviance difference ~ chi-squared with 1 df
pchisq(p4.score.log.regr$null.deviance - p4.score.log.regr$deviance, df=1, lower.tail=FALSE)
pchisq(p3.score.log.regr$null.deviance - p3.score.log.regr$deviance, df=1, lower.tail=FALSE)
pchisq(FTO.score.log.regr$null.deviance - FTO.score.log.regr$deviance, df=1, lower.tail=FALSE)
### (h) ###
# Performing a meta-analysis of `GDM.study2.txt`
# containing the summary statistics from a different study on the same set of SNPs
# and the results obtained at point (c)
gdm.gwas2.dt <- fread("GDM.study2.txt")
gdm.gwas1.dt <- gdm.gwas.dt
# harmonize datasets: keep only SNPs present in both studies
gdm.gwas2.dt <- gdm.gwas2.dt[snp %in% gdm.gwas.dt$rsID]
gdm.gwas.dt <- gdm.gwas.dt[rsID %in% gdm.gwas2.dt$snp]
# align study 2 rows to study 1's rsID order
# NOTE(review): the original comment said "order by chromosome and position",
# but the match() below actually orders by rsID -- the effect is the same as
# long as both tables cover identical SNPs.
gdm.gwas2.dt <- gdm.gwas2.dt[match(gdm.gwas.dt$rsID, gdm.gwas2.dt$snp),]
stopifnot(all.equal(gdm.gwas.dt$rsID, gdm.gwas2.dt$snp))
# matching alleles: study 1's coded allele equals study 2's effect allele
matching.alleles <- gdm.gwas.dt$reference.allele == gdm.gwas2.dt$effect.allele & gdm.gwas.dt$rsID == gdm.gwas2.dt$snp
# flipped alleles: study 1's coded allele equals study 2's OTHER allele
flipping.alleles <- gdm.gwas.dt$reference.allele == gdm.gwas2.dt$other.allele & gdm.gwas.dt$rsID == gdm.gwas2.dt$snp
# unmatched alleles: TRUE when both flags above are FALSE (they can never both
# be TRUE, since effect and other allele differ), so equality flags "neither"
unmatched.alleles <- matching.alleles == flipping.alleles
# summary
table(matching.alleles, flipping.alleles)
# ensure that the effect alleles correspond: negate betas for flipped SNPs
beta1 <- gdm.gwas1.dt$beta
beta2 <- gdm.gwas2.dt$beta
beta2[flipping.alleles] <- -beta2[flipping.alleles]
# exclude SNPs that couldn't be matched after swapping
beta1 <- beta1[!unmatched.alleles]
beta2 <- beta2[!unmatched.alleles]
# inverse variance weighting: weight each study by 1 / SE^2
weight.gwas1 <- 1 / gdm.gwas1.dt$std.error[!unmatched.alleles]^2
weight.gwas2 <- 1 / gdm.gwas2.dt$se[!unmatched.alleles]^2
# computing the meta-analysis effect size
# which is a weighted sum of the effect sizes from each study, weighted according to the weight just derived.
beta.ma <- (weight.gwas1 * beta1 + weight.gwas2 * beta2) / (weight.gwas1 + weight.gwas2)
se.ma <- sqrt(1 / (weight.gwas1 + weight.gwas2))
# two-sided p-values for the meta-analysis z-statistics
pval.ma <- 2 * pnorm(abs(beta.ma / se.ma), lower.tail=FALSE)
ma.results.dt <- data.table("snp" = gdm.gwas1.dt$rsID[!unmatched.alleles],
                            "p.value" = pval.ma)
# select for meta-analysis p-value < 10^-4
ma.results.dt <- ma.results.dt[p.value < 1e-4]
setorder(ma.results.dt, p.value)
# show
head(ma.results.dt)
|
25fb8d169c3c3aba6f484721123f75d8aeeb3a3e
|
dcee1dc28392dee9c57ccf0b3c1c5a20a300fbcb
|
/man/get_benchmark_fund_relationship.Rd
|
1794eed91ec291a07b10b0d5598d6f53a25c78c8
|
[] |
no_license
|
stoltzmaniac/AZASRS
|
05b1156581ecba3c585f35c8094cb6f244b30bf2
|
3213480feabefd45d1c4e17953ef030def9f72b5
|
refs/heads/master
| 2020-12-12T13:53:55.141655
| 2020-01-15T15:54:28
| 2020-01-15T15:54:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 626
|
rd
|
get_benchmark_fund_relationship.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_benchmark_fund_relationship.R
\name{get_benchmark_fund_relationship}
\alias{get_benchmark_fund_relationship}
\title{Get relationships between funds and benchmarks get_benchmark_fund_relationship}
\usage{
get_benchmark_fund_relationship(
con = AZASRS_DATABASE_CONNECTION(),
return_tibble = FALSE
)
}
\arguments{
\item{con}{is the db connection, default of AZASRS_DATABASE_CONNECTION()}
}
\value{
Returns a table of relationships of ALL types, not filtered
}
\description{
Finds all data from benchmark_info_id matched with pm_fund_info_id
}
|
e8ec5729e5b3bcc4f43d9caee389793bac5da158
|
27d0c7693aa36a78f82929c32f0967707b9c0429
|
/man/getAE.Rd
|
48c80330c570f95a424f181e87be82973660f01d
|
[] |
no_license
|
suhaibMo/ArrayExpress
|
2fbc6f4dce9e53211263caf5c20fc4be1c9e83c9
|
90c0f055598de3320d7e9759408dd983d968ff8f
|
refs/heads/master
| 2020-03-08T20:01:08.935108
| 2018-04-06T08:52:14
| 2018-04-06T08:52:14
| 128,371,135
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,626
|
rd
|
getAE.Rd
|
\name{getAE}
\alias{getAE}
\docType{data}
\title{ Download MAGE-TAB files from ArrayExpress in a specified directory }
\description{
\code{getAE} downloads and extracts the MAGE-TAB files from an
ArrayExpress dataset.
}
\usage{
getAE(accession, path = getwd(), type = "full", extract = TRUE, local = FALSE, sourcedir = path)
}
\arguments{
\item{accession}{ is an ArrayExpress experiment identifier. }
\item{path}{ is the name of the directory in which the files
downloaded on the ArrayExpress repository will be extracted.}
\item{type}{ can be 'raw' to download and extract only the raw data,
'processed' to download and extract only the processed data or
'full' to have both raw and processed data.}
\item{extract}{ if FALSE, the files are not extracted from the zip archive.}
\item{local}{ if TRUE, files will be read from a local folder specified by sourcedir.}
\item{sourcedir}{ when local = TRUE, files will be read from this directory.}
}
\value{
\code{ A list with the names of the files that
have been downloaded and extracted. }
}
\seealso{\code{\link[ArrayExpress]{ArrayExpress}},
\code{\link[ArrayExpress]{ae2bioc}},
\code{\link[ArrayExpress]{getcolproc}},
\code{\link[ArrayExpress]{procset}}}
\author{
Ibrahim Emam, Audrey Kauffmann
Maintainer: <iemam@ebi.ac.uk>
}
\examples{
mexp1422 = getAE("E-MEXP-1422", type = "full")
## Build an ExpressionSet from the raw data
MEXP1422raw = ae2bioc(mageFiles = mexp1422)
## Build an ExpressionSet from the processed data
cnames = getcolproc(mexp1422)
MEXP1422proc = procset(mexp1422, cnames[2])
}
\keyword{datasets}
|
6e7f22594595cd875876ecd19603a4fd813dad1f
|
78cbe41b44c4b6004664261ca40ad000b9369b5e
|
/run_analysis.R
|
eca513ac34187ebb34ac4847b28f4b6a99890be0
|
[] |
no_license
|
Krish31875/Getting-and-Cleaning-data
|
71ea13aeb41d125aeb2038ce5a59e35cfea24c03
|
934c901300e6306d23943ebad569fe658088ebde
|
refs/heads/master
| 2021-01-10T16:52:20.502321
| 2016-02-06T14:29:49
| 2016-02-06T14:29:49
| 51,203,895
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,366
|
r
|
run_analysis.R
|
# NOTE(review): this file is a saved R console transcript (see savehistory()
# at the bottom), NOT a clean top-to-bottom script. Several commands below
# error out (typos such as stsplit/filepath, undefined objects such as
# activityLabels/labels/allData) and are then retried with corrections on the
# following lines. It documents the exploration; it will not run as-is.
gd <- read.csv("getdata_data_ss06hid[1].csv")
getwd()
gd <- read.csv("getdata_data_ss06hid.csv")
dim(gd)
stsplit(names(gd))[123]          # errors: function is strsplit, not stsplit
strsplit(names(gd))[123]         # errors: strsplit needs a split argument
strsplit(gd, names)              # errors: wrong argument order/types
varnames <- strsplit(gd, "wgtp") # errors: first argument must be character
varnames <- strsplit(names(gd), "wgtp")  # corrected call
varnames[[123]]
fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
f <- filepath(getwd(), "getdata.csv")    # errors: function is file.path
f <- file.path(getwd(), "getdata.csv")
download.file(fileurl,f)
getdata <- read.csv("getdata.csv")
dim(getdata)
head(getdata)
str(getdata)
names(getdata)
install.packages("data.table")
library(data.table)
# NOTE(review): read.csv is given the data frame `getdata`, not a file path --
# this line errors; presumably "getdata.csv" was intended.
dtGDP <- data.table(read.csv(getdata, skip = 4, nrows = 215, stringsAsFactors = FALSE))
summary(getdata)
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
f <- file.path(getwd(), "project.csv")
download.file(url, f)
project <- read.csv("project.csv")
dim(project)
getwd()
library(reshape2)
f<- file.path(getwd(), "project.zip")
download.file(url, f)
unzip(project.zip)    # errors: unquoted name; corrected on the next line
unzip("project.zip")
Labels <- read.table("UCI HAR Dataset/activity_labels.txt")
Labels
Labels[,2] <- as.character(activityLabels[,2])  # errors: activityLabels undefined
Labels[,2] <- as.character(Labels[,2])
Labels[,2]
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features)    # wrong: coerces the whole frame; fixed below
features[,2] <- as.character(features[,2])
features[,2]
# keep only mean/std measurement columns and tidy up their names
requiredfeatures <- grep(".*mean.*|.*std.*", features[,2])
requiredfeatures.names <- features[requiredfeatures,2]
requiredfeatures.names <- gsub('-mean', 'mean', requiredfeatures.names)
requiredfeatures.names <- gsub('-std', 'std', requiredfeatures.names)
requiredfeatures.names <- gsub('[-()]', '', requiredfeatures.names)
#download the datasets
train <- read.table("UCI HAR Datset/train/X_train.txt")    # errors: "Datset" typo
train <- read.table("UCI HAR Datset/train/X_train.txt")[requiredfeatures]  # same typo
train <- read.table("UCI HAR Dataset/train/X_train.txt")
dim(train)
trainact <- read.table("UCI HAR Datset/train/Y_train.txt") # errors: "Datset" typo
trainact <- read.table("UCI HAR Dataset/train/Y_train.txt")
trainsub <- read.table("UCI HAR Dataset/train/subject_train.txt")
trainact
dim(trainact)
dim(trainsub)
train <- cbind(trainsub, trainact, train)
dim(train)
head(train)
#download test datasets
test <- read.table("UCI HAR Dataset/test/X_test.txt")
test <- read.table("UCI HAR Dataset/test/X_test.txt")[requiredfeatures]
dim(test)
head(test)
testact <- read.table("UCI HAR Dataset/test/Y_test.txt")
dim(testact)
head(testact)
testsub <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(testsub, testact, test)
#merge all datasets
alldata <- rbind(train, test)
# re-read train with the column subset applied so train/test shapes agree
train <- read.table("UCI HAR Dataset/train/X_train.txt")[requiredfeatures]
train <- cbind(trainsub, trainact, train)
alldata <- rbind(train, test)
# NOTE(review): assigning to `colnames` shadows the base function of the same
# name in the workspace -- confusing; the vector is re-created inline later.
colnames <- c("subject", "activity", requiredfeatures.names)
str(colnames)
head(Labels[,1])
head(labels[,2])   # errors: `labels` (lower-case) undefined; `Labels` intended
head(Labels[,2])
head(colnames)
allData$activity <- factor(alldata$activity, levels = Labels[,1], labels = labels[,2])  # errors: allData/labels undefined
allData$activity <- factor(alldata$activity, levels = Labels[,1], labels = Labels[,2])  # errors: allData undefined
alldata <- colnames   # mistake: clobbers the merged data with the name vector
allData$activity <- factor(alldata$activity, levels = Labels[,1], labels = Labels[,2])  # errors again
alldata$activity <- factor(alldata$activity, levels = Labels[,1], labels = Labels[,2])
colnames(alldata) <- c("subject", "activity", requiredfeatures.names)
alldata$subject <- as.factor(alldata$subject)
# recover from the clobbering above by rebuilding alldata from train/test
alldata <- rbind(train, test)
dim(alldata)
colnames(alldata) <- c("subject", "activity", requiredfeatures.names)
alldata$activity <- factor(alldata$activity, levels = Labels[,1], labels = Labels[,2])
alldata$subject <- as.factor(alldata$subject)
dim(alldata)
head(alldata)
# reshape long and compute per subject/activity means (reshape2)
alldata.melted <- melt(alldata, id = c("subject", "activity"))
head(alldata.melted)
dim(alldata.melted)
alldata.mean <- dcast(alldata.melted, subject+activity ~variable, mean)
head(alldata.mean)
dim(alldata.mean)
write.table(alldata.mean, "tidy.txt", row.names = FALSE, quote = FALSE)
save(file = getwd(), "run_analysis.R")   # errors: file must be a file path
save(file = "d:/downloads/run_analysis.RData")   # errors: no objects listed / bad path
save(file = "C:/users/Nikhilanantha/Downloads/run_analysis.RData")
load("C:/Users/Nikhilanantha/Downloads/run_analysis.RData")
save.image("C:/Users/Nikhilanantha/Downloads/run_analysis.RData")
savehistory("C:/Users/Nikhilanantha/Downloads/run_analysis.RData")
load("C:/Users/Nikhilanantha/Downloads/run_analysis.RData")
load("C:/Users/Nikhilanantha/Downloads/run_analysis.RData")
|
470510473cadccb5e93d5594efb214832b96f36e
|
08cd51b59bed5318ca5701c31356bd625d862b66
|
/corr.R
|
3f227bcc38b4969459cc18faa80d34b4a93a084c
|
[] |
no_license
|
Franzhang/R-Programming
|
07f31464a9876c93db656824dfcde75bcc35acae
|
2023cd9d1cda730d928253c5cefaca1956c49da7
|
refs/heads/master
| 2016-09-05T14:34:46.271413
| 2014-08-06T01:07:54
| 2014-08-06T01:07:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 554
|
r
|
corr.R
|
## Compute the nitrate/sulfate correlation for every monitor CSV file in
## `directory`, returning correlations only for monitors whose number of
## completely observed rows exceeds `threshold`.
##
## Args:
##   directory: path to a directory of monitor CSV files (each with at least
##              `nitrate` and `sulfate` columns).
##   threshold: minimum number of complete cases required (strictly greater).
##
## Returns a numeric vector of correlations, or NULL when no monitor
## qualifies (preserving the original contract).
corr <- function(directory, threshold = 0) {
  files_list <- list.files(directory, full.names = TRUE)
  n <- length(files_list)      # generalized: no hard-coded 332 monitors
  cor_vec <- numeric(n)        # preallocate instead of growing with c()
  nobs <- integer(n)
  for (i in seq_len(n)) {
    dat <- read.csv(files_list[i])
    nobs[i] <- sum(complete.cases(dat))
    # "na.or.complete" yields NA when a file has no complete rows
    cor_vec[i] <- cor(dat$nitrate, dat$sulfate, use = "na.or.complete")
  }
  # single filtering pass replaces the original second loop
  outcome <- cor_vec[nobs > threshold]
  if (length(outcome) == 0) {
    return(NULL)  # original returned NULL (empty c()) in this case
  }
  outcome
}
|
9a055925f1a6b518773c180da2dc9f30eb5d63f8
|
04b4df4159043ec6db1cdf47b7314d7cf56cd657
|
/man/stanova_lm.Rd
|
a31933f2d19afc3894edadc07cd9971545286625
|
[] |
no_license
|
bayesstuff/stanova
|
f37630f26fecaf4f3e21bfe07def0232ac91ff82
|
988ad8e07cda1674b881570a85502be7795fbd4e
|
refs/heads/master
| 2021-07-04T04:34:07.870525
| 2021-06-06T18:54:59
| 2021-06-06T18:54:59
| 241,858,315
| 8
| 0
| null | 2020-09-18T14:33:54
| 2020-02-20T10:41:11
|
HTML
|
UTF-8
|
R
| false
| true
| 2,286
|
rd
|
stanova_lm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stanova_lm.R
\name{stanova_lm}
\alias{stanova_lm}
\alias{stanova_aov}
\alias{stanova_glm}
\title{Estimate ANOVA-type models with rstanarm}
\usage{
stanova_lm(formula, data, check_contrasts = "contr.bayes", ...)
stanova_aov(formula, data, check_contrasts = "contr.bayes", ...)
stanova_glm(formula, data, family, check_contrasts = "contr.bayes", ...)
}
\arguments{
\item{formula}{a formula describing the model to be fitted. Passed to
\code{rstanarm::stan_glm}.}
\item{data}{\code{data.frame} containing the data.}
\item{check_contrasts}{\code{character} string (of length 1) denoting a contrast
function or a contrast function which should be assigned to all \code{character}
and \code{factor} variables in the model (as long as the specified contrast is
not the global default). Default is \link{contr.bayes}. Set to \code{NULL} to disable
the check.}
\item{...}{further arguments passed to the \code{rstanarm} function used for
fitting. Typical arguments are \code{prior}, \code{prior_intercept}, \code{chain}, \code{iter},
or \code{core}.}
\item{family}{\code{family} argument passed to \code{stan_glm} (set to \code{"gaussian"} for
\code{stanova_lm} and \code{stanova_aov}).}
}
\description{
Estimate ANOVA-type models with rstanarm
}
\note{
\code{stanova_aov} is a copy of \code{stanova_lm}. All functions discussed here
are only wrappers around \code{\link{stanova}} setting \code{model_fun} to \code{"glm"} (and
\code{family = "gaussian"} for \code{stanova_lm}).
}
\examples{
fit_warp <- stanova_lm(breaks ~ wool * tension, data = warpbreaks,
prior = rstanarm::student_t(3, 0, 20, autoscale = FALSE),
chains = 2, iter = 500)
summary(fit_warp)
### from: ?predict.glm
## example from Venables and Ripley (2002, pp. 190-2.)
dfbin <- data.frame(
ldose = rep(0:5, 2),
numdead = c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16),
sex = factor(rep(c("M", "F"), c(6, 6)))
)
budworm.lg <- stanova_glm(cbind(numdead, numalive = 20-numdead) ~ sex*ldose,
data = dfbin,
family = binomial,
chains = 2, iter = 500)
## note: only sex is categorical, ldose is continuous
summary(budworm.lg)
}
|
53f5b8529eb08ab7b2f3da50b8bf347515eac05d
|
66278b8e44b1ed85d37868ae1ea27d5514a77138
|
/rlang_201Grade/.Rproj.user/38390253/sources/per/t/2871C139-contents
|
efb28010a6a9cda2c0cd10ce01e602440bfacff5
|
[] |
no_license
|
amkan5/R
|
d26c7bd11e66d9e830438f8d8cd01bcba75cd012
|
1477f9bdf16e27495c09bbe04011a0c8e0757d90
|
refs/heads/master
| 2020-03-23T04:37:33.476904
| 2018-08-03T08:39:56
| 2018-08-03T08:39:56
| 141,095,474
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 940
|
2871C139-contents
|
## dplyr cheat sheet
# filter()    extract rows
# select()    extract columns
# arrange()   sort
# mutate()    add variables
# summarise() compute summary statistics
# group_by()  split into groups
# left_join() combine data by columns
# bind_rows() combine data by rows
# View()      inspect data in the viewer pane -- careful: the V is upper-case
install.packages("dplyr")
library(dplyr)
path <- getwd() # current working directory (where we are working right now)
path
setwd("csv_exam") # change working directory
# NOTE(review): setwd() inside a script is fragile; prefer absolute/relative
# paths passed to read.csv directly.
df_exam <- read.csv("csv_exam.csv")
# if the file contains character columns, also pass stringsAsFactors = F
is.data.frame(df_exam)
View(df_exam)
df_exam <- rename (df_exam, userid=id)
# NOTE(review): `english` is added twice below -- one of the two terms is
# likely meant to be a different subject column; verify against the CSV schema.
df_exam$total <- df_exam$math+df_exam$english+df_exam$english
# NOTE(review): mean(df_exam$total) is one overall number recycled to every
# row, so every student receives the same avg (and therefore the same grade).
# Confirm whether a per-student average (total / number of subjects) was meant.
df_exam$avg <- mean(df_exam$total)
# letter grade from avg via nested ifelse: A >= 90, B >= 80, C >= 70, D >= 60, else E
df_exam$grade <- ifelse(
  df_exam$avg >= 90, "A",
  ifelse(df_exam$avg >=80, "B",
         ifelse(df_exam$avg >=70, "C",
                ifelse(df_exam$avg >=60, "D","E")
         ))
)
(df_exam)
|
|
2677d3f342a8b893007d0ca7d23fdd1c9b7c8ab2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/nlmeU/examples/missPat.Rd.R
|
f14fe7329549afba23349b6f1535a4b93914aea9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 235
|
r
|
missPat.Rd.R
|
library(nlmeU)
### Name: missPat
### Title: Extract pattern of missing data
### Aliases: missPat
### ** Examples
# keep only the three visual-acuity follow-up columns of the armd.wide data
dtf <- subset(armd.wide,
              select = c(visual12, visual24, visual52))
# summarise the per-row missingness pattern; symbols mark missing/observed cells
missPat(dtf, symbols = c("?","+"))
|
887475c3095976c7d9996ca17d1523966849fa29
|
0885e50ada7d5d8df3e3418a63ec126f5e599477
|
/Predict_Extract/10k_buffer.R
|
13fadc0c0f6d73fd6e4d21d940de88abbeb48de0
|
[
"Unlicense"
] |
permissive
|
hamishgibbs/Gibbs_Thesis_R
|
badda547bef0923d2c4dac151ef4eb774c64b57f
|
71e7661d565c0c0b3d6b82d8078d9ea90da61b9e
|
refs/heads/master
| 2023-07-03T02:35:05.536420
| 2019-08-12T09:49:27
| 2019-08-12T09:49:27
| 201,491,440
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,311
|
r
|
10k_buffer.R
|
library(rgdal)
library(rgeos)
library(sp)
library(spatial)
library(raster)
# Make a point file with all the points within 10 km of the study-site centroid
pts = readOGR(dsn='F:/Predictive_Modelling/Vector_Data', layer='Cell_Centroids')
boundary = readOGR(dsn='F:/Field_data/04_Colombia_results', layer='plot_boundary')
plot(boundary)
centroid = gCentroid(boundary)
centroid
plot(centroid, add=T)
# NOTE(review): width = 10000 is in the layer's CRS units -- this is 10 km
# only if the data are in a metric projected CRS; confirm the projection.
# capStyle = 'SQUARE' makes the "buffer" a square box around the point.
buffer = gBuffer(centroid, width = 10000.0, byid=FALSE, capStyle = 'SQUARE')
plot(buffer)
buffer = as(buffer, 'SpatialPolygonsDataFrame')
# NOTE(review): this line rebuilds the object from its own @data slot and
# appears redundant after the as() coercion above -- verify it is needed.
buffer = SpatialPolygonsDataFrame(buffer, data=buffer@data)
writeOGR(obj=buffer, dsn='F:/Predictive_Modelling/Vector_Data', layer="10k_Buffer", driver="ESRI Shapefile")
# keep only the centroids falling inside the 10 km box
points_crop = crop(pts, buffer)
points_crop
writeOGR(obj=points_crop, dsn='F:/Predictive_Modelling/Vector_Data', layer="10k_Points", driver="ESRI Shapefile")
plot(points_crop)
brick = brick('F:/Predictive_Modelling/S2_Scene/Mask/S2A_MSIL1C_20180820T153621_N0206_R068_T18NUJ_20180820T210738_S2C_resampled_msk.tif')
band = subset(brick, 3)
# time extraction for 10 points, then extrapolate to the full point set
start_time = Sys.time()
extract_test = extract(band, pts[1:10,])
end_time = Sys.time()
te = as(end_time - start_time, 'numeric')
n = 5755                   # total number of points to be extracted
time = (((n/10) * te)/60)  # projected minutes for all points, one raster band
time
# roughly 3 minutes per raster; projected total for 30 rasters:
time * 30
|
2a0ee03612e428c76ae88be6288890211c494e89
|
5168565cb17124d490b1019e5e2f8e06433936a0
|
/cachematrix.R
|
a14eda015f5be28d749ece3c5b4d825cfee3d196
|
[] |
no_license
|
yamanarora/ProgrammingAssignment2
|
ac37b46f703131ab05d08b994094a46a5e2ec774
|
472e3dc3e579d10a82521192ace4a5cd486d4a89
|
refs/heads/master
| 2021-01-12T09:09:48.422428
| 2016-12-18T10:54:16
| 2016-12-18T10:54:16
| 76,776,379
| 0
| 0
| null | 2016-12-18T10:26:50
| 2016-12-18T10:26:49
| null |
UTF-8
|
R
| false
| false
| 1,106
|
r
|
cachematrix.R
|
## Here are two functions that are aimed at getting the inverse of a matrix (assumed to be invertible). The aim is to check the cache
## for an existing solution, and if solution is not present, then calculate the inverse and store it in the cache.
## The function makeCacheMatrix accepts a matrix whose inverse is to be found as input. It creates a matrix to store its inverse in
## the cache.
## makeCacheMatrix: build a special "matrix" object -- a list of four closures
## sharing the matrix `x` and a cached inverse `cached_inverse`.
## `set` replaces the matrix (and invalidates the cache), `get` returns it,
## `setmat`/`getmat` store and retrieve the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix, so the old inverse is stale
    },
    get = function() x,
    setmat = function(solve) cached_inverse <<- solve,
    getmat = function() cached_inverse
  )
}
## The function cacheSolve checks if the inverse of the matrix required already exists in the cache or not. If it exists,
## it is returned, and if the inverse does not exist, it calculates the inverse.
## cacheSolve: return the inverse of the special "matrix" built by
## makeCacheMatrix. On a cache hit a message is printed and the stored
## inverse is returned; on a miss the inverse is computed with solve(),
## stored via setmat(), and returned.
cacheSolve <- function(x=matrix(), ...) {
  cached <- x$getmat()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setmat(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
4294b8832751dc1f724edd9a086a85748d0170f2
|
55a5e246d1318275a5a0f1fc9b2e1b080ab26fe7
|
/man/transform_adjust_brightness.Rd
|
c287e66ca4a28fa3dfa108c5168c420a3521d924
|
[
"MIT"
] |
permissive
|
mohamed-180/torchvision
|
610577f5b1dec7a628df8c047c41ec18376e35f5
|
0761c61441f838f1b0c6f3624c40542934fb24f8
|
refs/heads/main
| 2023-07-14T14:20:21.225664
| 2021-08-23T17:18:10
| 2021-08-23T17:18:10
| 399,161,368
| 0
| 0
|
NOASSERTION
| 2021-08-23T15:48:55
| 2021-08-23T15:48:54
| null |
UTF-8
|
R
| false
| true
| 2,067
|
rd
|
transform_adjust_brightness.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transforms-generics.R
\name{transform_adjust_brightness}
\alias{transform_adjust_brightness}
\title{Adjust the brightness of an image}
\usage{
transform_adjust_brightness(img, brightness_factor)
}
\arguments{
\item{img}{A \code{magick-image}, \code{array} or \code{torch_tensor}.}
\item{brightness_factor}{(float): How much to adjust the brightness. Can be
any non negative number. 0 gives a black image, 1 gives the
original image while 2 increases the brightness by a factor of 2.}
}
\description{
Adjust the brightness of an image
}
\seealso{
Other transforms:
\code{\link{transform_adjust_contrast}()},
\code{\link{transform_adjust_gamma}()},
\code{\link{transform_adjust_hue}()},
\code{\link{transform_adjust_saturation}()},
\code{\link{transform_affine}()},
\code{\link{transform_center_crop}()},
\code{\link{transform_color_jitter}()},
\code{\link{transform_convert_image_dtype}()},
\code{\link{transform_crop}()},
\code{\link{transform_five_crop}()},
\code{\link{transform_grayscale}()},
\code{\link{transform_hflip}()},
\code{\link{transform_linear_transformation}()},
\code{\link{transform_normalize}()},
\code{\link{transform_pad}()},
\code{\link{transform_perspective}()},
\code{\link{transform_random_affine}()},
\code{\link{transform_random_apply}()},
\code{\link{transform_random_choice}()},
\code{\link{transform_random_crop}()},
\code{\link{transform_random_erasing}()},
\code{\link{transform_random_grayscale}()},
\code{\link{transform_random_horizontal_flip}()},
\code{\link{transform_random_order}()},
\code{\link{transform_random_perspective}()},
\code{\link{transform_random_resized_crop}()},
\code{\link{transform_random_rotation}()},
\code{\link{transform_random_vertical_flip}()},
\code{\link{transform_resized_crop}()},
\code{\link{transform_resize}()},
\code{\link{transform_rgb_to_grayscale}()},
\code{\link{transform_rotate}()},
\code{\link{transform_ten_crop}()},
\code{\link{transform_to_tensor}()},
\code{\link{transform_vflip}()}
}
\concept{transforms}
|
94d15cea612b3c0e2bece9f0d810e63504eb5514
|
80dfcda1ebcb091dd2bef1d566683c206967aa7c
|
/cachematrix.R
|
1c604c6465139e4d61bb7ccab2c92a196196f8ab
|
[] |
no_license
|
Umar412/ProgrammingAssignment2
|
b68339979f62b2ba94fc703cd808c361e2367d31
|
769ca47a7039761569be4f4da0fbe3d66cc47c99
|
refs/heads/master
| 2022-11-05T21:41:11.269821
| 2020-06-16T20:15:23
| 2020-06-16T20:15:23
| 272,793,164
| 0
| 0
| null | 2020-06-16T19:20:02
| 2020-06-16T19:20:01
| null |
UTF-8
|
R
| false
| false
| 1,157
|
r
|
cachematrix.R
|
## makeCacheMatrix: This function creates a special “matrix” object that can cache its inverse.
## makeCacheMatrix: This function creates a special "matrix" object that can
## cache its inverse. It returns a list of closures: `f` replaces the stored
## matrix (clearing the cache), `g` reads it, and setInverse/getInverse
## write/read the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  memo <- NULL
  list(
    f = function(y) {
      x <<- y
      memo <<- NULL  # stored matrix changed; drop the stale inverse
    },
    g = function() x,
    setInverse = function(inverse) memo <<- inverse,
    getInverse = function() memo
  )
}
## cacheSolve: This function computes the inverse of the special “matrix” returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should
## retrieve the inverse from the cache.
## cacheSolve: This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse was already computed
## (and the matrix unchanged), the cached copy is returned after printing
## "cached data"; otherwise the inverse is computed, cached, and returned.
cacheSolve <- function(x, ...) {
  hit <- x$getInverse()
  if (is.null(hit)) {
    hit <- solve(x$g(), ...)   # cache miss: compute and remember the inverse
    x$setInverse(hit)
  } else {
    message("cached data")     # cache hit: reuse the stored inverse
  }
  hit
}
|
bdb20fc129dec5ef018b5a109543e03faccf662c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/uwIntroStats/examples/tableStat.Rd.R
|
fc2f484e5bfd1617b89dd0f5434d9bb97715e765
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,795
|
r
|
tableStat.Rd.R
|
# Example script (extracted from the uwIntroStats package's Rd examples)
# demonstrating tableStat(): stratified descriptive statistics where the
# `stat` string contains @...@ tokens that are substituted with the computed
# statistics (per ?tableStat -- confirm token semantics there).
# NOTE(review): the dataset is fetched over plain HTTP at run time, so this
# script requires network access and trusts an external host.
library(uwIntroStats)
### Name: tableStat
### Title: Table of Stratified Descriptive Statistics
### Aliases: tableStat tableStat.default tableStat.do print.tableStat
### Keywords: ~kwd1 ~kwd2
### ** Examples
# Load required libraries
library(survival)
# Reading in a dataset
mri <- read.table("http://www.emersonstatistics.com/datasets/mri.txt",header=TRUE)
# Creating a Surv object to reflect time to death
mri$ttodth <- Surv(mri$obstime,mri$death)
# Reformatting an integer MMDDYY representation of date to be a Date object
mri$mridate <- as.Date(paste(trunc(mri$mridate/10000),trunc((mri$mridate %% 10000)/100),
mri$mridate %% 100,sep="/"),"%m/%d/%y")
# Cross tabulation of counts with sex and race strata
with (mri, tableStat (NULL, race, male, stat= "@count@ (r @row%@; c @col%@; t @tot%@)"))
# Cross tabulation of counts with sex, race, and coronary disease strata
# (Note row and column percentages are defined within the first two strata, while overall
# percentage considers all strata)
with (mri, tableStat (NULL, race, male, chd,
stat= "@count@ (r @row%@; c @col%@; t @tot%@)"))
# Description of time to death with appropriate quantiles
# (each @q@ token consumes the next value of `probs`, in order)
with (mri, tableStat(ttodth,probs=c(0.05,0.1,0.15,0.2),
stat="mean @mean@ (q05: @q@; q10: @q@; q15: @q@; q20: @q@; max: @max@)"))
# Description of mridate with mean, range stratified by race and sex
with (mri, tableStat(mridate, race, male,
stat="mean @mean@ (range @min@ - @max@)"))
# Stratified descriptive statistics with proportions
with (mri, tableStat(age,stat=">75: @p@; >85: @p@; [-Inf,75): @p@; [75,85): @p@;
[85,Inf): @p@"), above=c(75,85),lbetween=c(75,85))
# Descriptive statistics on a subset comprised of males
with (mri, tableStat(dsst,age,stroke,subset=male==1,
stat="@mean@ (@sd@; n= @count@/@missing@)"))
|
f43580b65d5b378bf5cb94ad2cd2d3277bc66c87
|
ab5871840e0b0e01d03feec960bd6478a5e49dca
|
/man/tests.Rd
|
3cdc634c4091b60bdae383006b10f905eeb8fc5a
|
[] |
no_license
|
jeff-hughes/paramtest
|
2551cf59e140406db965f3dee3b89cb547be1144
|
bee7be25e9bd6ef6c69c9d59291f0c451db3cfca
|
refs/heads/master
| 2020-05-21T06:15:45.463415
| 2017-10-24T14:16:49
| 2017-10-24T14:16:49
| 84,586,183
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 731
|
rd
|
tests.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{tests}
\alias{tests}
\alias{tests.paramtest}
\title{Return the parameter values that were tested by paramtest.}
\usage{
tests(test, ...)
\method{tests}{paramtest}(test, ...)
}
\arguments{
\item{test}{An object of type 'paramtest'.}
\item{...}{Not currently implemented; used to ensure consistency with S3 generic.}
}
\value{
Returns a data frame with one row for each set of tests that
was performed.
}
\description{
\code{tests} extracts information about the set of specific tests (parameter
values) for a parameter test.
}
\section{Methods (by class)}{
\itemize{
\item \code{paramtest}: Parameter values for a parameter test.
}}
|
ca170da4e1541ca0f15f2a1c411f28dbb2d8dbf8
|
18a07f5c173da511804cb30f4604d063063a34f9
|
/man/gamma_coin.Rd
|
3d46e978fdec7dbc17c41dd1e8c9792b2642e464
|
[
"CC-BY-4.0"
] |
permissive
|
rchan26/layeredBB
|
b0b61837e94f1c64b5aa9542e9608a332d2ee4c2
|
d40cd35bbb055409c21d2cc6612d00bc0fd80b94
|
refs/heads/master
| 2022-03-07T13:16:42.029976
| 2022-02-25T13:24:19
| 2022-02-25T13:24:19
| 188,111,543
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,085
|
rd
|
gamma_coin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{gamma_coin}
\alias{gamma_coin}
\title{Gamma coin flipper (Algorithm 26 in ST329)}
\usage{
gamma_coin(u, k, x, y, s, t, l, v)
}
\arguments{
\item{u}{simulated value from random U[0,1]}
\item{k}{integer value starting index for calculating the intervals}
\item{x}{start value of Brownian bridge}
\item{y}{end value of Brownian bridge}
\item{s}{start value of Brownian bridge}
\item{t}{end value of Brownian bridge}
\item{l}{lower bound of Brownian bridge}
\item{v}{upper bound of Brownian bridge}
}
\value{
boolean value: if T, accept probability that Brownian bridge remains
in [l,v], otherwise reject
}
\description{
Flips 'Gamma coin'; uses the Cauchy sequence S^{gamma}_{k} to
determine whether or not the Brownian bridge starting at x, ending at y, between [s,t]
remains in interval [l,v]
}
\examples{
gamma_coin(u = runif(1, 0, 1),
k = 0,
x = 0,
y = 0,
s = 0,
t = 1,
l = -0.5,
v = 0.5)
}
|
335981be08673fef2876a6ca03b4e5c14f3a0cfc
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/Luminescence/R/calc_HomogeneityTest.R
|
3080d581f30a78cb669982d9850c8a25a6bb1d79
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,994
|
r
|
calc_HomogeneityTest.R
|
#' Apply a simple homogeneity test after Galbraith (2003)
#'
#' A simple homogeneity test for De estimates
#'
#' For details see Galbraith (2003).
#'
#' @param data \code{\linkS4class{RLum.Results}} or \link{data.frame}
#' (\bold{required}): for \code{data.frame}: two columns with De
#' \code{(data[,1])} and De error \code{(values[,2])}
#' @param log \code{\link{logical}} (with default): perform the homogeneity test
#' with (un-)logged data
#' @param \dots further arguments (for internal compatibility only).
#' @return Returns a terminal output. In addition an
#' \code{\linkS4class{RLum.Results}} object is returned containing the
#' following element:
#'
#' \item{summary}{\link{data.frame} summary of all relevant model results.}
#' \item{data}{\link{data.frame} original input data} \item{args}{\link{list}
#' used arguments} \item{call}{\link{call} the function call}
#'
#' The output should be accessed using the function
#' \code{\link{get_RLum}}
#' @section Function version: 0.2
#' @author Christoph Burow, University of Cologne (Germany)
#' @seealso \code{\link{pchisq}}
#' @references Galbraith, R.F., 2003. A simple homogeneity test for estimates
#' of dose obtained using OSL. Ancient TL 21, 75-77.
#' @examples
#'
#' ## load example data
#' data(ExampleData.DeValues, envir = environment())
#'
#' ## apply the homogeneity test
#' calc_HomogeneityTest(ExampleData.DeValues$BT998)
#'
#' @export
calc_HomogeneityTest <- function(
  data,
  log=TRUE,
  ...
){

  ##============================================================================##
  ## CONSISTENCY CHECK OF INPUT DATA
  ##============================================================================##

  if(missing(data)==FALSE){
    if(is(data, "data.frame") == FALSE & is(data, "RLum.Results") == FALSE){
      ## BUGFIX (review): the message previously referred to
      ## "[calc_FiniteMixture]" -- a copy/paste leftover from another function.
      stop("[calc_HomogeneityTest] Error: 'data' object has to be of type
           'data.frame' or 'RLum.Results'!")
    } else {
      if(is(data, "RLum.Results") == TRUE){
        data <- get_RLum(data, signature(object = "De.values"))
      }
    }
  }

  ##==========================================================================##
  ## ... ARGUMENTS
  ##==========================================================================##

  extraArgs <- list(...)

  ## allow callers to silence the terminal report with verbose = FALSE
  if("verbose" %in% names(extraArgs)) {
    verbose<- extraArgs$verbose
  } else {
    verbose<- TRUE
  }

  ##============================================================================##
  ## CALCULATIONS
  ##============================================================================##

  ## Optionally work on the log scale. NOTE: this logs BOTH columns, i.e. the
  ## De errors are also log-transformed -- this mirrors the original
  ## implementation and assumes errors are given on a relative/log scale
  ## (Galbraith 2003); confirm for your data.
  if(log==TRUE){
    dat<- log(data)
  } else {
    dat<- data
  }

  wi<- 1/dat[2]^2        # inverse-variance weights from the De errors
  wizi<- wi*dat[1]
  mu<- sum(wizi)/sum(wi) # precision-weighted mean of the De estimates
  gi<- wi*(dat[1]-mu)^2
  G<- sum(gi)            # homogeneity statistic; ~ chi-squared under H0
  df<- length(wi)-1
  n<- length(wi)
  ## NOTE(review): when `dat` is a data.frame, dat[2] is a one-column
  ## data.frame, so length(wi) is its column count (1), not the number of
  ## estimates -- verify n/df against length(dat[[2]]) for data.frame input.
  P<- pchisq(G, df, lower.tail = FALSE)  # upper-tail p-value of G

  ##============================================================================##
  ## OUTPUT
  ##============================================================================##

  if(verbose == TRUE) {
    cat("\n [calc_HomogeneityTest]")
    cat(paste("\n\n ---------------------------------"))
    cat(paste("\n n: ", n))
    cat(paste("\n ---------------------------------"))
    cat(paste("\n mu: ", round(mu,4)))
    cat(paste("\n G-value: ", round(G,4)))
    cat(paste("\n Degrees of freedom:", df))
    cat(paste("\n P-value: ", round(P,4)))
    cat(paste("\n ---------------------------------\n\n"))
  }

  ##============================================================================##
  ## RETURN VALUES
  ##============================================================================##

  summary<- data.frame(n=n,g.value=G,df=df,P.value=P)
  call<- sys.call()
  args<- list(log=log)

  newRLumResults.calc_HomogeneityTest <- set_RLum(
    class = "RLum.Results",
    data = list(
      summary=summary,
      data=data,
      args=args,
      call=call
    ))

  ## returned invisibly; retrieve elements with get_RLum()
  invisible(newRLumResults.calc_HomogeneityTest)
}
|
278ccfb35bfeda2c652a18586fb2f00ae6e9c0b9
|
16a4a8be49003375bdb2868f472697fe5313b62e
|
/Exploratory/Week3/plots3and6.R
|
f478639fe22ef5569c82d12a3d570d9530044091
|
[
"MIT"
] |
permissive
|
llattes/datasciencecoursera
|
42cd3c0881b79fb71ffcad08abd75ff926782961
|
bb807e97a077d1167780fac51413b526ab2aa62d
|
refs/heads/master
| 2021-01-17T15:25:23.879098
| 2015-09-02T17:40:17
| 2015-09-02T17:40:17
| 27,514,469
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,023
|
r
|
plots3and6.R
|
# Exploratory Data Analysis course project: PM2.5 emission plots.
# This file concatenates one "Plot 6" solution and three alternative "Plot 3"
# solutions; the later Plot 3 variants re-read the data independently.
# Plot 6
# ------
## PA 2: Exploratory Data Analysis
## Plot 6
##
## Libraries needed:
library(ggplot2)
## Set working directory
# NOTE(review): setwd() to an absolute local path makes the script
# machine-specific.
setwd("D:\\Data Science Specialization\\Exploratory Data Analysis\\Course Project")
## Step 1: read in the data
## This first line will likely take a few seconds. Be patient!
# Cache the two datasets in the calling environment so re-running the script
# does not re-read the (large) RDS files.
if(!exists("NEI")){
  NEI <- readRDS("./data/summarySCC_PM25.rds")
}
if(!exists("SCC")){
  SCC <- readRDS("./data/Source_Classification_Code.rds")
}
## Q6: Compare emissions from motor vehicle sources in Baltimore City with emissions
## from motor vehicle sources in Los Angeles County, California (fips == "06037").
## Which city has seen greater changes over time in motor vehicle emissions?
## Baltimore City, Maryland (fips == "24510"),
## Los Angeles County, California (fips == "06037")
## Step 2: Searching for ON-ROAD type in NEI
## Searching for 'motor' in SCC only gave a subset (non-cars)
mvbalaPM25NEI <- NEI[(NEI$fips=="24510"|NEI$fips=="06037") & NEI$type=="ON-ROAD", ]
length(mvbalaPM25NEI) # 6
## Step 3: Searching for motor Vehicles type in SCC
mvsrcSCC <- unique(grep("Vehicles", SCC$EI.Sector, ignore.case = TRUE, value = TRUE))
mvsrcSCC1 <- SCC[SCC$EI.Sector %in% mvsrcSCC, ]["SCC"]
## Subset the motor vehicles from NEI for Baltimore, MD and Los Angeles County, CA
mvbalaPM25NEISCC <- NEI[NEI$SCC %in% mvsrcSCC1$SCC &
  (NEI$fips == "24510"|NEI$fips == "06037"),]
length(mvbalaPM25NEISCC) # 6
## Step 4: Comparision of the two search "ON-ROAD" and "Vehicles" to ensure
## that we captured the correct data
# tolerance = 0 forces an exact comparison of the two subsets rather than the
# default near-equality check.
all.equal(mvbalaPM25NEI, mvbalaPM25NEISCC, tolerance = 0)
## Step 5: Find the emissions due to motor vehicles in Baltimore city
## and Los Angeles County using the search subset for "ON-ROAD"
## type (mvbalaPM25NEI - Obtained in above Step 2)
# Sum emissions per year and county, then replace fips codes with city labels
# for the facet strips.
mvbacatotalPM25YrFips <- aggregate(Emissions ~ year + fips, mvbalaPM25NEI, sum)
mvbacatotalPM25YrFips$fips[mvbacatotalPM25YrFips$fips=="24510"] <- "Baltimore, MD"
mvbacatotalPM25YrFips$fips[mvbacatotalPM25YrFips$fips=="06037"] <- "Los Angeles, CA"
## Step 6: prepare to plot to png
png("plot6.png", width=840, height=480)
gbaLA <- ggplot(mvbacatotalPM25YrFips, aes(factor(year), Emissions))
gbaLA <- gbaLA + facet_grid(. ~ fips)
gbaLA <- gbaLA + geom_bar(stat="identity") +
  xlab("Year") +
  ylab(expression("Total PM"[2.5]*" Emissions (tons)")) +
  ggtitle(expression("Baltimore City, MD vs Los Angeles County, CA PM"[2.5]*
  " Motor Vehicle Emission 1999-2008"))
print(gbaLA)
dev.off()
# Plot 3
# ------
## PA 2: Exploratory Data Analysis
## Plot 3
##
## Libraries needed:
library(ggplot2)
## Set working directory
setwd("D:\\Data Science Specialization\\Exploratory Data Analysis\\Course Project")
## Step 1: read in the data
## This first line will likely take a few seconds. Be patient!
if(!exists("NEI")){
  NEI <- readRDS("./data/summarySCC_PM25.rds")
}
if(!exists("SCC")){
  SCC <- readRDS("./data/Source_Classification_Code.rds")
}
## Q3: Of the four types of sources indicated by the type (point, nonpoint,
## onroad, nonroad) variable, which of these four sources have seen decreases
## in emissions from 1999-2008 for Baltimore City? Which have seen increases
## in emissions from 1999-2008? Use the ggplot2 plotting system to make a plot
## answer this question. Baltimore City, Maryland (fips == "24510")
## Step 2: obtain the subsets to plot
baltimore <- NEI[NEI$fips=="24510", ]
totalPM25byYearType <- aggregate(Emissions ~ year + type, baltimore, sum)
## Step 3: prepare to plot to png
png("plot3.png", width=640, height=480)
g <- ggplot(totalPM25byYearType, aes(year, Emissions, color = type))
g <- g + geom_line() + xlab("Year") +
  ylab(expression("Total PM"[2.5]*" Emissions (tons)")) +
  ggtitle(expression("Baltimore City PM"[2.5]*" Emission by Source and Year"))
print(g)
dev.off()
# Another Plot 3
# --------------
# dplyr-based variant; overwrites plot3.png produced above.
library(dplyr)
library(ggplot2)
# Read data
dfx <- readRDS("summarySCC_PM25.rds")
# Filter for Baltimore City county
dfx.bc <- filter(dfx, fips == "24510")
# Calculate emissions by year and type for filtered data
dfx.totals <- summarize(
  group_by(dfx.bc, year, type),
  Total.Emissions = sum(Emissions)
)
# Plot the emissions by year for each type
png("plot3.png", width=768, height=480)
p <- ggplot(dfx.totals, aes(x=year, y=Total.Emissions, group = type)) +
  geom_line(aes(color=type)) +
  geom_point() +
  labs(title = "Total PM2.5 Emissions in Baltimore City by Type") +
  labs(x = "Year") +
  labs(y = "Total Emissions (in tons)")
print(p)
dev.off()
# Another Plot 3
# --------------
# aggregate()-based variant; saves via ggsave instead of an explicit device.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
NEI<-NEI[NEI$fips=="24510",]
df<-aggregate(NEI$Emissions,list(year=NEI$year,type=NEI$type),sum)
ggplot(data=df, aes(x=year, y=x, group=type, colour=type)) +
  geom_line() + geom_point() + xlab("Year") + ylab("Emissions, tons") +
  ggtitle("PM2.5 Emissions, Baltimore City, MD, 1999-2008") +
  scale_colour_discrete(name="Type of Source")
ggsave(filename="plot3.png",width=6,height=6)
|
d9feab5308bc6c9798956f47dffba6157ff28cf1
|
ce58f13cf8a15cc817317ef3ee4d55728972b663
|
/plot2.R
|
7e504ecb14ed28940271e56e542617d5e2f98bf4
|
[] |
no_license
|
Lkhagvaa-erdenesuren/ExData_Plotting1
|
9fb9767d0581f14dfd61ae8589f25d8a86a70731
|
f7dc74e431543c2cc55cfc89805a3aa746627bef
|
refs/heads/master
| 2020-12-03T01:41:31.593323
| 2016-01-11T17:39:16
| 2016-01-11T17:39:16
| 49,362,564
| 0
| 0
| null | 2016-01-10T10:35:51
| 2016-01-10T10:35:50
| null |
UTF-8
|
R
| false
| false
| 560
|
r
|
plot2.R
|
# Plot 2 of the Exploratory Data Analysis course project: line plot of global
# active power for 1-2 Feb 2007 from the household power consumption dataset.
library(datasets)
alldata <- "./data/household_power_consumption.txt"
data <- read.table(alldata, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Keep only the two target days; dates in the file are d/m/Y strings.
powerconsump2daysdata <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
# Combine the Date and Time columns into POSIX timestamps for the x axis.
date_time <- strptime(paste(powerconsump2daysdata$Date, powerconsump2daysdata$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# as.numeric() converts the character column; non-numeric entries (the
# dataset presumably marks missing values with "?" -- confirm) become NA
# with a coercion warning.
global_actpower <- as.numeric(powerconsump2daysdata$Global_active_power)
png("plot2.png", width=480, height=480)
plot(date_time, global_actpower, type="l", xlab=" ", ylab="Global Active Power (kilowatts)" )
dev.off()
|
e1dcf270a1aa095ed0d89e1cd48a8d1651083161
|
b00d5c221259a7f5d899d84e04ef57712b312a4e
|
/rprog.R
|
150b6bbf6d0ca6888a0af2ca11e7e92b18270b2e
|
[] |
no_license
|
masymbol/customer_experience
|
365e4de7139fc3a8af61ab248c6d22ee9cb792fc
|
4c82ef8c3611dc5d543aacc0e0ea7ad0b892ac8a
|
refs/heads/master
| 2021-03-27T16:15:30.131943
| 2014-12-27T10:23:00
| 2014-12-27T10:23:00
| 24,134,394
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,993
|
r
|
rprog.R
|
# Twitter sentiment-analysis script: fetch tweets for a hashtag, score each
# tweet against positive/negative opinion lexicons, and export the most
# positive/negative tweets to disk.
library(bitops)
library(digest)
library(RCurl)
library(NLP)
library(RColorBrewer)
library(ROAuth)
library(bitops)
library(RJSONIO)
library(stringr)
library(tm)
library(httr)
library(wordcloud)
library(devtools)
library(twitteR)
library(plyr)
library(stringr)
library(twitteR)
# SECURITY(review): live Twitter API keys and access tokens are hard-coded
# below and are now exposed. Revoke these credentials and load replacements
# from environment variables or a config file, never from source code.
api_key <- "fmC6OcWB4jqwBT7bRmVssagmP"
api_secret <- "3e2Y9jfPVqwUgQtEMwGaIQYrjGLe1DnG3xEMmQBnHaqQcduc94"
access_token <- "2608974788-74mmpYz4VH9dKsypPCd5ZuIvhWi9Wcnm5S7JADW"
access_token_secret <- "w7jrgfrPW5LfjQOkEjyhL6Jm5tZLoLe6vpN1cp5caaIIN"
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)
#bigdata <- searchTwitter("#iphone")
tweets = searchTwitter("#cricket",n=200)
print("search completed...")
# Flatten the list of status objects into one data frame and archive it.
bigdata.df <-do.call (rbind,lapply(tweets,as.data.frame))
write.csv(bigdata.df,"/home/raghuvarma/Documents/nodejs_examples/social-media/iphone.csv")
Tweets.text = laply(tweets,function(t)t$getText())
# Opinion lexicons: one word per line; lines starting with ";" are comments.
pos = scan('/home/raghuvarma/Desktop/swaps/project/positive-words.txt', what='character', comment.char=';')
neg = scan('/home/raghuvarma/Desktop/swaps/project/negative-words.txt', what='character', comment.char=';')
# Score each sentence as (# positive lexicon words) - (# negative lexicon
# words) after stripping punctuation, control characters, and digits, and
# lower-casing the text.
score.sentiment = function(sentences, pos.words, neg.words, .progress='none')
{
  require(plyr)
  require(stringr)
  scores = laply(sentences, function(sentence, pos.words, neg.words) {
    sentence = gsub('[[:punct:]]', '', sentence)
    sentence = gsub('[[:cntrl:]]', '', sentence)
    sentence = gsub('\\d+', '', sentence)
    sentence = tolower(sentence)
    word.list = str_split(sentence, '\\s+')
    words = unlist(word.list)
    pos.matches = match(words, pos.words)
    neg.matches = match(words, neg.words)
    # match() yields NA for non-matches; !is.na() turns it into a hit flag
    pos.matches = !is.na(pos.matches)
    neg.matches = !is.na(neg.matches)
    score = sum(pos.matches) - sum(neg.matches)
    return(score)
  }, pos.words, neg.words, .progress=.progress )
  scores.df = data.frame(score=scores, text=sentences)
  return(scores.df)
}
analysis = score.sentiment(Tweets.text, pos, neg)
table(analysis$score)
mean(analysis$score)
hist(analysis$score)
View(analysis)
############################## most +ve and _ve tweets ############################
# NOTE(review): `a` is computed but never used afterwards.
a <- grep(3, analysis$score) #find 3 of score
print("most +ve tweet :-")
p<-max(analysis$score,na.rm=TRUE) # find max
q<-min(analysis$score,na.rm=TRUE)
# Tweets attaining the extreme (max/min) scores.
dfpv1<-analysis[which(analysis$score==p),]
dfng1<-analysis[which(analysis$score==q),]
write.table(dfpv1,"/home/raghuvarma/Desktop/swaps/most_pos.csv")
write.table(dfng1,"/home/raghuvarma/Desktop/swaps/most_neg.csv")
print("some +ve tweets :-")
df2 <- analysis[which(analysis$score==3 | analysis$score==2 | analysis$score==1),]
write.table(df2,"/home/raghuvarma/Desktop/swaps/some_pos.csv")
print("some -ve tweets :-")
df3 <- analysis[which(analysis$score==-3 | analysis$score==-2 | analysis$score==-1),]
write.table(df3,"/home/raghuvarma/Desktop/swaps/some_neg.csv")
source("/home/raghuvarma/Desktop/swaps/twitter/WordCloud.R")
|
c714b0a06b07f97ab0aab69e8c820886c41a63e6
|
860efbde82499c1cc307e36b57f6af41fe37225e
|
/man/analyze.p2.Rd
|
a89f9e54e97fd4d6eb107ba855a567dead8513a1
|
[] |
no_license
|
cran/gainML
|
92a2ffb79ca5026e9e509edcdc1cc43b151ddb92
|
f85e402726004d6f9a31f812cc0a66bf83eabffc
|
refs/heads/master
| 2020-12-21T23:34:19.542072
| 2019-06-28T12:40:07
| 2019-06-28T12:40:07
| 236,601,381
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,696
|
rd
|
analyze.p2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/period2.R
\name{analyze.p2}
\alias{analyze.p2}
\title{Apply Period 2 Analysis}
\usage{
analyze.p2(per1, per2, opt.cov)
}
\arguments{
\item{per1}{A dataframe containing the period 1 data.}
\item{per2}{A dataframe containing the period 2 data.}
\item{opt.cov}{A character vector indicating the optimal set of variables
(obtained from the period 1 analysis).}
}
\value{
The function returns a list of the following datasets. \describe{
\item{\code{pred.REF}}{A dataframe including the period 2 prediction for
the REF turbine.} \item{\code{pred.CTR}}{A dataframe including the period 2
prediction for the CTR-b turbine.} }
}
\description{
Conducts period 2 analysis; uses the optimal set of variables obtained in the
period 1 analysis to predict the power output of REF and CTR-b turbines in
period 2.
}
\examples{
df.ref <- with(wtg, data.frame(time = time, turb.id = 1, wind.dir = D,
power = y, air.dens = rho))
df.ctrb <- with(wtg, data.frame(time = time, turb.id = 2, wind.spd = V,
power = y))
df.ctrn <- df.ctrb
df.ctrn$turb.id <- 3
data <- arrange.data(df.ref, df.ctrb, df.ctrn, p1.beg = '2014-10-24',
p1.end = '2014-10-25', p2.beg = '2014-10-25', p2.end = '2014-10-26',
k.fold = 2)
p1.res <- analyze.p1(data$train, data$test, ratedPW = 1000)
p2.res <- analyze.p2(data$per1, data$per2, p1.res$opt.cov)
}
\references{
H. Hwangbo, Y. Ding, and D. Cabezon, 'Machine Learning Based
Analysis and Quantification of Potential Power Gain from Passive Device
Installation,' arXiv:1906.05776 [stat.AP], Jun. 2019.
\url{https://arxiv.org/abs/1906.05776}.
}
|
8a95d9a7cc8599202069287b1376f471cb8128d9
|
5524d53f97e4af319ceebc8bd5dabd2e81fd789c
|
/R/glossary_class.R
|
ad64da62b772eb5a0c067200af90ffce891898c5
|
[] |
no_license
|
zachary-foster/glossary
|
220f9d51b38a85e7828cb4e0ab8ab7f739413dac
|
fd43b669d1bcd326722b0d8faaead83357b82db2
|
refs/heads/master
| 2020-03-22T17:14:33.280555
| 2018-07-14T09:04:01
| 2018-07-14T09:04:01
| 140,383,414
| 6
| 1
| null | 2018-07-14T09:00:00
| 2018-07-10T05:51:03
|
R
|
UTF-8
|
R
| false
| false
| 6,960
|
r
|
glossary_class.R
|
#' Glossary class
#'
#' This is used to add terms to a glossary
#'
#' @param definitions_path Where the the definitions of terms are stored. This
#' is used to show the definitions when hovering over a glossary term in the
#' text.
#' @param glossary_path The file the glossary will be added to. This is used to
#' link glossary terms in the text to their definitions in the rendered
#' glossary.
#' @param terms_used The terms that will be used. Adding terms to the
#' constructor (instead of `my_gloss$add("new term")`) will include them as if
#' they were added with `my_gloss$add()`.
#' @param header_level How big the headers are for each term in the rendered
#' glossary. Larger numbers mean smaller titles.
#'
#' @return An `R6Class` object of class `Glossary`
#' @family classes
#'
#' @examples
#' \dontrun{
#' my_gloss <- glossary()
#' }
#'
#' @export
glossary <- function(definitions_path, glossary_path = "", terms_used = c(), header_level = 3) {
Glossary$new(
definitions_path = definitions_path,
glossary_path = glossary_path,
terms_used = terms_used,
header_level = header_level
)
}
# R6 class backing `glossary()`: tracks which terms have been used, links
# in-text terms to a rendered glossary file, and renders the glossary itself
# as HTML or markdown.
Glossary <- R6::R6Class(
  "Glossary",
  public = list(
    definitions_path = NULL, # Where the definitions of terms are stored
    glossary_path = NULL, # The file(s) the glossary will be added to
    terms_used = c(), # The terms used so far in this glossary

    # Pre-render the definitions file once, in both HTML and Rmd form, so
    # add()/render() can look terms up without re-rendering.
    initialize = function(definitions_path, glossary_path, terms_used = c(), header_level = 3) {
      self$definitions_path <- definitions_path
      self$glossary_path <- glossary_path
      self$terms_used <- terms_used
      private$term_html <- render_definitions_html(definitions_path, header_level = header_level)
      private$term_rmd <- render_definitions_rmd(definitions_path, header_level = header_level)
    },

    # Human-readable summary of the object's state; returns self invisibly
    # per the usual print-method convention.
    print = function(indent = " ") {
      cat(paste0(indent, "<Glossary>\n"))
      cat(paste0(indent, paste0("definitions_path: ", self$definitions_path, "\n")))
      cat(paste0(indent, paste0("glossary_path: ", paste0(self$glossary_path, collapse = ", "), "\n")))
      cat(paste0(indent, paste0("terms_used: ", paste0(self$terms_used, collapse = ", "), "\n")))
      invisible(self)
    },

    # Register a term and return an HTML anchor link to its glossary entry.
    # `shown` is the visible link text (defaults to the term). Terms missing
    # from the definitions produce a warning and are returned unlinked.
    add = function(new_term, shown = NULL) {
      if (is.null(shown)) {
        shown <- new_term
      }
      if (! is.character(new_term)) {
        stop("Glossary terms must be of type `character`.")
      }
      if (length(new_term) != 1) {
        stop("Glossary terms must be of length 1.")
      }
      # `standardize()` is a package helper (not visible here); it appears to
      # normalize terms for matching -- confirm its behavior before relying
      # on case/whitespace differences.
      if (! standardize(new_term) %in% standardize(names(private$term_html))) {
        warning(paste0('The term "', new_term, '" cannot be found in the definitions at "',
                       self$definitions_path, "' so no link will be added."))
        return(shown)
      }
      if (! standardize(new_term) %in% standardize(self$terms_used)) {
        # NOTE(review): the standardized form, not the raw term, is stored.
        self$terms_used <- c(self$terms_used, standardize(new_term))
      }
      # Format link to glossary
      if (is.null(self$glossary_path) || self$glossary_path == "" ) {
        glossary_path_html <- ""
      } else {
        glossary_path_html <- paste0(tools::file_path_sans_ext(self$glossary_path), ".html")
      }
      output <- paste0('<a href ="', glossary_path_html, '#', term_anchor_name(new_term), '">', shown, '</a>')
      # Add html div of glossary contents to reveal when cursor hovers
      # (disabled -- kept for a possible future hover-tooltip feature)
      # term_gloss_html <- private$term_html[tolower(new_term) == tolower(names(private$term_html))]
      # term_gloss_html <- sub(term_gloss_html, pattern = "^<div ", replacement = '<div class="glossary_div" ')
      # output <- paste0(output, "\n", private$term_html)
      return(output)
    },

    # Render only the terms used so far (sorted), as HTML or markdown, marked
    # as-is for inclusion in a knitr document.
    render = function(mode = "html") {
      if (mode == "md") {
        output <- paste0(private$term_rmd[sort(self$terms_used)], collapse = "\n")
      } else if (mode == "html") {
        output <- paste0(private$term_html[sort(self$terms_used)], collapse = "\n")
      } else {
        stop("mode must be 'html' or 'md'")
      }
      knitr::asis_output(output)
    },

    # Render every known term, whether or not it was used in the text.
    render_all = function(mode = "html") {
      if (mode == "md") {
        output <- paste0(private$term_rmd[sort(names(private$term_rmd))], collapse = "\n")
      } else if (mode == "html") {
        output <- paste0(private$term_html[sort(names(private$term_html))], collapse = "\n")
      } else {
        stop("mode must be 'html' or 'md'")
      }
      knitr::asis_output(output)
    }
  ),
  private = list(
    term_html = NULL, # rendered HTML per term, named by term
    term_rmd = NULL   # markdown body per term, named by term
  )
)
# Render the glossary definitions Rmd to HTML and return one HTML fragment
# per definition, named by the definition's heading text.
#
# @param definition_path Path to the Rmd file holding the definitions (one
#   definition per heading).
# @param header_level Heading level (1-6) each definition's heading is
#   rewritten to in the returned fragments.
# @return Named character vector: names are the heading texts, values the
#   corresponding section-<div> HTML with a linkable anchor inserted.
render_definitions_html <- function(definition_path, header_level = 3) {
  # Render Rmd file into HTML and save as a vector of length 1
  output_path <- tempfile()
  rmarkdown::render(definition_path, output_format = rmarkdown::html_document(), output_file = output_path, quiet = TRUE)
  raw_html <- readr::read_file_raw(output_path)
  # Extract the rendered HTML for each definition
  parsed_html <- xml2::read_html(raw_html)
  parsed_divs <- xml2::xml_find_all(parsed_html, "//div/div")
  # Keep only the divs whose markup mentions "section" -- presumably the
  # per-heading wrapper divs pandoc emits; confirm against the rendered HTML.
  parsed_term_html <- as.character(parsed_divs[grepl(parsed_divs, pattern = "section")])
  # Heading text between <hN>...</hN> is the term name.
  term_names <- stringr::str_match(parsed_term_html, "<h[0-9]{1}>\n*(.+)\n*</h[0-9]{1}>")[,2]
  # Reset header level and add anchor
  # NOTE(review): sub() is called with the target string positionally while
  # pattern/replacement are named, so the first argument binds to `x`.
  parsed_term_html <- sub(parsed_term_html, pattern = 'class="section level[0-9]{1}"',
                          replacement = paste0('class="section level', header_level, '"'))
  anchor_name <- term_anchor_name(term_names)
  # Insert an <a id=...> anchor inside each heading so terms can be linked to.
  parsed_term_html <- vapply(seq_along(parsed_term_html), FUN.VALUE = character(1), function(i) {
    sub(parsed_term_html[i], pattern = '<h[0-9]{1}>',
        replacement = paste0('<h', header_level, '><a class="glossary_anchor" id="', anchor_name[i], '">'))
  })
  parsed_term_html <- sub(parsed_term_html, pattern = '</h[0-9]{1}>',
                          replacement = paste0('</a></h', header_level, '>'))
  # Name by term and return
  names(parsed_term_html) <- term_names
  return(parsed_term_html)
}
# Split the definitions Rmd into one markdown fragment per definition,
# named by heading text, with each heading rewritten to `header_level`.
#
# @param definition_path Path to the Rmd file holding the definitions.
# @param header_level Number of leading "#" characters to emit per heading.
# @return Named character vector of markdown fragments, one per definition.
render_definitions_rmd <- function(definition_path, header_level = 3) {
  raw_rmd <- readr::read_file(definition_path)
  # Split on heading markers (# through #####) and drop anything before the
  # first heading.
  parsed_rmd <- stringr::str_split(raw_rmd, "\n#{1,5}")[[1]][-1]
  parsed_rmd <- trimws(parsed_rmd)
  # First line of each chunk is the heading (term) text.
  term_names <- stringr::str_match(parsed_rmd, "^(.+)\n")[,2]
  # Strip that heading line (lazy `.+?` -- though `.` does not cross
  # newlines here, so greedy would behave the same).
  parsed_rmd <- sub(parsed_rmd, pattern = "^(.+?)\n", replacement = "")
  parsed_rmd <- trimws(parsed_rmd)
  # Re-add headers at the requested level plus blank-line spacing.
  parsed_rmd <- paste0(paste0(rep("#", header_level), collapse = ""), " ",
                       term_names, "\n\n", parsed_rmd, "\n\n")
  # Name by term and return
  names(parsed_rmd) <- term_names
  return(parsed_rmd)
}
term_anchor_name <- function(term_name) {
  # Build the HTML anchor id for a term: standardized form (lower-case,
  # straight apostrophes) with spaces as underscores and an "_anchor" suffix.
  slug <- gsub(" ", "_", standardize(term_name), fixed = TRUE)
  paste0(slug, "_anchor")
}
standardize <- function(term) {
  # Canonical form used for term comparison: lower-case, with typographic
  # (curly) apostrophes replaced by straight ones.
  gsub("\u2019", "'", tolower(term), fixed = TRUE)
}
|
1feb8b1aa1ea26c9fba16572332136a5c23b179b
|
9465052503f31b26d516a85808a95a5d1ae5e11c
|
/tests/testthat/test-anomaly.R
|
94f020e64b4e00487e824229a8078e00c7291d4e
|
[
"Apache-2.0"
] |
permissive
|
ecmwf/caliver
|
26e03662a1c2c979c00d178e8ac6e59c58b6f701
|
1b82bc4e5476cba3a16782370505bc0b50241569
|
refs/heads/master
| 2022-02-25T01:44:23.904450
| 2022-02-21T17:51:40
| 2022-02-21T17:51:40
| 73,203,648
| 16
| 7
| null | 2021-03-18T13:35:00
| 2016-11-08T16:11:16
|
R
|
UTF-8
|
R
| false
| false
| 150
|
r
|
test-anomaly.R
|
context("anomaly")
# Smoke test for anomaly(): with asEFFIS = TRUE the result should top out
# at the maximum EFFIS class value (6).
test_that("Testing the file anomaly.R", {
  # NOTE(review): `r` and `b` are assumed to be raster fixtures created in
  # test setup/helper files -- confirm they exist before this test runs.
  x <- anomaly(r, b, asEFFIS = TRUE)
  # Presumably asEFFIS = TRUE buckets anomalies into 6 EFFIS classes, so
  # the largest cell value is exactly 6 -- verify against anomaly()'s docs.
  expect_true(raster::cellStats(x, max) == 6)
})
58f9ddd1031a3b569b8bc56eab70b3c93c2d5c98
|
cbede1f778ec1b69c51d5f90e66edf1bfeee18a0
|
/README.rd
|
768e9611b9850bb19f8df57e6535dc891726d633
|
[] |
no_license
|
gbsnaker/django_demo
|
88e862176be5ebbb3874a9d9a6b4e386f65c2b58
|
0e39a886b5747e7687a9f856971201cfb38a743a
|
refs/heads/master
| 2020-12-30T15:54:32.424971
| 2017-05-13T18:54:09
| 2017-05-13T18:54:09
| 91,182,977
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 136
|
rd
|
README.rd
|
# django demo
<h6>add study 3</h6>
<h6>add bootstrap</h6>
<h6>sublime bootstrap autosnippet s3-***</h6>
<h6>add jinja2 variables </h6>
|
82ad9085b0cec8432b6a0a1c9114605ec32fd4eb
|
d3902442ba45fddad61a36dd844fcd0862a08f90
|
/man/check_connection.Rd
|
4e224b9edb1b9911c7737226908e8eb3d34ad66b
|
[
"MIT"
] |
permissive
|
JasperHG90/sleepsimRapiClient
|
d003caca1d2e7a46cfcc73f38ab108f18b14917c
|
6ba993ee7565490e7b5ff3123622f034bb60d194
|
refs/heads/master
| 2021-03-07T20:56:14.915862
| 2020-05-14T08:30:35
| 2020-05-14T08:30:35
| 246,296,409
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 318
|
rd
|
check_connection.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api.R
\name{check_connection}
\alias{check_connection}
\title{Check if connection to server is possible}
\usage{
check_connection()
}
\value{
TRUE if connection active, else FALSE
}
\description{
Check if connection to server is possible
}
|
c9e87a52f930a374903a25bed1648eb1037ccbe3
|
5dc84c75f79ef6114daa20080d0677200c037f98
|
/man/gdcFilterDuplicate.Rd
|
78ff653ce7fbf8d84da9e4f71c9dd50c78696fc7
|
[
"Apache-2.0"
] |
permissive
|
rli012/GDCRNATools
|
6a25f3e72ea716b4f60e36e725caea557f6a6e29
|
e2c4f4e8c40041b1f3d374f9a5a561949e803830
|
refs/heads/master
| 2023-08-25T04:18:18.876861
| 2022-08-20T01:44:51
| 2022-08-20T01:44:51
| 112,437,042
| 14
| 9
|
Apache-2.0
| 2023-08-04T09:01:57
| 2017-11-29T06:37:47
|
R
|
UTF-8
|
R
| false
| true
| 690
|
rd
|
gdcFilterDuplicate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gdcFilterSamples.R
\name{gdcFilterDuplicate}
\alias{gdcFilterDuplicate}
\title{Filter out duplicated samples}
\usage{
gdcFilterDuplicate(metadata)
}
\arguments{
\item{metadata}{metadata parsed from \code{\link{gdcParseMetadata}}}
}
\value{
A filtered dataframe of metadata without duplicated samples
}
\description{
Filter out samples that are sequenced for two or more times
}
\examples{
####### Parse metadata by project id and data type #######
metaMatrix <- gdcParseMetadata(project.id='TARGET-RT', data.type='RNAseq')
metaMatrix <- gdcFilterDuplicate(metadata=metaMatrix)
}
\author{
Ruidong Li and Han Qu
}
|
61fa8b07fb37f3c7baca902f9dbf4a347aa8e71c
|
be8c9660ff29a44d1835b74b3ec861cd76adb834
|
/methods/analysis-glint_get-glint-dependencies.R
|
8a4e10b5647147102bb67c92c2ed781cc18fc26d
|
[] |
no_license
|
metamaden/recountmethylation_flexible-blood-analysis_manuscript
|
ec9ba3666db953430ec1be509a826d45fba97f57
|
ec835f346da6bcb628ac262d22c5827936610981
|
refs/heads/main
| 2023-04-16T20:18:31.234484
| 2023-02-02T20:33:38
| 2023-02-02T20:33:38
| 401,501,606
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 167
|
r
|
analysis-glint_get-glint-dependencies.R
|
# Declare the Python dependencies for running glint via a basilisk-managed
# conda environment.
library(basilisk)
# define env attributes
env.name <- "dnam_si"  # name of the basilisk environment
# Exact version pins keep the Python side of the analysis reproducible.
# NOTE(review): presumably consumed by a BasiliskEnvironment() constructor
# elsewhere in the script -- confirm against the rest of the file.
pkgv <- c("hnswlib==0.5.1", "pandas==1.2.2", "numpy==1.20.1",
"mmh3==3.0.0", "h5py==3.2.1")
|
dd576a611a4d21d2d770b615f83eb8f9f2ab75cd
|
34b94b2de56a8f7023487b0e33b5eb9d1863ce09
|
/man/get_pm_fund_info.Rd
|
e2195a08ae18afc55033d5b3114da6fad87222ec
|
[] |
no_license
|
AZASRS/AZASRS
|
b42a9ddba0c249281e723ce160a633aedfc583b0
|
b862856345f658664ed7b0cc1e55899fa25e552d
|
refs/heads/master
| 2021-06-15T18:27:33.491298
| 2020-09-24T17:07:40
| 2020-09-24T17:07:40
| 137,259,227
| 0
| 3
| null | 2021-01-08T16:31:08
| 2018-06-13T19:06:52
|
R
|
UTF-8
|
R
| false
| true
| 1,777
|
rd
|
get_pm_fund_info.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_pm_fund_info.R
\name{get_pm_fund_info}
\alias{get_pm_fund_info}
\title{Get all pm_fund_info}
\usage{
get_pm_fund_info(
con = AZASRS_DATABASE_CONNECTION(),
add_benchmark = FALSE,
return_tibble = TRUE
)
}
\arguments{
\item{con}{is a database connection object from AZASRS::AZASRS_DATABASE_CONNECTION()}
\item{return_tibble}{is a boolean that determines whether or not a tibble is returned instead}
\item{add_benchmark}{is a boolean that appends multiple benchmarks as their own columns. i.e. PVT_Benchmark, SAA_Benchmark, etc.}
}
\value{
Returns a tibble or SQL result with all pm_fund_info metadata.
}
\description{
A view to get all private market fund info, can be filtered. By default, SQL --> SELECT * FROM all_pm_fund_info;
}
\examples{
get_pm_fund_info()
# A tibble: 282 x 26
# pm_fund_info_id pm_fund_id pm_fund_descrip… pm_fund_common_… vintage commit unfunded legacy specialist
# <int> <chr> <chr> <chr> <int> <int> <int> <chr> <chr>
# 1 Hgh19 AP Mezzanine Pa… HPS Mezz 2019 2019 6.00e8 3.95e8 A " "
# 2 HghBr AP Mezzanine Pa… HPS Mezz 2 2013 2.00e8 1.30e7 A " "
# 3 HghBr3 AP Mezzanine Pa… HPS Mezz 3 2016 5.00e8 9.85e7 A " "
# … with 279 more rows, and 17 more variables: invest_end <date>, term_end <date>, extension <dbl>,
# ext_time <dbl>, ext_used <dbl>, fee_cat <chr>, consultant <chr>, adv_board <lgl>, obsvr <lgl>,
# fund_size_m <dbl>, closed <chr>, pm_fund_category <chr>, pm_fund_category_description <chr>,
# pm_fund_portfolio <chr>, pm_fund_sponsor <chr>, pm_fund_city <chr>, pm_fund_sector <chr>
}
|
27ca26d2459f261480cf7e92256bd5ffcf58713b
|
2af4939100afa621c6c9fedf4749bb10798bf209
|
/Transitions.Rd
|
b276b84610f03f92edec4914d451437334a5258e
|
[] |
no_license
|
mcattau/code_efficiency
|
ef0688197c5e388e6e1814f2ac2160b2997f385a
|
b81027b7fd3e62dd763136fb0d0cd8e81e74711e
|
refs/heads/master
| 2020-12-30T23:36:58.170672
| 2017-04-05T17:58:54
| 2017-04-05T17:58:54
| 86,605,674
| 0
| 1
| null | 2017-04-06T18:01:42
| 2017-03-29T16:35:27
|
R
|
UTF-8
|
R
| false
| false
| 84,086
|
rd
|
Transitions.Rd
|
## Megan Cattau
## Earth Lab, Project Forest
## Contact info: megan.cattau@gmail.com or megan.cattau@colorado.edu, 706.338.9436
## Project: Disturbance Interactions in the Southern Rockies
## Project overview: Forest transitions (i.e., changes in ecosystem type / forest composition and structure) as a function of disturbance history across the Southern Rockies
# This code addresses:
# Q1: When is forest 'recovered'?
# a. At how many years since fire does post-fire VCF resemble pre-fire VCF?
# b. Does this vary as a function of pre-fire MPB infestation?
# c. Does this vary as a function of pre-fire VCF?
# Q2: Is a transition more likely to occur if fire is preceded by beetle infestation?
# Q3: How many years after beetle infestation does a fire have a similar recovery trajectory as an area that did not experience infestation?
# Data associated with this code:
# (FIX: these file names were bare, uncommented symbols, which would abort
# the script with "object not found" when sourced; now comments.)
# fish_pts_SR2.txt
# fish_pts_SR3.txt
# fish_pts_SR4.txt
# fish_pts_SR5.txt
# Target journal(s): Forest ecology and management
# Global change biology
# Reach - TREE, but prob for later papers. Review paper instead?
#### NOTE TO MAX:
# Areas of the code that I know could be more efficient are flagged with "### Could be more efficient"
# Working directory differs between machines; setwd() errors on a missing
# path, so each attempt is wrapped in try() and whichever path exists wins.
try(setwd("/Users/megancattau/Dropbox/0_EarthLab/Disturbance"), silent = TRUE)
try(setwd("/Users/meca3122/Dropbox/0_EarthLab/Disturbance"), silent = TRUE)
getwd()
# Import fire data
# These are 250m rasters of the Geomac / MTBS data sampled at fishnet label points (corresponding to 250m fishnet). A separate raster was sampled for each year, and the fire-present values in each raster are the year that that fire occurred. Values for pixels where there was no fire are 0 or -9999
fire1<-read.table("fish_pts_SR2.txt", header=TRUE, sep=",")
# Bare names(...) calls below are interactive sanity checks only.
names(fire1)
# Rename: two ID columns, then one column per fire year 1984-2000
# (years with no mapped fires are absent).
names(fire1)<-c("FID", "Id", "1984", "1986", "1987", "1988", "1989", "1990", "1993", "1994", "1996", "1997", "1998", "1999", "2000")
fire2<-read.table("fish_pts_SR3.txt", header=TRUE, sep=",")
names(fire2)
names(fire2)<-c("FID", "Id", "2001", "2002", "2003", "2004", "2005", "2006", "2007", "2008", "2009", "2010", "2011", "2012", "2013", "2014", "2015")
# merge two fire data sets and get extra ID and FID cols out of there
fire<-cbind(fire1, fire2)
names(fire)
# Keep col 1 (FID) plus the year columns; cols 2 (Id), 16 (fire2's FID)
# and 17 (fire2's Id) are the duplicated ID columns being dropped.
fire<-fire[,c(1,3:15, 18:32)]
# subset fire data to keep just pixels that have experienced fire in any (but not every) year
# keep rows where any row > 0
fire_yes<-fire[apply(fire[, -1], MARGIN = 1, function(x) any(x > 0)), ]
names(fire_yes)
# Get the max year for each row (i.e. year of last burn)
fire_yes$last_burn<-apply(fire_yes[,-1], 1, max)
head(fire_yes, n=50)
# Import mountain pine beetle (MPB) data
# These are 250m rasters of mountain pine beetle (MPB) infestation presence from the Aerial Detection Survey data sampled at fishnet label points (corresponding to 250m fishnet). A separate raster was sampled for each year, and the MPB-present values in each raster are the area of the infestation. Values for pixels where there was no infestation are 0 or -9999
MPB<-read.table("fish_pts_SR4.txt", header=TRUE, sep=",")
names(MPB)
names(MPB)<-c("FID", "Id", "1994", "1995", "1996", "1997", "1998", "1999", "2000", "2001", "2002", "2003", "2004", "2005", "2006", "2007", "2008", "2009", "2010", "2011", "2012", "2013", "2014", "2015")
MPB<-MPB[,-2]
# Change MPB from infested area to year of infestation: one new "<year>mpb"
# column per year, holding the year where area > 0 and 0 otherwise.
# (Replaces 22 copy-pasted ifelse() lines; identical columns/order/values.)
for (yr in 1994:2015) {
  col <- as.character(yr)
  MPB[[paste0(col, "mpb")]] <- ifelse(MPB[[col]] > 0, yr, 0)
}
# Get the max year for each row (i.e. year of last MPB infestation)
names(MPB)
# Drop the original area columns (positions 2:23), keeping FID plus the
# "<year>mpb" columns just created.
MPB<-MPB[,c(-23:-2)]
MPB$last_infest<-apply(MPB[,-1], 1, max)
head(MPB, n=50)
# subset fire 1994-2015 (same range as beetle data)
names(fire_yes)
# Column 1 is FID; columns 9:30 are the 1994-2015 fire years plus the
# last_burn column appended above (1984-1993 years are dropped).
fire_yes<-fire_yes[,c(1,9:30)]
# No fires in 1995, so add that in there
fire_yes$"1995"<-rep(0,nrow(fire_yes))
# Merge fire and MPB together
merged_MPB_fire<-merge(fire_yes, MPB, by="FID")
names(merged_MPB_fire)
# vars yyyy = fire
# vars yyyympb = MPB
# Get years before fire that MPB infestation happened, just for rows that have had MPB and fire
# This is year of most recent fire minus year of most recent infestation (if same year = 0, if not both = -9999)
# Negative values mean the infestation happened AFTER the last burn.
merged_MPB_fire$yrs_infest_bf_fire<-ifelse((merged_MPB_fire$last_infest>0 & merged_MPB_fire$last_burn > 0), merged_MPB_fire$last_burn-merged_MPB_fire$last_infest, -9999)
names(merged_MPB_fire)
head(merged_MPB_fire, n=50)
####################
write.csv(merged_MPB_fire, "merged_MPB_fire.csv")
####################
# Import VCF
# These are 250m rasters of MODIS vegetation continuous fields (VCF) data, or percent woody vegetation per pixel, sampled at fishnet label points (corresponding to 250m fishnet). A separate raster was sampled for each year. Value 200 is water and 253 is NA
VCF<-read.table("fish_pts_SR5.txt", header=TRUE, sep=",")
names(VCF)
names(VCF)<-c("FID", "Id", "2000", "2001", "2002", "2003", "2004", "2005", "2006", "2007", "2008", "2009", "2010", "2011", "2012", "2013", "2014", "2015")
VCF<-VCF[,-2]
# Change VCF values > 100 to NA (200 is water and 253 is NA).
# (Replaces 16 copy-pasted ifelse() lines; identical values, NA in == NA out.)
for (yr in 2000:2015) {
  col <- as.character(yr)
  VCF[[col]] <- ifelse(VCF[[col]] > 100, NA, VCF[[col]])
}
# merge data
names(merged_MPB_fire)
merged_MPB_fire_VCF<-merge(merged_MPB_fire, VCF, "FID")
names(merged_MPB_fire_VCF)
head(merged_MPB_fire_VCF)
# vars yyyy.x = fire
# vars yyyympb = MPB
# vars yyyy.y = VCF
######### VCF 0-20 years after fire ########
# The below looks at VCF 0-16 years after a fire.
# Outstanding: bias as get further away from fire year because less opportunity to capture pre-fire infestation. For example, VCF_since_fire0 begins with fires in 1999 and MPB dataset starts at 1994, so 6 years to capture MPB. By VCF_since_fire5, could start with fires in 1994 to capture VCF 5 years later (bc VCF dataset starts at 2000), leaving only MPB that happened in that year. Maybe there wasn't much MPB before this, so that assumption is ok?
# Outstanding: should stop at VCF_since_fire5 because the sample size starts to go down? Can account for this somehow?
# VCF_before_fire is the VCF in the year of the last burn; "VCF_since_fireK"
# is the VCF from JD065 of the (K+1)-th year after the fire (so "_0" is the
# following year). The 17 copy-pasted nested-ifelse ladders are replaced by
# one helper applied per lag; burn years with no VCF column in range get NA,
# exactly as the original ladders produced.
vcf_at_offset <- function(df, offset) {
  # VCF value `offset` years after each row's last_burn. NA where last_burn
  # is not a 1994-2015 year or the needed VCF year falls outside 2000-2015.
  out <- rep(NA, nrow(df))
  for (burn_yr in 1994:2015) {
    vcf_yr <- burn_yr + offset
    if (vcf_yr < 2000 || vcf_yr > 2015) next
    rows <- which(df$last_burn == burn_yr)
    # ".y" suffix = VCF columns after the merge above.
    out[rows] <- df[[paste0(vcf_yr, ".y")]][rows]
  }
  out
}
merged_MPB_fire_VCF$VCF_before_fire <- vcf_at_offset(merged_MPB_fire_VCF, 0)
for (k in 0:15) {
  merged_MPB_fire_VCF[[paste0("VCF_since_fire", k)]] <-
    vcf_at_offset(merged_MPB_fire_VCF, k + 1)
}
merged_MPB_fire_VCF$VCF_since_fire16<-
ifelse(merged_MPB_fire_VCF$last_burn==1994, merged_MPB_fire_VCF$"2011.y",
ifelse(merged_MPB_fire_VCF$last_burn==1995, merged_MPB_fire_VCF$"2012.y",
ifelse(merged_MPB_fire_VCF$last_burn==1996, merged_MPB_fire_VCF$"2013.y",
ifelse(merged_MPB_fire_VCF$last_burn==1997, merged_MPB_fire_VCF$"2014.y",
ifelse(merged_MPB_fire_VCF$last_burn==1998, merged_MPB_fire_VCF$"2015.y",
NA
)))))
merged_MPB_fire_VCF$VCF_since_fire17<-
ifelse(merged_MPB_fire_VCF$last_burn==1994, merged_MPB_fire_VCF$"2012.y",
ifelse(merged_MPB_fire_VCF$last_burn==1995, merged_MPB_fire_VCF$"2013.y",
ifelse(merged_MPB_fire_VCF$last_burn==1996, merged_MPB_fire_VCF$"2014.y",
ifelse(merged_MPB_fire_VCF$last_burn==1997, merged_MPB_fire_VCF$"2015.y",
NA
))))
merged_MPB_fire_VCF$VCF_since_fire18<-
ifelse(merged_MPB_fire_VCF$last_burn==1994, merged_MPB_fire_VCF$"2013.y",
ifelse(merged_MPB_fire_VCF$last_burn==1995, merged_MPB_fire_VCF$"2014.y",
ifelse(merged_MPB_fire_VCF$last_burn==1996, merged_MPB_fire_VCF$"2015.y",
NA
)))
merged_MPB_fire_VCF$VCF_since_fire19<-
ifelse(merged_MPB_fire_VCF$last_burn==1994, merged_MPB_fire_VCF$"2014.y",
ifelse(merged_MPB_fire_VCF$last_burn==1995, merged_MPB_fire_VCF$"2015.y",
NA
))
merged_MPB_fire_VCF$VCF_since_fire20<-
ifelse(merged_MPB_fire_VCF$last_burn==1994, merged_MPB_fire_VCF$"2015.y",
NA
)
head(merged_MPB_fire_VCF, n=25)
######### Recovery 0-20 years after fire ########
# The below looks at recovery 0-20 years after a fire relative to the pre-fire state
# recovery is defined as difference between pre-fire VCF and post-fire VCF (0-20 years after fire)
# Do this rather than just VCF bc bias (i.e., no MPB areas could have lower pre- and post-fire VCF because they're grassland, whereas MPB infested areas are going to be forest)
# Outstanding: bias bc can only look at fires since 2000 since VCF dataset starts at 2000 (therefore no pre-fire dataset before this)
# "_0 is the VCF from JD065 of the following year since the fire happened
#
# pre_minus_<k>yrs_post_fire_VCF = VCF in the fire year ("<last_burn>.y")
# minus VCF k years post-fire ("<last_burn + k + 1>.y").
# Positive values: still recovering; negative: exceeded pre-fire VCF.
# Fires from 2000..(2014 - k) have a k-years-post observation within the
# 2000-2015 VCF record; everything else (including NA last_burn) stays NA,
# exactly as the original nested-ifelse chains produced.
# Columns are appended in the same order (k = 1..14) as the fourteen
# hand-unrolled blocks this loop replaces, so later positional column
# indexing (e.g. forest[, 87:100]) is unaffected.
for (k in 1:14) {
  lb <- merged_MPB_fire_VCF$last_burn
  res <- rep(NA_real_, nrow(merged_MPB_fire_VCF))
  for (yr in 2000:(2014 - k)) {
    hit <- !is.na(lb) & lb == yr
    res[hit] <- merged_MPB_fire_VCF[hit, paste0(yr, ".y")] -
      merged_MPB_fire_VCF[hit, paste0(yr + k + 1, ".y")]
  }
  merged_MPB_fire_VCF[[paste0("pre_minus_", k, "yrs_post_fire_VCF")]] <- res
}
####################
# Checkpoint: persist the merged MPB/fire/VCF table so the analysis below can
# be re-run without recomputing everything above.
write.csv(merged_MPB_fire_VCF, "merged_MPB_fire_VCF.csv")
####################
merged_MPB_fire_VCF<-read.csv("merged_MPB_fire_VCF.csv", header=TRUE)
# Drop the unnamed row-number column that write.csv added.
merged_MPB_fire_VCF<-merged_MPB_fire_VCF[,-1]
# NOTE(review): read.csv (check.names = TRUE by default) renames columns that
# are not syntactic names, e.g. "2000.y" -> "X2000.y", on this round trip.
# The code below only uses syntactic names (VCF_*, yrs_infest_bf_fire), so it
# appears unaffected -- but confirm the "<year>.y" columns are not referenced
# by their original names after this point.
# Just for pixels that were 'forest' prefire (39.3 VCF) to make sure no bias in pre-fire VCF between MPB infestation and not
# VCF threshold that represents forest: 49.4 +/- 10.1; including sparse forest: 47.5 +/- 10.2. This was derived from selecting 100 forest GCPs (plus 25? 'sparse forest' GCPs) in Google Earth (imagery from 2012) and computing stats on VCF 2012 sampled at those points
# If we do this, we're only looking at fires 2000-2015
forest<-merged_MPB_fire_VCF[merged_MPB_fire_VCF$VCF_before_fire>=39.3, ]
# Make sure no bias: that pre-fire MPB and pre-fire no MPB pixels have same distribution of pre-fire VCF. # To detect pre-fire bias in VCF, what's the mean VCF of pixels the year before a fire for pixels that have also had MPB vs those that have not?
# Two-sample (unpaired) t-test of pre-fire VCF: infested vs non-infested.
t.test(merged_MPB_fire_VCF[merged_MPB_fire_VCF$yrs_infest_bf_fire>=0,]$VCF_before_fire, merged_MPB_fire_VCF[merged_MPB_fire_VCF$yrs_infest_bf_fire<0,]$VCF_before_fire, na.action="na.pass")
# They are sig diff, but only by 1.4% VCF
boxplot(merged_MPB_fire_VCF[merged_MPB_fire_VCF$yrs_infest_bf_fire>=0,]$VCF_before_fire, merged_MPB_fire_VCF[merged_MPB_fire_VCF$yrs_infest_bf_fire<0,]$VCF_before_fire, names=c("Infestation", "No infestation"), ylab="VCF pre-fire")
# Same comparison restricted to the pre-fire 'forest' pixels defined above.
t.test(forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire, forest[forest$yrs_infest_bf_fire<0,]$VCF_before_fire, na.action="na.pass")
# They are not sig diff if just look at forest pixels
boxplot(forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire, forest[forest$yrs_infest_bf_fire<0,]$VCF_before_fire, names=c("Infestation", "No infestation"), ylab="VCF pre-fire")
###############
###############
# Q1: When is forest 'recovered'?
# a. At how many years since fire does post-fire VCF Resemble pre-fire VCF?
# b. Does this vary as a function of pre-fire MPB infestation?
# c. Does this vary as a function of pre-fire VCF?
### Plot post-fire regrowth as a function of years since fire
names(forest)
# Positional selection of the VCF time-series columns.
# NOTE(review): assumes columns 65:86 are VCF_before_fire plus
# VCF_since_fire0..20 in order -- confirm against names(forest) above, since
# any change to how the table is built upstream silently shifts these indices.
VCF_post_fire<-forest[,c(65:86)]
names(VCF_post_fire)
# Column-wise mean/sd over the first 16 columns only (years -1..14 relative
# to fire); the remaining selected columns are not summarized here.
VCF_post_fire_mean<-apply(VCF_post_fire[1:16], 2, mean, na.rm=TRUE)
VCF_post_fire_sd<-apply(VCF_post_fire[1:16], 2, sd, na.rm=TRUE)
# +/- one standard deviation envelope around the mean trajectory.
VCF_post_fire_low<-VCF_post_fire_mean-VCF_post_fire_sd
VCF_post_fire_high<-VCF_post_fire_mean+VCF_post_fire_sd
# Years since fire; -1 is the pre-fire observation.
time<-c(-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14)
VCF_post_fire_time<-data.frame(VCF_post_fire_mean, VCF_post_fire_sd,time)
# Mean VCF vs. time since fire, with dashed +/- 1 sd envelope.
plot(VCF_post_fire_time$time, VCF_post_fire_time$VCF_post_fire_mean, xlab="Time since fire (years)", ylab="VCF", ylim=range(0:75))
lines(VCF_post_fire_time$time, VCF_post_fire_mean)
lines(VCF_post_fire_time$time, VCF_post_fire_low, lty=2)
lines(VCF_post_fire_time$time, VCF_post_fire_high, xlab="Time since fire (years)", ylab="VCF", lty=2)
# What accounts for the shape of this trend?
# Carol -
# Could be post-fire die off, then 5-10 years seedling establishment, after 7-8 years no new establishment
# Paper - Yellowstone - 30 years to 'full recovery' (density)? Could use this to make case for incorporating Landsat
# ***diff pre and post-fire VCF
### Plot post-fire regrowth (relative to pre-fire VCF) as a function of years since fire
# Reminder: pre-fire VCF minus post-fire VCF (positive numbers still recovering, negative numbers exceeded pre-fire VCF)
# subset pre_minus_1yrs_post_fire_VCF to pre_minus_14yrs_post_fire_VCF
names(forest)
# NOTE(review): assumes columns 87:100 are pre_minus_1yrs..pre_minus_14yrs
# _post_fire_VCF in order -- confirm against names(forest) above.
VCF_pre_minus_post_fire<-forest[,c(87:100)]
names(VCF_pre_minus_post_fire)
# Column-wise mean/sd of the pre-minus-post differences (one column per
# years-since-fire value, k = 1..14).
VCF_pre_minus_post_fire_mean<-apply(VCF_pre_minus_post_fire, 2, mean, na.rm=TRUE)
VCF_pre_minus_post_fire_sd<-apply(VCF_pre_minus_post_fire, 2, sd, na.rm=TRUE)
# +/- one standard deviation envelope around the mean difference.
VCF_pre_minus_post_fire_low<-VCF_pre_minus_post_fire_mean-VCF_pre_minus_post_fire_sd
VCF_pre_minus_post_fire_high<-VCF_pre_minus_post_fire_mean+VCF_pre_minus_post_fire_sd
time<-c(1,2,3,4,5,6,7,8,9,10,11,12,13,14)
VCF_pre_minus_post_fire_time<-data.frame(VCF_pre_minus_post_fire_mean, time)
# Mean (pre - post) VCF vs. time since fire, with dashed +/- 1 sd envelope.
plot(VCF_pre_minus_post_fire_time$time, VCF_pre_minus_post_fire_time$VCF_pre_minus_post_fire_mean, xlab="Time since fire (years)", ylab = "Difference between pre- and post- fire VCF", ylim=range(-20:40))
lines(VCF_pre_minus_post_fire_time$time, VCF_pre_minus_post_fire_time$VCF_pre_minus_post_fire_mean)
lines(VCF_pre_minus_post_fire_time$time, VCF_pre_minus_post_fire_low, lty=2)
lines(VCF_pre_minus_post_fire_time$time, VCF_pre_minus_post_fire_high, lty=2)
# a. Q: At how many years since fire does post-fire VCF Resemble pre-fire VCF?
# A: More than 14
# Below, using na.pass (rather than na.exclude) is same as:
# t.test(forest[complete.cases(forest[,65:66]),]$VCF_before_fire, forest[complete.cases(forest[,65:66]),]$VCF_since_fire0, na.action="na.exclude")
# https://stat.ethz.ch/pipermail/r-help/2010-August/249146.html
# Paired t-test of pre-fire VCF against VCF k years post-fire, k = 0..14.
# Replaces 15 hand-unrolled t.test() calls.  print() is required because
# results do not auto-print inside a for loop; the statistics are identical,
# only the "data:" label in the printed output differs.
for (k in 0:14) {
  print(t.test(forest$VCF_before_fire,
               forest[[paste0("VCF_since_fire", k)]],
               paired = TRUE, na.action = "na.pass"))
}
# Can only look 14 years post - all sig diff, so forest still not recovered 14 years out
# Q: b. Does this vary as a function of pre-fire MPB infestation?
# A: Prob not
# Beetle infestation
# Below, using na.pass, is same as:
# t.test((forest[((complete.cases(forest[,c(65,66,48)])) & forest$yrs_infest_bf_fire>=0),]$VCF_before_fire), (forest[((complete.cases(forest[,c(65,66,48)])) & forest$yrs_infest_bf_fire>=0),]$VCF_since_fire0))
### Could be more efficient
# Paired t-tests of pre-fire VCF vs VCF k years post-fire, restricted to
# pixels with MPB infestation before the fire (yrs_infest_bf_fire >= 0).
t.test((forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire), (forest[forest$yrs_infest_bf_fire>=0,]$VCF_since_fire0), paired=TRUE, na.action="na.pass")
t.test((forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire), (forest[forest$yrs_infest_bf_fire>=0,]$VCF_since_fire1), paired=TRUE, na.action="na.pass")
t.test((forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire), (forest[forest$yrs_infest_bf_fire>=0,]$VCF_since_fire2), paired=TRUE, na.action="na.pass")
t.test((forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire), (forest[forest$yrs_infest_bf_fire>=0,]$VCF_since_fire3), paired=TRUE, na.action="na.pass")
t.test((forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire), (forest[forest$yrs_infest_bf_fire>=0,]$VCF_since_fire4), paired=TRUE, na.action="na.pass")
t.test((forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire), (forest[forest$yrs_infest_bf_fire>=0,]$VCF_since_fire5), paired=TRUE, na.action="na.pass")
t.test((forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire), (forest[forest$yrs_infest_bf_fire>=0,]$VCF_since_fire6), paired=TRUE, na.action="na.pass")
t.test((forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire), (forest[forest$yrs_infest_bf_fire>=0,]$VCF_since_fire7), paired=TRUE, na.action="na.pass")
t.test((forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire), (forest[forest$yrs_infest_bf_fire>=0,]$VCF_since_fire8), paired=TRUE, na.action="na.pass")
t.test((forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire), (forest[forest$yrs_infest_bf_fire>=0,]$VCF_since_fire9), paired=TRUE, na.action="na.pass")
t.test((forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire), (forest[forest$yrs_infest_bf_fire>=0,]$VCF_since_fire10), paired=TRUE, na.action="na.pass")
t.test((forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire), (forest[forest$yrs_infest_bf_fire>=0,]$VCF_since_fire11), paired=TRUE, na.action="na.pass")
# all above sig diff, so forest still not recovered after 11 years
t.test((forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire), (forest[forest$yrs_infest_bf_fire>=0,]$VCF_since_fire12), paired=TRUE, na.action="na.pass")
# Not different, so maybe recovered by 12 years after, but prob not enough obs. How many obs complete cases VCF bf and 12 years?
nrow(forest[((complete.cases(forest[,c(65,78)])) & forest$yrs_infest_bf_fire>=0),]) #10
t.test((forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire), (forest[forest$yrs_infest_bf_fire>=0,]$VCF_since_fire13), paired=TRUE, na.action="na.pass")
# not enough obs bc no MPB before fire in 2000 or 2001
# Fixed: a misplaced closing parenthesis left "paired=TRUE, na.action=..."
# outside the t.test() call on the next line, which was a syntax error.
t.test((forest[forest$yrs_infest_bf_fire>=0,]$VCF_before_fire), (forest[forest$yrs_infest_bf_fire>=0,]$VCF_since_fire14), paired=TRUE, na.action="na.pass")
# not enough obs bc no MPB before fire in 2000
# No Beetle infestation
# Paired t-tests of pre-fire VCF vs VCF k years post-fire (k = 0..14),
# restricted to pixels with NO MPB infestation before the fire
# (yrs_infest_bf_fire < 0).  Replaces 15 hand-unrolled t.test() calls;
# print() is required because results do not auto-print inside a for loop.
# NA in yrs_infest_bf_fire selects NA values, which the paired t-test drops
# as incomplete pairs -- same net result as the original row-wise subsetting.
no_mpb_prefire <- forest$yrs_infest_bf_fire < 0
for (k in 0:14) {
  print(t.test(forest$VCF_before_fire[no_mpb_prefire],
               forest[[paste0("VCF_since_fire", k)]][no_mpb_prefire],
               paired = TRUE, na.action = "na.pass"))
}
# all sig diff, so forest still not recovered after 14 years
### Enough obs with MPB before fire to answer this?
# Q: c. Does this vary as a function of pre-fire VCF?
# A: not really. No pre-fire VCF category 'recovers,' but mean of pre-and post-fire VCF greater diff with increasing pre-fire VCF
# pre-fire VCF 0-19
# Paired t-tests of pre-fire VCF vs VCF k years post-fire (k = 0..14) for
# pixels whose pre-fire VCF was in (0, 20).  Replaces 15 hand-unrolled
# t.test() calls; print() is required because results do not auto-print
# inside a for loop.  NA pre-fire VCF yields NA in the mask, which selects
# NA values that the paired t-test drops as incomplete pairs -- same net
# result as the original row-wise subsetting.
prefire_vcf_0_20 <- merged_MPB_fire_VCF$VCF_before_fire > 0 &
  merged_MPB_fire_VCF$VCF_before_fire < 20
for (k in 0:14) {
  print(t.test(merged_MPB_fire_VCF$VCF_before_fire[prefire_vcf_0_20],
               merged_MPB_fire_VCF[[paste0("VCF_since_fire", k)]][prefire_vcf_0_20],
               paired = TRUE, na.action = "na.pass"))
}
# still not recovered after 14 years bc all sig diff, but mean diff VCF in groups is 0-2% VCF. Meaningfully different?
# Pre-fire VCF 20-39: paired t-tests of pre-fire VCF vs. VCF in each of the
# 15 post-fire years (0-14), for pixels with 20 <= pre-fire VCF < 40.
# Replaces 15 copy-pasted t.test() calls with one loop; na.action = "na.pass"
# from the original calls is dropped because t.test's default method ignores
# it and paired tests drop incomplete pairs automatically.
bin_20_39 <- merged_MPB_fire_VCF[merged_MPB_fire_VCF$VCF_before_fire >= 20 &
                                   merged_MPB_fire_VCF$VCF_before_fire < 40, ]
for (yr in 0:14) {
  print(t.test(bin_20_39$VCF_before_fire,
               bin_20_39[[paste0("VCF_since_fire", yr)]],
               paired = TRUE))
}
# still not recovered after 14 years bc all sig diff, but mean diff VCF in groups is <6% VCF.
# Pre-fire VCF 40-59: paired t-tests of pre-fire VCF vs. VCF in each of the
# 15 post-fire years (0-14), for pixels with 40 <= pre-fire VCF < 60.
# Replaces 15 copy-pasted t.test() calls with one loop; na.action = "na.pass"
# from the original calls is dropped because t.test's default method ignores
# it and paired tests drop incomplete pairs automatically.
bin_40_59 <- merged_MPB_fire_VCF[merged_MPB_fire_VCF$VCF_before_fire >= 40 &
                                   merged_MPB_fire_VCF$VCF_before_fire < 60, ]
for (yr in 0:14) {
  print(t.test(bin_40_59$VCF_before_fire,
               bin_40_59[[paste0("VCF_since_fire", yr)]],
               paired = TRUE))
}
# still not recovered after 14 years bc all sig diff, and mean diff VCF in groups is ~5-15% VCF.
# Pre-fire VCF 60-79: paired t-tests of pre-fire VCF vs. VCF in each of the
# 15 post-fire years (0-14), for pixels with 60 <= pre-fire VCF < 80.
# Replaces 15 copy-pasted t.test() calls with one loop; na.action = "na.pass"
# from the original calls is dropped because t.test's default method ignores
# it and paired tests drop incomplete pairs automatically.
bin_60_79 <- merged_MPB_fire_VCF[merged_MPB_fire_VCF$VCF_before_fire >= 60 &
                                   merged_MPB_fire_VCF$VCF_before_fire < 80, ]
for (yr in 0:14) {
  print(t.test(bin_60_79$VCF_before_fire,
               bin_60_79[[paste0("VCF_since_fire", yr)]],
               paired = TRUE))
}
# still not recovered after 14 years bc all sig diff, and mean diff VCF in groups is ~10-20% VCF.
# Pre-fire VCF 80-100 (upper bound inclusive): paired t-tests of pre-fire VCF
# vs. VCF in each of the 15 post-fire years (0-14).
# Replaces 15 copy-pasted t.test() calls with one loop; na.action = "na.pass"
# from the original calls is dropped because t.test's default method ignores
# it and paired tests drop incomplete pairs automatically.
bin_80_100 <- merged_MPB_fire_VCF[merged_MPB_fire_VCF$VCF_before_fire >= 80 &
                                    merged_MPB_fire_VCF$VCF_before_fire <= 100, ]
for (yr in 0:14) {
  print(t.test(bin_80_100$VCF_before_fire,
               bin_80_100[[paste0("VCF_since_fire", yr)]],
               paired = TRUE))
}
# Not enough obs for this one
###############
###############
# Q2: Is a transition more likely to occur if fire is preceded by beetle infestation?
# Is post-fire forest recovery different between groups for each year after fire:
#   1) MPB infestation year of fire or any previous year
#   2) NOT MPB infestation year of fire or any previous year
### Post-fire regrowth as a function of years since fire
names(forest)
# Reminder: yrs_infest_bf_fire is last_burn - last_infest, so positive numbers
# are when MPB happened before fire.
# Welch two-sample t-tests of post-fire VCF, pre-fire-MPB pixels vs. pixels
# without prior MPB, for each of the first 14 post-fire years (replaces 14
# copy-pasted t.test() calls).
forest_mpb <- forest[forest$yrs_infest_bf_fire >= 0, ]
forest_no_mpb <- forest[forest$yrs_infest_bf_fire < 0, ]
for (yr in 0:13) {
  vcf_col <- paste0("VCF_since_fire", yr)
  print(t.test(forest_mpb[[vcf_col]], forest_no_mpb[[vcf_col]]))
}
# Results observed in the original run:
# - years 0-2: different means**; pre-fire-MPB pixels have lower VCF
#   (year 0 means: with pre-fire MPB = 36.3, without = 37.9)
# - years 3-12: NOT different means
# - year 13 onward: not enough obs for mean or distribution 13-20 years after
# **p <= 0.05
### First years post-fire, less vegetation in areas that had been MBP-affected.
### At 3 years post-fire, vegetation looks the same independent of whether
### there was MPB infestation pre-fire or not.
# Plot the above: mean post-fire VCF trajectory for pre-fire-MPB pixels (red)
# vs. pixels without prior MPB (blue), with +/- 1 SD envelopes (dashed) over
# the plotted years.
names(forest)
# Columns 66:87 are selected by position; given the 22-element `time` vector
# below, they presumably hold VCF for years -1 (pre-fire) through 20 --
# TODO confirm against names(forest). NOTE(review): column 87 also appears in
# the 87:100 selection of the next plot block -- verify the index ranges.
VCF_post_fire_MPB<-forest[forest$yrs_infest_bf_fire>=0,c(66:87)]
VCF_post_fire_noMPB<-forest[forest$yrs_infest_bf_fire<0,c(66:87)]
# Column-wise mean and SD across pixels, ignoring missing values.
VCF_post_fire_MPB_mean<-apply(VCF_post_fire_MPB, 2, mean, na.rm=TRUE)
VCF_post_fire_MPB_sd<-apply(VCF_post_fire_MPB, 2, sd, na.rm=TRUE)
# +/- 1 SD band only for elements 2:5, i.e. the years shown in the xlim 0-3 window.
VCF_post_fire_MPB_low<-VCF_post_fire_MPB_mean[2:5]-VCF_post_fire_MPB_sd[2:5]
VCF_post_fire_MPB_high<-VCF_post_fire_MPB_mean[2:5]+VCF_post_fire_MPB_sd[2:5]
VCF_post_fire_noMPB_mean<-apply(VCF_post_fire_noMPB, 2, mean, na.rm=TRUE)
VCF_post_fire_noMPB_sd<-apply(VCF_post_fire_noMPB, 2, sd, na.rm=TRUE)
VCF_post_fire_noMPB_low<-VCF_post_fire_noMPB_mean[2:5]-VCF_post_fire_noMPB_sd[2:5]
VCF_post_fire_noMPB_high<-VCF_post_fire_noMPB_mean[2:5]+VCF_post_fire_noMPB_sd[2:5]
# Years relative to fire: -1 (pre-fire) through 20; one entry per selected column.
time<-c(-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20)
VCF_post_fire_MPB_time<-data.frame(VCF_post_fire_MPB_mean,time)
VCF_post_fire_noMPB_time<-data.frame(VCF_post_fire_noMPB_mean, time)
# Red = pixels with pre-fire MPB; x-axis restricted to years 0-3, where the
# group difference was significant in the t-tests above.
plot(VCF_post_fire_MPB_time$time, VCF_post_fire_MPB_time$VCF_post_fire_MPB_mean, xlab="Time since fire (years)", ylab="VCF", type="p", col="red", xlim=range(0:3), ylim=range(0:75))
lines(VCF_post_fire_MPB_time$time, VCF_post_fire_MPB_time$VCF_post_fire_MPB_mean, col="red")
lines(VCF_post_fire_MPB_time$time[2:5], VCF_post_fire_MPB_low, col="red", lty=2)
lines(VCF_post_fire_MPB_time$time[2:5], VCF_post_fire_MPB_high, col="red", lty=2)
# Blue = pixels without pre-fire MPB, overlaid on the same axes.
points(VCF_post_fire_noMPB_time$time, VCF_post_fire_noMPB_time$VCF_post_fire_noMPB_mean, col="blue")
lines(VCF_post_fire_noMPB_time$time, VCF_post_fire_noMPB_time$VCF_post_fire_noMPB_mean, col="blue")
lines(VCF_post_fire_noMPB_time$time[2:5], VCF_post_fire_noMPB_low, col="blue", lty=2)
lines(VCF_post_fire_noMPB_time$time[2:5], VCF_post_fire_noMPB_high, col="blue", lty=2)
# would this look more impressive on another scale or something?
# not log
### Post-fire regrowth (relative to pre-fire VCF) as a function of years since fire
# Reminder: pre_minus_Nyrs_post_fire_VCF is pre-fire VCF minus post-fire VCF
# (positive numbers still recovering, negative numbers exceeded pre-fire VCF).
# Welch two-sample t-tests of that recovery deficit, pre-fire-MPB pixels vs.
# pixels without prior MPB, for post-fire years 1-14 (replaces 14 copy-pasted
# t.test() calls).
deficit_mpb <- forest[forest$yrs_infest_bf_fire >= 0, ]
deficit_no_mpb <- forest[forest$yrs_infest_bf_fire < 0, ]
for (yr in 1:14) {
  deficit_col <- paste0("pre_minus_", yr, "yrs_post_fire_VCF")
  print(t.test(deficit_mpb[[deficit_col]], deficit_no_mpb[[deficit_col]]))
}
# Results observed in the original run:
# - years 1-2: different**; mean deficit larger where there was also pre-fire
#   MPB (year 1: with pre-fire MPB = 8.7, without = 6.6)
# - years 3-12: NOT different
# - years 13-14: not enough obs
# **p <= 0.05
### First years post-fire, recovery to a pre-disturbance state greater in
### areas that had NOT been MPB-affected. At 3 years post-fire, recovery looks
### the same independent of whether there was MPB infestation pre-fire or not.
# Plot the above: mean recovery deficit (pre-fire VCF minus post-fire VCF) for
# pre-fire-MPB pixels (red) vs. pixels without prior MPB (blue), with +/- 1 SD
# envelopes (dashed) for post-fire years 1-3.
names(forest)
# Columns 87:100 are selected by position; given the 14-element `time` vector
# below, they presumably hold pre_minus_1yrs..pre_minus_14yrs_post_fire_VCF --
# TODO confirm against names(forest). NOTE(review): column 87 also appears in
# the 66:87 selection of the previous plot block -- verify the index ranges.
VCF_pre_post_fire_MPB <- forest[forest$yrs_infest_bf_fire >= 0, c(87:100)]
VCF_pre_post_fire_noMPB <- forest[forest$yrs_infest_bf_fire < 0, c(87:100)]
# Column-wise mean and SD across pixels, ignoring missing values.
VCF_pre_post_fire_MPB_mean <- apply(VCF_pre_post_fire_MPB, 2, mean, na.rm = TRUE)
VCF_pre_post_fire_MPB_sd <- apply(VCF_pre_post_fire_MPB, 2, sd, na.rm = TRUE)
# +/- 1 SD band only for elements 1:3, i.e. the years shown in the xlim 1-3 window.
VCF_pre_post_fire_MPB_low <- VCF_pre_post_fire_MPB_mean[1:3] - VCF_pre_post_fire_MPB_sd[1:3]
VCF_pre_post_fire_MPB_high <- VCF_pre_post_fire_MPB_mean[1:3] + VCF_pre_post_fire_MPB_sd[1:3]
VCF_pre_post_fire_noMPB_mean <- apply(VCF_pre_post_fire_noMPB, 2, mean, na.rm = TRUE)
VCF_pre_post_fire_noMPB_sd <- apply(VCF_pre_post_fire_noMPB, 2, sd, na.rm = TRUE)
VCF_pre_post_fire_noMPB_low <- VCF_pre_post_fire_noMPB_mean[1:3] - VCF_pre_post_fire_noMPB_sd[1:3]
VCF_pre_post_fire_noMPB_high <- VCF_pre_post_fire_noMPB_mean[1:3] + VCF_pre_post_fire_noMPB_sd[1:3]
# Years since fire: 1 through 14, one entry per selected column.
time <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)
VCF_pre_post_fire_MPB_time <- data.frame(VCF_pre_post_fire_MPB_mean, time)
VCF_pre_post_fire_noMPB_time <- data.frame(VCF_pre_post_fire_noMPB_mean, time)
# Fix: the original used type = "points", which is not a valid plot type; it
# only worked because R warns and truncates it to its first character. Use the
# documented type = "p", matching the companion plot above.
plot(VCF_pre_post_fire_MPB_time$time, VCF_pre_post_fire_MPB_time$VCF_pre_post_fire_MPB_mean, xlab = "Time since fire (years)", ylab = "VCF", type = "p", col = "red", xlim = range(1:3), ylim = range(-25:50))
lines(VCF_pre_post_fire_MPB_time$time, VCF_pre_post_fire_MPB_time$VCF_pre_post_fire_MPB_mean, col = "red")
lines(VCF_pre_post_fire_MPB_time$time[1:3], VCF_pre_post_fire_MPB_low, col = "red", lty = 2)
lines(VCF_pre_post_fire_MPB_time$time[1:3], VCF_pre_post_fire_MPB_high, col = "red", lty = 2)
# Blue = pixels without pre-fire MPB, overlaid on the same axes.
points(VCF_pre_post_fire_noMPB_time$time, VCF_pre_post_fire_noMPB_time$VCF_pre_post_fire_noMPB_mean, col = "blue")
lines(VCF_pre_post_fire_noMPB_time$time, VCF_pre_post_fire_noMPB_time$VCF_pre_post_fire_noMPB_mean, col = "blue")
lines(VCF_pre_post_fire_noMPB_time$time[1:3], VCF_pre_post_fire_noMPB_low, col = "blue", lty = 2)
lines(VCF_pre_post_fire_noMPB_time$time[1:3], VCF_pre_post_fire_noMPB_high, col = "blue", lty = 2)
# would this look more impressive on another scale or something?
###############
# NEXT / further thought:
# Confounding factors - Climate, elevation / slope / aspect
# If fire was in drought year / high fire incidence year
# precip figure - fire severity
# Elevation, slope aspect - recovery
# integrate other infestation in here
# instead of tests for each year, one test with pixel as random effect?
# FRP / severity for each fire / dNBR or rdNBR value
# Because pre-fire VCF is higher in MPB-infested pixels than in non-MPB-infested pixels (but only minimally), what does this mean?
# Q3: How many years after beetle infestation does a fire have a similar recovery trajectory as an area that did not experience infestation? (years between beetle and fire)
# Evaluate this trend as a function of time between MPB infestation and fire. VCF 0-20 years post-fire for 0-x years between MPB infestation and fire
# buffer around fire and MPB (i.e., expand the area slightly to account for error)
###############
# New for Carol:
# Q: Has the burned forest recovered?
# A: Not recovered after 14 years.
# A: Does this vary as a function of pre-fire MPB? Still not recovered after 14 years, independent of previous MPB (but not lot of MPB obs)
# A: Does this vary as a function of pre-fire VCF? Not really. No pre-fire VCF category 'recovers,' but mean of pre-and post-fire VCF greater diff with increasing pre-fire VCF
# If restricting analysis to pre-fire forest pixels, no diff in pre-fire VCF for MPB-infested and not MPB-infested pixels
##################################################
#################### SCRATCH####################
##################################################
###############
|
005fa17f202717f0fc92ba502a1848e7618ad116
|
408220de1e7ad6a66fee48b86522bbb0eef9759f
|
/barcodes_file/barcodes_check.R
|
8a09ad3b52b544c707dc9be88cc0e2ed404e5979
|
[] |
no_license
|
Wolflab/erisor
|
942414f5b182a56f8ebd5106960513a44fdca522
|
7bda5af88e8a0ad0aa0e7a91abff0f81a8025ebc
|
refs/heads/master
| 2020-06-24T23:14:22.572548
| 2017-08-03T12:43:13
| 2017-08-03T12:43:13
| 96,948,658
| 0
| 0
| null | 2017-07-12T00:32:48
| 2017-07-12T00:32:48
| null |
UTF-8
|
R
| false
| false
| 370
|
r
|
barcodes_check.R
|
# barcodes_check.R -- verify that every barcode present in before.csv is still
# present in after.csv; print "oops" for each barcode that went missing.
#setwd('/Users/jimblotter/Desktop/Grad_School/Data_Analysis/erisor/barcodes_file/')
# Fix: install.packages(compare) passed an unquoted symbol, which errors with
# "object 'compare' not found"; the package name must be a string. Only
# attempt installation when the package is actually missing.
if (!requireNamespace("compare", quietly = TRUE)) {
  install.packages("compare")
}
library(compare)
# Both files are headerless CSVs; the barcodes of interest sit in the first row.
before <- read.csv("before.csv", header = FALSE)
after <- read.csv("after.csv", header = FALSE)
before
after
# Fix: the original loop body did not parse (`if i %in% after{` is not valid
# R) and contradicted its own comment; per that comment, "oops" should be
# printed when a barcode from `before` is NOT found anywhere in `after`.
for (barcode in before[1, ]) {
  if (!(barcode %in% unlist(after))) {
    print("oops")
  }
}
|
5a6ddcad7fdd879f72d289a9ce73b416048939e2
|
8a270978e710878945f37852d0be9f73cfa75078
|
/other/fussballdaten.R
|
ea2e6a330fb29fadbdca193bd153f7df4e31f137
|
[] |
no_license
|
bydata/football_data
|
bdcacdfff7d8d099aaf93637a0f131c48462ae01
|
44e59cd8349f2a02df983b0d16eafc37fbed0e4e
|
refs/heads/master
| 2023-07-08T02:20:20.089361
| 2023-06-30T15:22:04
| 2023-06-30T15:22:04
| 145,601,237
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,111
|
r
|
fussballdaten.R
|
library(tidyverse)
library(rvest)
library(parallel)
library(ggthemes)
# Retrieve and parse the HTML content of a page.
#
# @param url Either a single URL string, or a character vector of URL
#   components that is collapsed into one URL with "/" separators. (A single
#   string is itself a vector in R, so the is.vector() branch also runs for
#   plain strings; collapsing a length-1 vector is a no-op, so this is
#   harmless.)
# @return The parsed document from read_html() on success, or NULL when the
#   page could not be fetched/parsed (a warning carrying the original error
#   is issued).
get_content <- function(url) {
  if (is.vector(url)) {
    url <- str_c(url, collapse = "/")
  }
  # Fix: the original did `tryCatch({content <- read_html(url)}, error = ...)`
  # and then returned `content`. On error the handler's `return(NULL)` only
  # exits the handler, `content` was never assigned, and the function crashed
  # with "object 'content' not found" instead of returning NULL. Returning the
  # tryCatch value directly gives the intended NULL-on-failure behavior.
  tryCatch(
    read_html(url),
    error = function(e) {
      warning(e)
      NULL
    }
  )
}
# Example target page: https://www.fussballdaten.de/vereine/borussia-dortmund/1995/kader/
# Scrape one team's squad table for one season from fussballdaten.de.
#
# @param team   team slug as used in fussballdaten.de URLs (e.g. "borussia-dortmund")
# @param league league slug; stored verbatim in the result, not used in the URL
# @param year   season year used in the URL (default 2019)
# @return a data.frame with one row per player (league/year/team columns
#   prepended, nationality in `flag`), or NULL when the flags or table could
#   not be extracted. Column names are assigned by position, so they are only
#   correct if the site's column order matches the hard-coded list below --
#   TODO confirm against a live page.
scrape_fussballdaten_squad <- function(team, league, year = 2019) {
  # Example values for interactive debugging:
  # team <- "borussia-dortmund"
  # league <- "bundesliga"
  # year <- 2019
  request_url <- str_c("https://www.fussballdaten.de/vereine", team, year, "kader", sep = "/")
  raw_table <- get_content(request_url) %>% html_node(css = "table.verein-kader")
  # parse table
  t <- raw_table %>% html_table(fill = TRUE)
  # Positional renaming: assumes the site delivers exactly these 20 columns in
  # this order -- TODO confirm.
  colnames(t) <- c("jersey", "name", "X1", "position", "age", "height", "weight", "games",
                   "goals", "assists", "own_goals", "booked", "sent_off_yellow", "sent_off_red",
                   "subbed_in", "subbed_out", "minutes", "rating", "X2", "X3")
  t <- t %>% select(-X1, -X2, -X3) # country is empty
  # Drop the summary/footer row whose first cell starts with "Spieler:".
  t <- t %>% filter(!str_detect(jersey, "Spieler:"))
  # get nationality from flags (looks a bit overcomplicated but we need to catch edge cases with missing <b> elements containing the flag)
  flag_cells <- html_nodes(raw_table, xpath = "//tr[*]/td[3]")
  flags <- map(flag_cells, ~ html_node(.x, css = "span.flag-icon") %>% html_attr("title")) %>% unlist()
  # be careful, the vector contains the nationalities of the coaching staff as well, first line is empty
  players_n <- nrow(t)
  # Keep only the player rows: skip the empty first entry, take one per player.
  flags <- flags[2:(players_n+1)]
  # convert strings to integers ("-" placeholders become 0)
  convert_str2int <- function(s) {
    as.numeric(str_replace(s, "-", "0"))
  }
  # Bail out with NULL when scraping yielded nothing usable.
  if (is.null(flags) || is.null(t)) {
    return(NULL)
  }
  # merge: prepend the identifying columns, clean up numeric columns
  # (heights/weights use the German decimal comma), and treat a 0 rating as
  # missing.
  squad <- cbind(league, year, team, t, flags) %>%
    rename(flag = flags) %>%
    mutate(jersey = as.numeric(jersey),
           #age = as.numeric(age),
           games = convert_str2int(games),
           goals = convert_str2int(goals),
           #assists = convert_str2int(assists),
           height = as.numeric(str_replace(height, ",", ".")),
           weight = as.numeric(str_replace(weight, ",", ".")),
           rating = convert_str2int(rating),
           rating = ifelse(rating == 0, NA, rating)
    )
  squad
}
# returns a character vector of teams for given league and year
# Scrapes the league table page and extracts team slugs + seasons from the
# hrefs of the standings links. Returns a 2-column character matrix:
# column 1 = team slug (e.g. "borussia-dortmund"), column 2 = 4-digit year.
scrape_fussballdaten_teams <- function(league, year = 2019) {
  url <- str_c("https://www.fussballdaten.de", league, year, "tabelle", sep = "/")
  team_urls <- get_content(url) %>%
    html_node(css = "div#myTab_tabellen-tab0") %>%
    html_nodes(css = "a.table-link") %>%
    html_attr("href")
  # hrefs look like "/vereine/<team-slug>/<year>/..."; capture both groups
  teams <- str_match(team_urls, "/vereine/(.+?)/(\\d{4})/")[, 2:3]
  teams
}
# Scrape every squad of every team for the given leagues.
# Returns a list (one element per league) of lists (one element per team)
# of squad data.frames. Designed to be called via parLapply, hence the name.
# NOTE(review): the `return(team_squads)` inside the error handlers only
# returns from the handler (becoming the tryCatch value, which is discarded),
# not from this function — scraping simply continues after a failure.
scrape_fussballdaten_squads_parallel <- function(leagues) {
  team_names <- vector("list", length(leagues))
  team_squads <- vector("list", length(leagues))
  for (i in 1:length(leagues)) {
    message(leagues[i])
    tryCatch(
      { team_names[[i]] <- scrape_fussballdaten_teams(leagues[i]) },
      error = function(e) {
        warning(e)
        return(team_squads)
      }
    )
    # team_names[[i]] is a matrix (slug, year); [[j]] indexes it linearly,
    # so j = 1..nrow walks down the slug column.
    no_of_teams <- nrow(team_names[[i]])
    team_squads[[i]] <- vector("list", no_of_teams)
    for (j in 1:no_of_teams) {
      message(str_c("|__", j, team_names[[i]][[j]], sep = " "))
      tryCatch(
        {team_squads[[i]][[j]] <- scrape_fussballdaten_squad(team = team_names[[i]][[j]], league = leagues[i])},
        error = function(e) {
          warning(e)
          return(team_squads)
        }
      )
    }
  }
  team_squads
}
# Smoke-test the scrapers on a single league/team before the big run.
teams1 <- scrape_fussballdaten_teams("bundesliga")
teams1
league1 <- scrape_fussballdaten_squad("borussia-dortmund", "bundesliga")
# run queries for selected leagues
# Parallel scrape: one worker per core (minus one); every global and local
# object plus the required packages are pushed to the workers first.
system.time( {
  no_cores <- detectCores() - 1
  cl <- makeCluster(no_cores)
  clusterExport(cl, as.list(unique(c(ls(.GlobalEnv),ls(environment())))),envir=environment())
  clusterEvalQ(cl,
               {library(tidyverse)
                 library(rvest)}
  )
  leagues <- c("bundesliga", "irland", "frankreich", "italien", "england", "belgien", "bulgarien",
               "daenemark", "finnland", "griechenland", "israel", "kroatien", "niederlande",
               "norwegen", "oesterreich", "polen", "portugal", "rumaenien", "russland", "schottland",
               "schweden", "schweiz", "serbien", "spanien", "tschechien", "tuerkei", "ukraine", "ungarn")
  result <- parLapply(cl, leagues, scrape_fussballdaten_squads_parallel)
  stopCluster(cl)
  cl <- NULL
})
# format player data
# Flatten the nested league/team result lists into one table and derive a
# human-readable `country` from the league slug (with umlaut fixes and the
# renamings needed to match the UEFA country names used later).
players <- data.table::rbindlist(flatten(flatten(result))) %>%
  mutate(league = as.character(league),
         team = as.character(team),
         flag = as.character(flag),
         country = ifelse(league == "bundesliga", "Deutschland", str_to_title(league, locale = "de")),
         country = ifelse(country == "Daenemark", "Dänemark", country),
         country = ifelse(country == "Tuerkei", "Türkei", country),
         country = ifelse(country == "Oesterreich", "Österreich", country),
         country = ifelse(country == "Irland", "Republik Irland", country),
         country = ifelse(country == "Rumaenien", "Rumänien", country),
         country = ifelse(country == "Tschechien", "Tschechische Rep.", country)
         )
# Cache the scraped data so the expensive scrape need not be repeated.
saveRDS(players, "players.RData")
# get uefa coefficient rankings (page: https://de.uefa.com/memberassociations/uefarankings/country/#/yr/2018 //
# request in background: https://de.competitions.uefa.com/memberassociations/uefarankings/country/libraries//years/2017/)
# Scrape the UEFA country coefficient table for one year. Returns a
# data.frame with country, position, coefficient, no_of_teams and year.
scrape_uefa_coefficients <- function(year) {
  request_url <- str_c("https://de.competitions.uefa.com/memberassociations/uefarankings/country/libraries//years/", year)
  html_content <- get_content(request_url)
  t <- html_node(html_content, css = "table.table--standings") %>%
    html_table( fill = TRUE) %>%
    select(country = Land, position = Pos, coefficient = `Pkt.`, no_of_teams = Vereine) %>%
    mutate(country = str_trim(str_sub(country, 5, (str_length(country) - 5) / 2 + 3)), # remove prefix, and keep only the first of the duplicated country names
           position = as.numeric(position),
           # German decimal comma -> dot before conversion
           coefficient = as.numeric(str_replace(coefficient, ",", ".")),
           no_of_teams = as.numeric(ifelse(str_detect(no_of_teams, "/"), stringi::stri_match_last(no_of_teams, regex = "\\d+"), no_of_teams)) # in rare cases, the number of teams is "1/8" or "1/9". Only take the last number
           ) %>%
    cbind(year)
  t
}
# Scrape coefficients for all available years into one tibble.
coefficients_tbl <- map_dfr(c(1997:2003, 2005:2019), scrape_uefa_coefficients) %>% # 2004 data is missing on the UEFA website
  as_tibble()
## EXPLORATION: players
# How many players per country?
countries <- players %>% count(country)
countries
# Mean/SD of shirt numbers per league.
players %>%
  group_by(league) %>%
  summarize(jersey_mean = mean(jersey, na.rm = TRUE),
            jersey_sd = sd(jersey, na.rm = TRUE)
  ) %>%
  arrange(desc(jersey_mean))
# missing shirt numbers
players %>%
  filter(is.na(jersey)) %>%
  count(country) %>%
  arrange(desc(n))
# missing shirt numbers but matches played
players %>%
  filter(is.na(jersey), games > 0) %>%
  count(country) %>%
  arrange(desc(n))
# teams seem to have A LOOOOT of players in their roster
players %>%
  count(country, team) %>%
  group_by(country) %>%
  summarize(med_players_team = median(n)) %>%
  arrange(desc(med_players_team))
# keep only players with a non-missing shirt number
players_cleaned <- players %>%
  filter(!is.na(jersey))
# number of matches per player and country
# FIX: the players table has no `matches` column — the squad scraper names
# it `games` (see colnames in scrape_fussballdaten_squad), so the original
# `mean(matches)` raised "object 'matches' not found". Also drop NAs.
players %>%
  group_by(country) %>%
  summarize(avg_no_matches = mean(games, na.rm = TRUE)) %>%
  arrange(avg_no_matches)
## EXPLORATION: UEFA coefficients
# there is an odd dip in (mean) coefficients between 2005 and 2007 - standardize coefficients in order to avoid artefacts
coefficients_tbl %>%
  group_by(year) %>%
  summarize(coeff_mean = mean(coefficient),
            coeff_sd = sd(coefficient)) %>%
  ggplot(aes(year, coeff_mean)) +
  geom_line()
# z-standardize coefficients by year
coefficients_tbl <- coefficients_tbl %>%
  group_by(year) %>%
  mutate(coefficient_z = scale(coefficient)) %>%
  ungroup()
# check if z-standardization worked (mean = 0, sd = 1)
coefficients_tbl %>%
  group_by(year) %>%
  summarize(coeff_z_mean = round(mean(coefficient_z)),
            coeff_z_sd = round(sd(coefficient_z)))
# Standardized coefficients over time for the big-five leagues.
coefficients_tbl %>%
  filter(country %in% c("Spanien", "Deutschland", "England", "Italien", "Frankreich")) %>%
  ggplot(aes(year, coefficient_z, col = country)) +
  geom_line() +
  coord_cartesian(ylim = c(0, max(coefficients_tbl$coefficient_z) + 1)) +
  theme_hc() + scale_color_hc()
#devtools::install_github("thomasp85/gganimate")
library(gganimate)
# Rank countries by coefficient within each year (ties broken by first
# occurrence) for the animated bar-chart race below.
coefficients_ordered <- coefficients_tbl %>%
  mutate(country = ifelse(country == "Tschechische Rep.", "Tschechien", country)) %>%
  group_by(year) %>%
  mutate(rank_no = rank(-coefficient, ties = "first")) %>%
  ungroup() %>%
  arrange(year, rank_no) %>%
  select(year, rank_no, country, everything())
# how many countries to display on chart
display_countries_n <- 10
# country colours
countries <- coefficients_ordered %>% distinct(country) %>% arrange(country) %>% pull(country)
(countries_n <- length(countries))
# English translations
# NOTE(review): this vector must stay aligned one-to-one (alphabetically)
# with `countries` above — bind_cols below pairs them by position.
countries_en <- c(
  "Albania", "Andorra", "Armenia", "Azerbaidzhan", "Belarus",
  "Belgium", "Bosnia-Herzegovina", "Bulgaria", "Denmark", "Germany",
  "England", "Estonia", "Färöer", "Finland", "France",
  "Georgia", "Gibraltar", "Greece", "Iceland", "Israel",
  "Italy", "Kazakstan", "Kosovo", "Croatia", "Latvia",
  "Liechtenstein", "Lithania", "Luxembourg", "Malta", "Moldavia",
  "Montenegro", "Netherlands", "Northern_Ireland", "North_Mazedonia", "Norway",
  "Austria", "Poland", "Portugal", "Ireland", "Romania",
  "Russia", "San Marino", "Scotland", "Sweden", "Switzerland",
  "Serbia", "Slovakia", "Slovenia", "Spain", "Czech_Republic",
  "Turkey", "Ukraine", "Hungary", "Wales", "Cyprus"
)
country_translations <- bind_cols(country_de = countries, country_en = countries_en)
coefficients_ordered <-
  coefficients_ordered %>%
  left_join(country_translations, by = c("country" = "country_de"))
# Default grey for all countries, then highlight a few by name.
country_colors = c(
  rep("#999999", countries_n)
)
names(country_colors) <- countries
country_colors
country_colors["Deutschland"] <- "black"
country_colors["Spanien"] <- "yellow"
country_colors["England"] <- "red"
country_colors["Frankreich"] <- "blue"
country_colors["Italien"] <- "green"
country_colors["Belgien"] <- "#555555"
#country_colors[""] <- ""
# flags
#img_germany <- readPNG(system.file("flags", "germany.png", package="png"))
# img_germany <- readPNG("flags/germany.png")
# flag_germany <- rasterGrob(img_germany, interpolate=TRUE)
coefficients_ordered_2003 <- coefficients_ordered %>%
  filter(year == 2003)
# fake it
# 2004 is missing from the UEFA site; duplicate 2003 so the animation has
# a frame for every year.
coefficients_ordered_2004 <- coefficients_ordered_2003 %>%
  mutate(year = 2004)
coefficients_ordered_fixed <- coefficients_ordered %>%
  bind_rows(coefficients_ordered_2004)
# create graph
# Horizontal bar-chart race of the top-N countries; geom_tile is used as a
# bar so the bars can be animated smoothly across years.
p <- coefficients_ordered_fixed %>%
  filter(rank_no <= display_countries_n) %>%
  ggplot(aes(rank_no, group = country_en, fill = country_en)) +
  geom_tile(aes(y = coefficient/2, height = coefficient),
            width = 0.8, color = NA, alpha = 0.7, show.legend = FALSE) +
  geom_text(aes(label = str_c(" ", country_en)), y = 1, size = 5, vjust = 0.5, hjust = 0, parse = TRUE) +
  geom_text(aes(label = sprintf("%.2f", coefficient), x = rank_no, y = coefficient + 3), vjust = 0) +
  coord_flip(clip = "off") +
  scale_x_reverse(breaks = coefficients_ordered_fixed$rank_no) +
  labs(
    title = "UEFA Coefficients Ranking",
    # "{closest_state}" is interpolated by gganimate with the current year
    subtitle = paste("Top", display_countries_n, "Leagues", "({closest_state})"),
    caption = "Source: UEFA.com",
    y = NULL, x = NULL
  ) +
  scale_fill_viridis_d(option = "D") +
  guides(fill = NULL) +
  theme_hc() +
  theme(
    axis.text = element_blank(),
    axis.ticks = element_blank(),
    panel.grid = element_blank(),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.grid.major.y = element_blank(),
    panel.grid.minor.y = element_blank(),
    panel.border = element_blank(),
    #plot.margin = unit(c(0.5, 0.5, 0.5, 2), "cm"),
    plot.title = element_text(size = 25, face = "bold"),
    plot.subtitle = element_text(size = 15, hjust = 0)
  )
# animation
# One second of animation per year, plus a 4-second freeze at the end.
fps <- 12
years_n <- (2019 - 1997 - 1)
anim <- p + transition_states(year, wrap = FALSE) + ease_aes("linear")
animate(anim, nframes = years_n * fps, fps = fps, width = 800, height = 600, end_pause = 4 * fps, renderer = av_renderer())
anim_save("uefa_coefficients.mp4")
# biggest improvements in (scaled) ratings
# Compare each country's standardized coefficient in the first vs. the last
# year on record and rank by the difference.
coefficients_tbl %>%
  filter(year == min(year) | year == max(year)) %>%
  mutate(year = ifelse(year == min(year), "min", "max")) %>%
  arrange(country, year) %>%
  spread(year, coefficient_z, sep = "_") %>%
  group_by(country) %>%
  # collapse the two NA-padded rows per country into one value each
  mutate(year_min = min(year_min, na.rm = TRUE),
         year_max = max(year_max, na.rm = TRUE),
         diff = year_max - year_min
  ) %>%
  ungroup() %>%
  distinct(country, diff) %>%
  arrange(desc(diff))
#####
# join shirt number stats per country with UEFA coefficient
# FIX: the players table names the shirt-number column `jersey` (assigned in
# scrape_fussballdaten_squad), so the original `shirt_number` references
# raised "object 'shirt_number' not found". Output column names are kept.
joined <- players %>%
  group_by(country, year) %>%
  summarize(shirt_mean = mean(jersey, na.rm = TRUE),
            shirt_sd = sd(jersey, na.rm = TRUE)
  ) %>%
  arrange(desc(shirt_mean)) %>%
  inner_join(coefficients_tbl, by = c("country", "year"))
# check which countries remained from LHS
joined %>%
  distinct(country)
# check which countries are missing
players %>%
  anti_join(coefficients_tbl, by = c("country", "year")) %>%
  count(country)
# Mean shirt number vs. (standardized) UEFA coefficient, point size = SD.
ggplot(joined, aes(coefficient_z, shirt_mean, col = country)) +
  geom_point(aes(size = shirt_sd)) +
  coord_cartesian(xlim = c(-1, 4), ylim = c(6, 40)) +
  labs(size = "Standard deviation of shirt numbers", col = "Country") +
  ggtitle("Mean shirt number by standardized UEFA coefficient") +
  ggthemes::theme_fivethirtyeight()
ggplot(joined, aes(coefficient, shirt_mean, col = country)) +
  geom_point(aes(size = shirt_sd)) +
  coord_cartesian(ylim = c(6, 40)) +
  labs(size = "Standard deviation of shirt numbers", col = "Country") +
  ggtitle("Mean shirt number by UEFA coefficient") +
  ggthemes::theme_fivethirtyeight()
ggsave("shirt_numbers_by_uefacoefficient.png")
ggplot(joined, aes(coefficient, shirt_sd, col = factor(country))) +
  geom_point() +
  ggthemes::theme_fivethirtyeight()
# Simple correlations and linear models: does league strength predict
# shirt-number habits?
cor(joined$coefficient_z, joined$shirt_mean)
cor(joined$coefficient_z, joined$shirt_sd)
mod1 <- lm(shirt_mean ~ coefficient_z, joined)
summary(mod1)
mod1 <- lm(shirt_mean ~ coefficient_z + no_of_teams, joined)
summary(mod1)
# quantify the number of shirt numbers exceeding the total number of players within each team
# FIX: use the actual column names produced by the scraper — `jersey`
# (shirt number) and `games` (matches played); the original referenced
# nonexistent `shirt_number`/`matches` columns and errored.
players %>%
  group_by(team) %>%
  filter(!is.na(jersey)) %>%
  mutate(no_of_players = n(),
         diff = jersey - no_of_players) %>%
  filter(games > 0) %>% # filter separately not to confound the number of players in the squad
  filter(team == "ac-mailand") %>%
  filter(jersey > no_of_players)
# Per country: median number of players wearing a "too big" shirt number and
# the median excess of those numbers over the squad size.
big_shirts <- players %>%
  group_by(country, year, team) %>%
  filter(!is.na(jersey)) %>%
  mutate(no_of_players = n(),
         diff = jersey - no_of_players) %>%
  filter(games > 0 & jersey > no_of_players) %>%
  summarize(players_w_big_shirts = n(),
            median_diff = median(diff)
  ) %>%
  summarize(median_big_shirts = median(players_w_big_shirts),
            median_diff = median(median_diff)
  )
# Attach the UEFA coefficients to the big-shirt statistics and visualize.
big_shirts_coeffs <- big_shirts %>%
  inner_join(coefficients_tbl, by = c("country", "year"))
ggplot(big_shirts_coeffs, aes(coefficient_z, median_diff, col = country)) +
  geom_point(aes(size = median_big_shirts)) +
  ggthemes::theme_fivethirtyeight()
ggplot(big_shirts_coeffs, aes(coefficient_z, median_big_shirts, col = country)) +
  geom_point(aes(size = median_diff)) +
  ggthemes::theme_fivethirtyeight()
ggplot(big_shirts_coeffs, aes(no_of_teams, median_big_shirts, col = country)) +
  geom_point(aes(size = median_diff)) +
  ggthemes::theme_fivethirtyeight()
cor(big_shirts_coeffs$coefficient_z, big_shirts_coeffs$median_diff)
cor(big_shirts_coeffs$coefficient_z, big_shirts_coeffs$median_big_shirts)
# quantify the number of shirt numbers exceeding 50
# FIX: shirt numbers live in the `jersey` column (assigned in
# scrape_fussballdaten_squad); there is no `shirt_number` column.
(shirt50 <- players %>%
   group_by(country, year) %>%
   filter(!is.na(jersey)) %>%
   mutate(shirt50 = (jersey > 50)) %>%
   summarize(shirt50_share = mean(shirt50)) %>%
   arrange(desc(shirt50_share))
)
# Spot check a club with notoriously high shirt numbers.
players %>%
  filter(country == "Belgien" & team == "aa-gent") %>%
  arrange(desc(jersey))
shirt50_coeff <- shirt50 %>%
  inner_join(coefficients_tbl, by = c("country", "year"))
ggplot(shirt50_coeff, aes(coefficient, shirt50_share)) +
  geom_point(size = 2) +
  ggthemes::theme_fivethirtyeight()
cor(shirt50_coeff$shirt50_share, shirt50_coeff$coefficient)
|
9af8809727b23a2c4e47d77de2ce98648fffb440
|
96dac3b379db632cc577600f1041ecafbddca400
|
/scripts that do not actuially work/data processing script for tracking.R
|
53744e89843d3cc6f2998c08d9b62ac42eaf5d0b
|
[] |
no_license
|
kaye11/Some-R-scripts
|
78e53b0c37254945120fca91255801b392835cb1
|
632b16a3269c7ce5c7c14efceb26fb02bf66eac1
|
refs/heads/master
| 2021-01-23T06:44:20.200098
| 2016-09-01T18:56:25
| 2016-09-01T18:56:25
| 21,462,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 881
|
r
|
data processing script for tracking.R
|
#Changing directory
getwd()
setwd("D:\\Karen's\\PhD\\R program")
getwd()
##Rename Data
# NOTE(review): assumes `trackdata2` already exists in the workspace with
# columns A (track id), V (step distance) and X/Y coordinates — confirm.
t1<- trackdata2
## GrossDistance (GD): sum of step distances per track
GD <- aggregate( V ~ A , data = t1 , sum , na.rm = TRUE )
## Computing the NetDistance (ND)
## Split the data into one data.frame per track
dfs <- split(t1,t1$A)
## calculation
# For each track: distances of every point from the first point.
# NOTE(review): `[1:nrow(x)-1]` parses as `(1:nrow(x)) - 1`, i.e. 0:(n-1);
# the 0 index is silently dropped, so this equals the presumably intended
# `1:(nrow(x)-1)` — it works, but only by accident of R's indexing rules.
NDtemp1 <- sapply( dfs , function(x) dist( x[,c("X","Y")] , diag = TRUE)
                   [1:nrow(x)-1], simplify = TRUE, USE.NAMES = TRUE )
## Convert to usable data and append to dataset
NDtemp2=as.matrix(NDtemp1)
NDtemp3<-unsplit(NDtemp2, t1$A)
## Ignore warnings from Unsplit
ND=as.matrix(NDtemp3)
NM1<-cbind(t1, ND)
## NetDistanceSquared (ND^2)
ND2=ND*ND
newmatrix<-cbind(NM1, ND2)
## Export completed dataset
write.table(newmatrix, "d:/Karen's/PhD/R/Processed_data/newmatrix.txt", sep="\t")
plot(0)
title(main="DO NOT FORGET TO RENAME THE FILE!!!", col.main="red")
print("DO NOT FORGET TO RENAME THE FILE!!!")
|
bbe3e3d4fc3aeb8dcbbad48dff599b97e220bc18
|
53430551f5f65103243e349f27a8283c5f54ec98
|
/pollutantmean.R
|
67526f0fecac0e5518f149860e7867e8df0ca6df
|
[] |
no_license
|
rserran/ProgAssignment-1
|
3761bfd1ee4a64ec408d5ce94a4b5633123f118c
|
26399cacf0053aa18cf0c42cfef3d5fd35b479a8
|
refs/heads/master
| 2020-04-17T12:25:51.723626
| 2019-01-19T19:09:59
| 2019-01-19T19:09:59
| 166,578,754
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 822
|
r
|
pollutantmean.R
|
## pollutantmean calculates the mean of the pollutant selected from the
## `directory` of monitor CSV files, restricted to the monitors in `id`.
## The function assumes `directory` is located in the R working directory.
##
## Arguments:
##   directory - name of the folder holding "001.csv" ... "332.csv"
##   pollutant - name of the column to average (e.g. "sulfate")
##   id        - integer vector of monitor ids to include (default all 332)
## Returns the mean of the pollutant across all selected monitors, NAs removed.
pollutantmean <- function(directory, pollutant, id = 1:332) {
    path <- file.path(getwd(), directory)
    ## sprintf zero-pads the ids, replacing the original three paste() calls
    files <- file.path(path, sprintf("%03d.csv", id))
    ## read all requested monitors, then combine once
    ## (avoids the original O(n^2) rbind-inside-a-loop growth)
    data <- do.call(rbind, lapply(files, read.csv))
    mean(data[[pollutant]], na.rm = TRUE)
}
|
9d3fc6d12dbc03d80df9b5c6771eed41cea3262f
|
c9f1434aaae3b1606acb71ad5594ba2ef2d7a233
|
/R/rlba.R
|
c47d08498589d0a63d9ab26d17473cedd3c483ba
|
[] |
no_license
|
cran/glba
|
84a61aee7b31416a47352ad04e95961b1a299368
|
2e43a7bd8ce543cf92056ad7a85d47d74f14b354
|
refs/heads/master
| 2022-05-21T14:29:01.142929
| 2022-05-02T12:01:52
| 2022-05-02T12:01:52
| 30,880,069
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 700
|
r
|
rlba.R
|
# Simulate n trials from an accumulator race model (presumably the Linear
# Ballistic Accumulator, given the package context — confirm).
#   n            number of trials
#   b            response threshold
#   A            upper bound of the uniform start-point distribution
#   vs           vector of mean drift rates, one per accumulator
#   s            between-trial SD of the drift rates
#   t0, st0      non-decision time and its uniform variability
#   truncdrifts  if TRUE, resample until every trial has >= 1 positive drift
# Returns a data.frame with resp (index of winning accumulator) and rt.
rlba <-
  function(n,b,A,vs,s,t0,st0=0,truncdrifts=TRUE){
    # Oversample trials to compensate for those discarded by truncation.
    n.with.extras=ceiling(n*(1+3*prod(pnorm(-vs))))
    # One row per trial, one column per accumulator.
    drifts=matrix(rnorm(mean=vs,sd=s,n=n.with.extras*length(vs)),ncol=length(vs),byrow=TRUE)
    if (truncdrifts) {
      # Keep only trials where at least one drift is positive (otherwise no
      # accumulator would ever reach threshold); top up until n rows remain.
      repeat {
        drifts=rbind(drifts,matrix(rnorm(mean=vs,sd=s,n=n.with.extras*length(vs)),ncol=length(vs),byrow=TRUE))
        tmp=apply(drifts,1,function(x) any(x>0))
        drifts=drifts[tmp,]
        if (nrow(drifts)>=n) break
      }
    }
    drifts=drifts[1:n,]
    # Negative drifts can never win the race; clamp to 0 (infinite time).
    drifts[drifts<0]=0
    # Uniform start points in [0, A]; time-to-finish = distance / drift.
    starts=matrix(runif(min=0,max=A,n=n*length(vs)),ncol=length(vs),byrow=TRUE)
    ttf=t((b-t(starts)))/drifts
    # RT = fastest accumulator + non-decision time (with uniform jitter).
    rt=apply(ttf,1,min)+t0+runif(min=-st0/2,max=+st0/2,n=n)
    resp=apply(ttf,1,which.min)
    data.frame(resp=resp,rt=rt)
  }
|
8046d060517bd0fe018ad2d9565d62e7271d18d6
|
875c89121e065a01ffe24d865f549d98463532f8
|
/man/liveArrayTimes.Rd
|
3f78ca53dc2d448b812db6a839ab0aff83f79d4f
|
[] |
no_license
|
hugomflavio/actel
|
ba414a4b16a9c5b4ab61e85d040ec790983fda63
|
2398a01d71c37e615e04607cc538a7c154b79855
|
refs/heads/master
| 2023-05-12T00:09:57.106062
| 2023-05-07T01:30:19
| 2023-05-07T01:30:19
| 190,181,871
| 25
| 6
| null | 2021-03-31T01:47:24
| 2019-06-04T10:42:27
|
R
|
UTF-8
|
R
| false
| true
| 437
|
rd
|
liveArrayTimes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load.R
\name{liveArrayTimes}
\alias{liveArrayTimes}
\title{Assign live times to arrays}
\usage{
liveArrayTimes(arrays, deployments, spatial)
}
\arguments{
\item{arrays}{The array list}
\item{deployments}{the deployments list}
\item{spatial}{The spatial list}
}
\value{
an expanded array list
}
\description{
Assign live times to arrays
}
\keyword{internal}
|
d0d9a61779389ee86a6de1f50b37357fa7c3a175
|
5adc0dfe6cae8f90cc20cd149bf03852b0396e34
|
/tests/testthat/test_clean_projlead.R
|
0c36f4c4a85afc7f6052ab1fde34c0126808161a
|
[
"MIT"
] |
permissive
|
AGROFIMS/ragrofims
|
43664011980affa495c949586bde192d08d4b48e
|
bc560a62c19c30bbc75615a19a4b9f8a235f7ddf
|
refs/heads/master
| 2023-02-21T08:49:34.989861
| 2021-01-20T16:22:48
| 2021-01-20T16:22:48
| 277,626,238
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,254
|
r
|
test_clean_projlead.R
|
library(ragapi)
library(ragrofims)
# Integration tests for get_projlead_metadata against the live AGROFIMS dev
# API (version 0233); these require network access to run.
context("Test for clean and get metadata from project lead")
test_that("Test get_projlead_metadata for testq0 - API ver. 233 - no combos", {
  # Study 3 has no project-lead combos: expect an empty data.frame.
  out <- get_projlead_metadata(studyId = 3,format = "data.frame",
                               serverURL = "https://research.cip.cgiar.org/agrofims/api/dev",
                               version ="/0233/r")
  testthat::expect_equal(ncol(out), 0)
  testthat::expect_equal(nrow(out), 0)
})
test_that("Test get_projlead_metadata for testq5 - API ver. 233 - 1 other combo", {
  # Study 7 has one "other" combo: expect a 2x2 data.frame.
  out <- get_projlead_metadata(studyId = 7,format = "data.frame",
                               serverURL = "https://research.cip.cgiar.org/agrofims/api/dev",
                               version ="/0233/r")
  testthat::expect_equal(ncol(out), 2)
  testthat::expect_equal(nrow(out), 2)
})
test_that("Test get_projlead_metadata for testq6 - API ver. 233 - 1 filled combo", {
  # Study 8 has one filled combo: expect a 2x2 data.frame.
  out <- get_projlead_metadata(studyId = 8,format = "data.frame",
                               serverURL = "https://research.cip.cgiar.org/agrofims/api/dev",
                               version ="/0233/r")
  testthat::expect_equal(ncol(out), 2)
  testthat::expect_equal(nrow(out), 2)
})
|
e218ddf0ebe53cfb9c9bfcfa33d212c36709d147
|
9dc507cc478cccf7bc4c94dd46699e308a93b08f
|
/PRC/prc_raw.v3.R
|
d76414ae8a321ac1274e2a678ddc6839ef31146b
|
[] |
no_license
|
Lupenrein/R-scripts_MA
|
9b12bba332cf91cfd33fd73e96f6ebfc5c85d9ac
|
a13e25a67d9883715f8ca38c3a12db181680b370
|
refs/heads/main
| 2023-04-07T07:03:26.099960
| 2021-03-29T06:41:37
| 2021-03-29T06:41:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,663
|
r
|
prc_raw.v3.R
|
## Script for a principal response curve without statistics
## Requirements for datafile:
## - populations (type) coded as follows: 0 = T0rb (reference), 1 = T2rb, 2 = T0ina, 3 = T2ina, 4 = T2cur
## Script by Natalie Dallmann, Institute for environmental research, RWTH
## Version 3 (24.02.2021)
## Load required packages
## FIX: install 'vegan' only when it is missing instead of unconditionally
## reinstalling it on every run of the script.
if (!requireNamespace("vegan", quietly = TRUE)) {
  install.packages("vegan")
}
library("vegan")
## set working directory
#setwd(choose.dir())
## load data: drop the first column, then remove the dummy individuals
data <- read.table("pcr.data+dummy.csv", header = TRUE, sep = ",")
data <- data[2:10]
data1 <- subset(data, ind != "dummy")
## prepare data for prc
treatment <- as.factor(data1$type2)
week <- as.factor(data1$week)
ID <- as.factor(data1$ID)
l.ID <- length(ID)
n.ID <- length(unique(ID))
## Loop to shuffle ID vector: to see if the implied "repeated measurements" bias the results
filename <- paste(Sys.Date(), "_prc_raw.cor.res.txt", sep = "") ## adjust filename
## all print() output below goes to this file until sink() is closed
sink(file = filename, append = TRUE)
## calculate prc w/statistics (response = the 4 species/endpoint columns)
res.prc <- prc(response = data1[, 5:8], treatment, week, correlation = TRUE) ## for correlation matrix: correlation = TRUE
## Save results
print(res.prc)
print(summary(res.prc))
## Plot results
spec <- abs(res.prc$colsum)
picname <- paste(Sys.Date(), "_prc_raw.cor.png", sep = "") ## adjust filename
png(picname, width = 1000, height = 1000, res = 100)
plot(res.prc, scaling = 0, lty = c(1, 2, 1, 1), col = c("royalblue4", "green", "green4", "red"), lwd = 2, legpos = NA)
legend("topleft", legend = c("RB(ctrl)", "RB(high)", "ina(ctrl)", "ina(high)", "cur(high)"), lty = c(1, 1, 2, 1, 1), col = c("grey","royalblue4", "green", "green4", "red"), lwd = 2)
dev.off()
sink()
|
3933d953f824fb0f1b6dae60db53fbab0e494fc0
|
41a8f96b9449fad33b54797dec9ccb1704a2c298
|
/R/utils.R
|
aa35353798603e90185dcd0d5866dfee0bfd3459
|
[] |
no_license
|
borangao/BSLMMSusie
|
c02fbf9caf04755d361f4088134ec6133b51629c
|
2cbd0d50832c2356a2426aa3c99082a8d238e284
|
refs/heads/master
| 2023-03-07T12:15:24.728644
| 2021-02-18T04:00:05
| 2021-02-18T04:00:05
| 331,758,215
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,551
|
r
|
utils.R
|
# Reduce a susie fit to its light-weight components (alpha, niter, V, sigma2),
# discarding everything else in `res`.
susie_slim <- function(res) {
  list(alpha  = res$alpha,
       niter  = res$niter,
       V      = res$V,
       sigma2 = res$sigma2)
}
# Smallest number of entries of `x` (taken in decreasing order) whose sum
# reaches the requested coverage level.
n_in_CS_x <- function(x, coverage = 0.9) {
  ordered_probs <- sort(x, decreasing = TRUE)
  running_total <- cumsum(ordered_probs)
  sum(running_total < coverage) + 1
}
# 0/1 indicator of which entries of `x` belong to its credible set at the
# given coverage: the top-k entries, where k is the smallest count whose
# (decreasing) cumulative sum reaches `coverage` (inlined from n_in_CS_x).
in_CS_x <- function(x, coverage = 0.9) {
  k <- sum(cumsum(sort(x, decreasing = TRUE)) < coverage) + 1
  ranked <- order(x, decreasing = TRUE)
  flags <- rep(0, length(x))
  flags[ranked[seq_len(k)]] <- 1
  flags
}
# Row-wise credible-set membership: applies in_CS_x to every row of the
# alpha matrix of a susie fit. Returns a 0/1 matrix with the same
# orientation as res$alpha (t() undoes apply's transposition).
in_CS = function (res, coverage = 0.9) {
  res = res$alpha
  return(t(apply(res,1,function(x) in_CS_x(x,coverage))))
}
# Purity of a credible set: (min, mean, median) of the absolute pairwise
# correlations among the variables indexed by `pos`.
#   pos    - variable indices in the credible set
#   X      - data matrix (used when Xcorr is NULL)
#   Xcorr  - precomputed correlation matrix, or NULL
#   squared- if TRUE, report squared correlations
#   n      - at most this many positions are used (random subsample)
# A singleton set is perfectly pure by definition: c(1, 1, 1).
get_purity = function(pos, X, Xcorr, squared = FALSE, n = 100) {
  if (length(pos) == 1)
    c(1,1,1)
  else {
    # Cap cost: correlate at most n randomly chosen positions.
    if (length(pos) > n)
      pos = sample(pos, n)
    if (is.null(Xcorr)) {
      X_sub = X[,pos]
      # Remove (near-)constant columns, whose correlation is undefined.
      # FIX: the original guarded this with `length(pos) > n`, which can
      # never be TRUE after the subsampling above (dead code), and negated
      # a *logical* vector as if it held positions (`-pos_rm`), which would
      # have dropped the wrong columns. Use which() + a proper length check.
      pos_rm = which(sapply(1:ncol(X_sub),
                            function(i) all(abs(X_sub[,i] - mean(X_sub[,i])) <
                                              .Machine$double.eps^0.5)))
      if (length(pos_rm) > 0)
        X_sub = X_sub[,-pos_rm]
      value = abs(muffled_corr(as.matrix(X_sub)))
    } else
      value = abs(Xcorr[pos,pos])
    if (squared)
      value = value^2
    # na.rm guards against any remaining undefined correlations
    return(c(min(value,na.rm = TRUE),
             mean(value,na.rm = TRUE),
             median(value,na.rm = TRUE)))
  }
}
# cor() wrapper that silences exactly the "standard deviation is zero"
# warning (raised for constant columns) while letting any other warning
# propagate unchanged.
muffled_corr <- function(x) {
  silence_zero_sd <- function(w) {
    if (grepl("the standard deviation is zero", conditionMessage(w)))
      invokeRestart("muffleWarning")
  }
  withCallingHandlers(cor(x), warning = silence_zero_sd)
}
|
457ba376ab9dfb5399197731beb383444be74511
|
925a1586e11c8f2dff5d43a0b2591bc0d3866aca
|
/week02-02.R
|
829ac0cccd15bbde0889c5c0952d4aae60ab2a97
|
[] |
no_license
|
znehraks/2021-1-Statistical-Analysis-With-R
|
362baf576c3c4ba46ecc425858f6c4750805eeae
|
052049f9e3004e549c5cf087fa724425c56f05d8
|
refs/heads/master
| 2023-06-02T09:23:20.376185
| 2021-06-14T17:46:51
| 2021-06-14T17:46:51
| 376,593,478
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,021
|
r
|
week02-02.R
|
# Inspect the ggplot2 midwest dataset.
dim(midwest)
str(midwest)
#1
# Asian population share per county, classified as large/small relative to
# the overall mean share.
midwest$ratio = midwest$popasian/midwest$poptotal
x = mean(midwest$ratio)
midwest$grade = ifelse(midwest$ratio >= x, "large", "small")
table(midwest$grade)
qplot(midwest$grade)
#2
# Top 10 counties by Asian population share.
library(dplyr)
midwest_new = midwest %>%
  arrange(desc(midwest$ratio)) %>%
  select(county, ratio) %>% head(10)
midwest_new
#3
# NOTE(review): "asain" in the output file name looks like a typo for
# "asian" — kept as-is because changing it changes the output path.
write.csv(midwest_new, "asain_midwest.csv")
#--------------------------------------------------------------------------
#https://vincentarelbundock.github.io/Rdatasets/datasets.html
myData = read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/AER/CASchools.csv")
str(myData)
# mean of read + math per school
# then the mean of those means
myData$mymean = (myData$read + myData$math) / 2
CA_mean = mean(myData$mymean)
# top 10 (schools above the state mean, highest first)
myData %>%
  filter(mymean > CA_mean) %>%
  arrange(desc(mymean)) %>%
  head(10) %>%
  select(county, school, mymean)
# bottom 10
# NOTE(review): this still sorts descending and takes head(10), so it shows
# the *best* of the below-average schools, not the lowest 10 — confirm intent.
myData %>%
  filter(mymean < CA_mean) %>%
  arrange(desc(mymean)) %>%
  head(10) %>%
  select(county, school, mymean)
|
04d797bfe558c528dd35f4a0fcdae4fef53df3ce
|
4da5c1df47a2561677163a83f74a4dd7b6bb48fd
|
/plot2.R
|
848c422eb1befb3d176a05732b50baee6d508687
|
[] |
no_license
|
jlg373/ExData_Plotting1
|
847b1d5b8b133ae31331674c49f7e4878ab1a862
|
38cd037122f68575492532d88f0d2bdd54e10aa1
|
refs/heads/master
| 2021-09-05T07:06:00.600491
| 2018-01-25T02:11:52
| 2018-01-25T02:26:23
| 118,835,178
| 0
| 0
| null | 2018-01-24T23:27:22
| 2018-01-24T23:27:21
| null |
UTF-8
|
R
| false
| false
| 1,143
|
r
|
plot2.R
|
# Plotting Assignment 1 for Exploratory Data Analysis -
# This script generates the second graphic in the assignment - global active power as a function of time.
# If appropriate data file does not exist in working directory, download and unzip.
if(!file.exists("household_power_consumption.txt")){
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "PowerData.zip")
  unzip("PowerData.zip")
  file.remove("PowerData.zip")
}
# Load required subset of data and name columns appropriately.
# To save time and memory, the entire dataset is not loaded.
# NOTE(review): skip = 66637 / nrows = 2880 hard-code the two target days
# (2880 minutes) by row position; this breaks if the source file changes.
header <- read.table("household_power_consumption.txt", sep = ";", nrows = 1)
data <- read.table("household_power_consumption.txt", sep = ";", skip = 66637, nrows = 2880)
colnames(data) <- unlist(header)
# Convert time to POSIXct by combining the Date and Time columns.
data$Time <- as.POSIXct(paste(data$Date, data$Time), format="%d/%m/%Y %H:%M:%S")
# Create and save plot as a png file in the working directory.
png(file = "plot2.png")
with(data, plot(Time, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)"))
dev.off()
|
ecdfc7479641870a2010bff2c71876926e2dba8c
|
4e01acf5a07af95846300ed1016edf601fdbb6cc
|
/Rprogramming/assignment1/mysubmit.R
|
5caa5e27eced377db5094334641fe12857c021c0
|
[] |
no_license
|
carolcoder/datasciencecoursera
|
5b5c8e9ca270ba961061c4ae4b5dcacfdcf1bab5
|
d80a4ac780506179ab1e25cf559256f2f9de4a31
|
refs/heads/master
| 2021-01-23T02:49:10.301308
| 2015-08-07T20:06:33
| 2015-08-07T20:06:33
| 30,250,558
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 35
|
r
|
mysubmit.R
|
# Load the Coursera submission helper (must be in the working directory)
# and start the interactive assignment submission.
source("submitscript1.R")
submit()
|
c5e77bc37d17c33ca009a2354a615246fb24a76e
|
7b7519e5b264e67d0c837a6a4024c965bca827ac
|
/programs/summary_variables/bootstrap/make_leaf_p_retranslocation_coefficient_bootstrap.R
|
c5fbb79af4bc1aa4f11f04622566163eb208c37c
|
[] |
no_license
|
SoilTSSM/EucFACE_P_synthesis
|
32b0cb47b31d4eddbed6739f20142bfac80ecd14
|
006c65fdef6203b77f03afe1fb05b261f16ec6b5
|
refs/heads/master
| 2020-04-05T20:44:57.715117
| 2018-11-09T04:53:59
| 2018-11-09T04:53:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,747
|
r
|
make_leaf_p_retranslocation_coefficient_bootstrap.R
|
#- Make the retranslocation coefficient
# Compute the per-ring leaf P retranslocation coefficient for EucFACE:
# (green-leaf P% - senesced-leaf P%) / green-leaf P%, averaged over all
# sampling campaigns. Also writes two diagnostic PDFs to plots_tables/.
# Returns a data.frame with Ring, retrans_coef and CO2 treatment.
make_leaf_p_retranslocation_coefficient_bootstrap <- function(){
  df <- read.csv("download/FACE_P0020_RA_leafP-Eter_20130201-20151115_L1.csv")
  ### setting up the date ("Feb-13" style campaign labels -> Date)
  df$Date <- paste0("1-", as.character(df$Campaign))
  df$Date <- as.Date(df$Date, "%d-%b-%y")
  ### per ring leaf P % - litter
  df.litter <- subset(df, Type == "Leaf litter")
  ### Leaf litter p, average across rings, ignore dates, unit = %
  df.litter.p <- summaryBy(PercP~Ring,
                           data=df.litter,FUN=mean,keep.names=T,na.rm=T)
  ### per ring leaf P % - green
  df.green <- subset(df, Type == "green leaf")
  ### Leaf green p, average across rings, ignore dates, unit = %
  df.green.p <- summaryBy(PercP~Ring,
                          data=df.green,FUN=mean,keep.names=T,na.rm=T)
  ### per ring leaf P % - dead
  ### NOTE(review): "sceneced" matches the spelling used in the data file —
  ### do not "fix" it without checking the raw data.
  df.dead <- subset(df, Type == "sceneced leaf")
  ### Leaf dead p, average across rings, ignore dates, unit = %
  df.dead.p <- summaryBy(PercP~Ring,
                         data=df.dead,FUN=mean,keep.names=T,na.rm=T)
  ### compare P% across green, dead and litter leaves
  ### NOTE(review): require() inside a function only warns when ggplot2 is
  ### missing; library() at the top of the file would fail fast instead.
  require(ggplot2)
  pdf("plots_tables/Leaf_P_concentration.pdf")
  plotDF <- rbind(df.green.p, df.dead.p, df.litter.p)
  plotDF$Category <- rep(c("green", "dead", "litter"), each = 6)
  p <- ggplot(plotDF, aes(Ring, PercP)) +
    geom_bar(aes(fill = Category), position = "dodge", stat="identity") +
    xlab("Ring") + ylab("P concentration (%)") +
    ggtitle("P concentration comparison across leaf tissues")
  plot(p)
  dev.off()
  ### calculate leaf P retranslocation rate based on dead and green leaf
  retransDF <- cbind(df.green.p, df.dead.p$PercP)
  colnames(retransDF) <- c("Ring", "green", "dead")
  retransDF$retrans_coef <- (retransDF$green - retransDF$dead) / retransDF$green
  #retransDF$retrans_coef <- 1 - (retransDF$percent_diff/retransDF$green)
  ### Plot eCO2 effect on retranslocation coefficient
  ### NOTE(review): the ring -> treatment mapping is hard-coded in ring
  ### order (1..6) = e,a,a,e,e,a — confirm against the experiment design.
  retransDF$CO2 <- c("eCO2", "aCO2", "aCO2", "eCO2", "eCO2", "aCO2")
  pdf("plots_tables/CO2_effect_on_P_retranslocation_coefficient.pdf")
  p <- ggplot(retransDF, aes(CO2, retrans_coef*100, color=factor(Ring))) +
    geom_point(size = 5) +
    xlab("Treatment") + ylab("Leaf P retranslocation coefficient (%)") +
    ggtitle("CO2 effect on P retranslocation coefficient") +
    scale_color_manual(values=c("#FF7F50", "#00FFFF", "#6495ED",
                                "#FF4040", "#8B0000", "#0000FF"))
  plot(p)
  dev.off()
  outDF <- retransDF[,c("Ring", "retrans_coef", "CO2")]
  return(outDF)
}
|
06089eb06adc0611a740473d927e17f957b27f2e
|
ba0c0961efc8eccdb432ea21552f65d461f44518
|
/tests/testthat/test_biosample_api.R
|
1a88228dcf6e5d34a12c0682fbdc1dfcafde95b7
|
[] |
no_license
|
waldronlab/omicidxClientR
|
a3addddb5c455228a6d33d4af7ea9360ff1e8c6f
|
2296e785c7acc670d9c7f9821934cbbc80bc2741
|
refs/heads/main
| 2023-03-25T22:50:15.624236
| 2021-03-18T17:41:30
| 2021-03-18T17:41:30
| 349,137,249
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,487
|
r
|
test_biosample_api.R
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
# NOTE(review): all three tests below are generated stubs — every operation
# call and expectation is still commented out, so this suite currently
# asserts nothing. Fill in the calls/expectations before relying on it.
context("Test BiosampleApi")
api.instance <- BiosampleApi$new()
test_that("BiosampleByAccessionBiosampleSamplesAccessionGet", {
  # tests for BiosampleByAccessionBiosampleSamplesAccessionGet
  # base path: http://localhost
  # Biosample By Accession
  # @param accession character An accession for lookup
  # @param include.fields array[character] Fields to include in results. The default is to all fields (*) (optional)
  # @param exclude.fields array[character] Fields to exclude from results. The default is to not exclude any fields. (optional)
  # @return [AnyType]
  # uncomment below to test the operation
  #expect_equal(result, "EXPECTED_RESULT")
})
test_that("BiosamplesBiosampleSamplesGet", {
  # tests for BiosamplesBiosampleSamplesGet
  # base path: http://localhost
  # Biosamples
  # @param q character The query, using [lucene query syntax](https://lucene.apache.org/core/3_6_0/queryparsersyntax.html) (optional)
  # @param size integer (optional)
  # @param cursor character The cursor is used to scroll through results. For a query with more results than `size`, the result will include `cursor` in the result json. Use that value here and re-issue the query. The next set or results will be returned. When no more results are available, the `cursor` will again be empty in the result json. (optional)
  # @param facet.size integer The maximum number of records returned for each facet. This has no effect unless one or more facets are specified. (optional)
  # @param include.fields array[character] Fields to include in results. The default is to all fields (*) (optional)
  # @param exclude.fields array[character] Fields to exclude from results. The default is to not exclude any fields. (optional)
  # @param facets array[character] A list of strings identifying fields for faceted search results. Simple term faceting is used here, meaning that fields that are short text and repeated across records will be binned and counted. (optional)
  # @return [ResponseModel]
  # uncomment below to test the operation
  #expect_equal(result, "EXPECTED_RESULT")
})
test_that("MappingBiosampleFieldsEntityGet", {
  # tests for MappingBiosampleFieldsEntityGet
  # base path: http://localhost
  # Mapping
  # @param entity character
  # @return [AnyType]
  # uncomment below to test the operation
  #expect_equal(result, "EXPECTED_RESULT")
})
|
f7fa0ad2bed996352c56d6e73f94d4929787ca29
|
ba2b161d5fa2ade933922a8d6719e73ea41e2560
|
/run_analysis.R
|
b58db44df011d552ad04b577db3f335d308e097b
|
[] |
no_license
|
faaransaleem/course3project
|
ab98553bcad8c13ce464941b6731bf5a2b4210c2
|
ef8c4f379276d29fe7a83205dffc21ab5801306a
|
refs/heads/master
| 2020-03-21T00:21:09.186712
| 2018-06-20T11:43:24
| 2018-06-20T11:43:24
| 137,888,633
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,567
|
r
|
run_analysis.R
|
# Course project script: build a tidy summary of the UCI HAR (Human Activity
# Recognition) dataset. Reads the raw test/train files, keeps mean/std
# features, merges subject + activity + measurements, and writes the average
# of each variable per subject/activity to course3project.txt.
library("data.table")
##Play with standard files
# Column 2 of activity_labels.txt holds the label text; column 2 of
# features.txt holds the feature names. `ourfeatures` flags mean/std features.
act <- read.table("UCI HAR Dataset/activity_labels.txt")[,2]
features <- read.table("UCI HAR Dataset/features.txt")[,2]
ourfeatures <- grepl("mean|std" , features)
##Play with Test files
xtest <- read.table("./UCI HAR Dataset/test/X_test.txt")
ytest <- read.table("./UCI HAR Dataset/test/y_test.txt")
subtest <- as.data.table(read.table("./UCI HAR Dataset/test/subject_test.txt"))
# Name measurement columns after the features, then keep only mean/std ones.
names(xtest) <- features
xtest <- xtest[,ourfeatures]
# Add a human-readable activity name alongside the numeric code.
ytest[,2] <- act[ytest[,1]]
names(ytest) <- c( "V1" = "actcode" , "V2" = "actname")
names(subtest) <- c("V1" = "subject")
##one data set for test files
datatest <- cbind(subtest,xtest,ytest)
##Play with Train files
# Same processing as the test files above, applied to the train split.
xtrain <- read.table("./UCI HAR Dataset/train/X_train.txt")
ytrain <- read.table("./UCI HAR Dataset/train/y_train.txt")
subtrain <- as.data.table(read.table("./UCI HAR Dataset/train/subject_train.txt"))
names(xtrain) <- features
xtrain <- xtrain[,ourfeatures]
ytrain[,2] <- act[ytrain[,1]]
names(ytrain) <- c( "V1" = "actcode" , "V2" = "actname")
names(subtrain) <- c("V1" = "subject")
##one data set for train files
datatrain <- cbind(subtrain,xtrain,ytrain)
##combining train and test files
data <- rbind(datatest, datatrain)
# Long format: one row per (subject, activity, variable) measurement.
grouped <- melt(data, id = c("subject", "actcode", "actname"), measure.vars = setdiff(colnames(data),c("subject", "actcode", "actname")))
##step5
# Wide tidy output: mean of every variable for each subject/activity pair.
clean <- dcast(grouped, subject + actname ~ variable, mean)
## writing files
write.table(clean, file = "./course3project.txt", row.name = FALSE)
|
cf03d1a9da4fb9d0eeee970f4508461db24afb1d
|
deeb61b4710c15dd88c79843c0b80bdff0231d57
|
/R/krikinton-package.R
|
ec56a8007f41f98686692630e587c8e13e3cd3c0
|
[
"LGPL-2.0-or-later",
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MIT"
] |
permissive
|
paithiov909/krikinton
|
e7dcd9035facdedd17061bf64662441315710e8f
|
d5b8f716b627379beab5769b4b51a3a70ddae3e5
|
refs/heads/main
| 2023-06-03T20:27:37.525416
| 2021-06-21T14:14:13
| 2021-06-21T14:14:13
| 304,878,749
| 1
| 0
|
MIT
| 2021-02-15T16:56:49
| 2020-10-17T13:00:50
|
R
|
UTF-8
|
R
| false
| false
| 351
|
r
|
krikinton-package.R
|
# Package-level documentation block. The roxygen2 tags below generate the
# package help page and the NAMESPACE import directives when the package is
# documented; "_PACKAGE" is the roxygen2 sentinel for package docs.
#' krikinton: rJava Wrapper of Sudachi and Kintoki
#' @docType package
#' @name krikinton
#' @import rJava
#' @import dplyr
#' @import purrr
#' @importFrom jsonlite toJSON
#' @importFrom pkgload is_dev_package
#' @importFrom stringi stri_enc_toutf8
#' @importFrom tibble tibble as_tibble
#' @importFrom tidyr separate
#' @keywords internal
"_PACKAGE"
|
50f958d3fbff169154dfb6cb849418b0ae272a13
|
17a7f2333706ad280247d187f4aedbeb32714714
|
/ui.R
|
46700db4f8109312f8812b4ee4de33303af05967
|
[] |
no_license
|
t707722/city-weather
|
90c3599d55f8ad0104dc2686ffacc132187a0eea
|
ef1eafcd9e246f8518b8ee0054c63be661f9d68b
|
refs/heads/master
| 2020-09-08T02:13:14.001145
| 2018-09-04T13:26:26
| 2018-09-04T13:26:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 502
|
r
|
ui.R
|
# Shiny UI: a city selector, a highcharts weather plot, and the README text
# (with embedded image markup stripped) rendered underneath.
# NOTE(review): depends on names loaded elsewhere in the app — shinytheme()
# (shinythemes), highchartOutput() (highcharter), and the `cities` data
# frame — none of which are visible in this file.
shinyUI(
  fluidPage(
    theme = shinytheme("yeti"),
    # App title (Russian): "Weather in Russian million-plus cities"
    titlePanel("Погода в российских городах-миллионниках"),
    tags$br(),
    fluidRow(column(4, offset = 1, selectInput("city", NULL, choices = cities$city, selectize = TRUE, width = "100%"))),
    fluidRow(
      column(7, offset = 1, highchartOutput("hc1", height = 648))
    ),
    # Strip <p><img .../></p> tags from README.md before inlining it.
    fluidRow(column(5, offset = 1, gsub("<p><img src=\".*\"/></p>", "", includeMarkdown("README.md"))))
  )
)
|
8da5721556ebe840c1d6cd74b423aa8c870a894f
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/steadyICA/R/kcdf_fun.R
|
7a83db39e17d61045f10c06d205a0cc77e3b5cdc
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,872
|
r
|
kcdf_fun.R
|
#-----------------------
# Benjamin Risk
# 25 February 2013
# Edits to David Matteson's PITdCovICA code.
#-----------------------
#---------------------------------------------------
# Benjamin Risk
# 12 March 2013
# modified stats::density.default to return the distribution and the density;
# note that only the Gaussian kernel works--additional edits needed for other kernels.
#-----------------------------------------------------
# Thin wrapper around the compiled routine 'BinDistC' (presumably registered
# by the enclosing package — the C source is not visible here). Bins the
# weighted observations x onto an n-point grid over [lo, hi]; arguments are
# passed through unchanged.
BinDist <- function(x,weights,lo,hi,n){
  .Call('BinDistC',x,weights,lo,hi,n)
}
# Kernel estimate of both the CDF and the PDF of x, adapted from
# stats::density.default (see the header comments above). Only the Gaussian
# kernel is implemented: the kernel ordinates are replaced by pnorm()/dnorm()
# and convolved with the binned data via FFT.
#
# Args (mirroring stats::density):
#   x        numeric data vector; NAs are an error
#   bw       bandwidth rule name ("SJ", "nrd0", "nrd", "ucv", "bcv", ...)
#   adjust   multiplier applied to the selected bandwidth
#   kernel   kernel name; only "gaussian" is supported
#   weights  optional observation weights (should sum to 1)
#   n        number of grid points requested by the caller
#   from,to  evaluation range; defaults to range(x) extended by cut*bw
#   cut      number of bandwidths to extend beyond the data range
#
# Returns: list(Fx = CDF as an approxfun, fx = PDF as an approxfun).
kcdf<-function(x, bw = "SJ", adjust = 1, kernel ="gaussian", weights=NULL, n = 512, from, to, cut = 3) {
  x = sort(x)
  if (!is.numeric(x)) stop("argument 'x' must be numeric")
  x <- as.vector(x)
  x.na <- is.na(x)
  if (any(x.na)) stop("'x' contains missing values")
  N <- nx <- length(x)
  x.finite <- is.finite(x)
  if (any(!x.finite)) {
    x <- x[x.finite]
    nx <- length(x)
  }
  if (is.null(weights)) {
    # Equal weights; totMass < 1 if non-finite points were dropped.
    weights <- rep.int(1/nx, nx)
    totMass <- nx/N
  }
  else {
    if (length(weights) != N)
      stop("'x' and 'weights' have unequal length")
    if (!all(is.finite(weights)))
      stop("'weights' must all be finite")
    if (any(weights < 0))
      stop("'weights' must not be negative")
    wsum <- sum(weights)
    if (any(!x.finite)) {
      weights <- weights[x.finite]
      totMass <- sum(weights)/wsum
    }
    else totMass <- 1
    if (!isTRUE(all.equal(1, wsum)))
      warning("sum(weights) != 1 -- will not get true density")
  }
  # Internal FFT grid is at least 512 points and a power of two.
  n.user <- n
  n <- max(n, 512)
  if (n > 512)
    n <- 2^ceiling(log2(n))
  if (is.character(bw)) {
    if (nx < 2)
      stop("need at least 2 points to select a bandwidth automatically")
    bw <- switch(tolower(bw), nrd0 = bw.nrd0(x), nrd = bw.nrd(x), ucv = bw.ucv(x), bcv = bw.bcv(x), sj = , `sj-ste` = bw.SJ(x,method = "ste"), `sj-dpi` = bw.SJ(x, method = "dpi"),stop("unknown bandwidth rule"))
  }
  if (!is.finite(bw))
    stop("non-finite 'bw'")
  bw <- adjust * bw
  if (bw <= 0)
    stop("'bw' is not positive.")
  if (missing(from))
    from <- min(x) - cut * bw
  if (missing(to))
    to <- max(x) + cut * bw
  if (!is.finite(from))
    stop("non-finite 'from'")
  if (!is.finite(to))
    stop("non-finite 'to'")
  # Extra 4*bw padding (as in stats::density.default) so the circular FFT
  # convolution does not wrap appreciable kernel mass around the grid ends.
  lo <- from - 4 * bw
  up <- to + 4 * bw
  y <- BinDist(x, weights, lo, up, n)*totMass
  # Build the kernel ordinates on a symmetric circular grid: the second half
  # holds the mirrored negative offsets required for FFT convolution.
  kords <- seq.int(0, 2 * (up - lo), length.out = 2L * n)
  kords[(n + 2):(2 * n)] <- -kords[n:2]
  # Unlike density(), evaluate both the kernel CDF (pnorm) and PDF (dnorm)
  # at the grid offsets, then convolve each with the binned data.
  kords.temp <- kords
  kords <- pnorm(-kords.temp, sd = bw)
  kords.den <- dnorm(kords.temp, sd = bw)
  rm(kords.temp)
  kords <- fft(fft(y) * Conj(fft(kords)), inverse = TRUE)
  kords.den <- fft(fft(y) * Conj(fft(kords.den)), inverse = TRUE)
  # Keep the real part, clamp tiny negative FFT round-off to zero.
  kords <- pmax.int(0, Re(kords)[1L:n]/length(y))
  kords.den <- pmax.int(0, Re(kords.den)[1L:n]/length(y))
  xords <- seq.int(lo, up, length.out = n)
  rval <- approxfun(xords, kords, method = "linear", yleft = 0, yright = 1, f = 0, ties = "ordered")
  # Fix: outside the evaluation range the density is 0. The original passed
  # the range endpoints (yleft = lo, yright = up), so evaluating fx outside
  # [lo, up] returned grid coordinates instead of density values.
  denval <- approxfun(xords, kords.den, method = "linear", yleft = 0, yright = 0, f = 0, ties = "ordered")
  class(rval) <- c("ecdf", "stepfun", class(rval))
  attr(rval, "call") <- sys.call()
  # Fix: build the pdf classes from denval's own class. The original appended
  # class(rval), which by this point already contained "ecdf"/"stepfun" and
  # caused wrong S3 dispatch (e.g. stepfun printing) on the density function.
  class(denval) <- c("pdf", "fun", class(denval))
  attr(denval, "call") <- sys.call()
  return(list(Fx = rval, fx = denval))
}
#------------------------
# Probability integral transform of each column of S using the kernel CDF.
# For every column j, fits kcdf() and evaluates both the estimated CDF (Fx)
# and density (fx) at the observed values of that column.
# Returns list(Fx = matrix of CDF values, fx = matrix of density values),
# each with the same dimensions as S.
est.PIT <- function(S, bw = "nrd0", adjust = 1) {
  n_obs <- nrow(S)
  n_comp <- ncol(S)
  cdf_vals <- matrix(0, n_obs, n_comp)
  pdf_vals <- matrix(0, n_obs, n_comp)
  for (comp in seq_len(n_comp)) {
    fit <- kcdf(S[, comp], bw = bw, adjust = adjust)
    cdf_vals[, comp] <- fit$Fx(S[, comp])
    pdf_vals[, comp] <- fit$fx(S[, comp])
  }
  list(Fx = cdf_vals, fx = pdf_vals)
}
|
f3f71e8f7c128e48dc6a125e984b47cb1cc49d43
|
c2e28f45847f8f5170d7ed90d406d9d5c3594625
|
/man/norm.samps.Rd
|
00e5d5d4a8323a5568d4bb73b27d196d9220cd23
|
[] |
no_license
|
mdedge/stfspack
|
102d4ef512f21073dc2593db2265630040214357
|
3e7027d677c9017a0e3abaed7d99ef2ac7cf5d5d
|
refs/heads/master
| 2020-03-29T07:03:16.471914
| 2018-09-21T22:33:01
| 2018-09-21T22:33:01
| 149,651,412
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 879
|
rd
|
norm.samps.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R_functions.R
\name{norm.samps}
\alias{norm.samps}
\title{Generate a matrix of samples from a normal distribution.}
\usage{
norm.samps(mu = 0, sigma = 1, n = 25, nsamps = 10000)
}
\arguments{
\item{mu}{The expectation of the normal distribution from which to draw samples.}
\item{sigma}{The standard deviation of the normal distribution from which to draw samples.}
\item{n}{The number of independent observations to include in each sample.}
\item{nsamps}{The number of samples to generate.}
}
\value{
A matrix of independent normally distributed random numbers with nsamps rows and n columns.
}
\description{
Draws normal samples and formats them into a matrix, where each row contains a sample.
}
\examples{
norm.samps(10, 1, 5, 8)
}
\keyword{distribution}
\keyword{normal}
\keyword{simulation}
|
3a0a53c18cad4032d182a57b817865e78eb642a0
|
feeae2f7e21a7ed662a5ad9b796631e06fb7f7d1
|
/pensionFuns.R
|
e1247f45789799407edc9bdc583bddd834848e2a
|
[] |
no_license
|
ReasonFoundation/R-sandbox
|
b26e5d1514fc4e25d5f7e27b71c11f2115867731
|
270ffc532244521d8210bbdd984ce82191834d41
|
refs/heads/master
| 2020-03-27T22:55:10.963479
| 2019-10-17T21:19:56
| 2019-10-17T21:19:56
| 147,272,110
| 1
| 0
| null | 2019-06-28T23:04:03
| 2018-09-04T01:37:58
|
HTML
|
UTF-8
|
R
| false
| false
| 18,286
|
r
|
pensionFuns.R
|
# This script contains functions used to load pension plan data either from Reason's database or
# from an excel file.
# Author: Andrew Abbott
# Date: 12/11/2018
# Color Scheme
# All images should use web safe colors — this gives us a range of orange and blue
# colors that fit with Reason’s branding, as well as reds and greens that we can use to
# indicate positive or negative data patterns. In general, it is best to choose from the
# following palette of colors:
# • Orange (FF6633): Orange that matches with Reason’s logo
# • Yellow (FFCC33)
# • Dark Grey/Blue (333333)
# • Light Blue (3399CC)
# • Royal Blue (3366CC)
# • Grey (6699CC)
# Use the orange and yellow colors to emphasize attention to lines or areas of
# interest.
# For graphs that require a clear positive/negative emphasis, you can use the
# following colors:
# • Green (669933): for positive
# • Red (990000): for negative
# This function installs the required packages.
# Usage: installRequiredPackages()
# Install any of the packages this file depends on that are not yet present.
# Collects the missing packages first and installs them with a single
# install.packages() call (the original looped with sapply, issuing one
# install call per package and using sapply purely for side effects).
# Returns (invisibly) the character vector of packages that were installed.
installRequiredPackages <- function() {
  packages_needed <- c('tidyverse', 'RPostgres', 'ggplot2', 'httr', 'ggthemes',
                       'extrafont', 'scales', 'DT', 'lubridate', 'janitor',
                       'config', 'here')
  missing_pkgs <- setdiff(packages_needed, rownames(installed.packages()))
  if (length(missing_pkgs) > 0) {
    install.packages(missing_pkgs)
  }
  invisible(missing_pkgs)
}
# This function grabs a list of the plans with their state from the Reason database.
# Use this to find the exact plan names that are used in Reason's database.
# Usage: This function has no parameters so calling the function will return the list of plans.
# A where clause can be added in the query to pull specific plans or plans from specific states.
# It would be inserted above the order by line.
# example: where state.name in ('Texas', 'Arkansas')
# example2: where plan.id in (30,31,33,90,91,466,1469,1473,1875,1877,1878,1913,1915)
# NOTE(review): stray top-level load — ggplot2 is only used by the plotting
# functions further down this file, not by planList().
require(ggplot2)
# Return the list of pension plans (id, display_name, state) from the Reason
# data warehouse, ordered by state. Connection settings come from the
# "datawarehouse" section of the config file (config package). Character
# columns are converted to factors and names cleaned with janitor.
# NOTE(review): the connection is not wrapped in on.exit(), so a query error
# would leak the connection — confirm whether that matters for callers.
planList <- function() {
  require(RPostgres)
  require(httr)
  require(tidyverse)
  require(janitor)
  require(config)
  dw <- config::get("datawarehouse")
  con <- dbConnect(
    Postgres(),
    dbname = trimws(dw$path),
    host = dw$hostname,
    port = dw$port,
    user = dw$username,
    password = dw$password,
    sslmode = "require"
  )
  # define the query to retrieve the plan list
  q1 <- "select plan.id,
display_name,
state.name as State
from plan
inner join government
on plan.admin_gov_id = government.id
inner join state
on government.state_id = state.id
order by state.name"
  # sends the query to the connection
  res <- dbSendQuery(con, q1)
  # fetches the results
  plans <- dbFetch(res)
  plans <- plans
  p_list <- plans %>%
    mutate_if(sapply(plans, is.character), as.factor) %>%
    clean_names()
  # clears the results
  dbClearResult(res)
  # closes the connection
  dbDisconnect(con)
  p_list
}
####################################################################
# Description: This function pulls data for a selected plan from the Reason database.
# Parameters: pl is the variable containing the plan list returned by the planList() function.
# The second parameter is the plan's name as found in the plan list.
# Usage: example: allData <- pullData(pl, "Kansas Public Employees' Retirement System")
# Pull all annual attributes for one plan from the Reason data warehouse and
# return them in wide format (one column per attribute, one row per
# year/data-source combination).
# Parameters:
#   pl        plan list returned by planList() (used to look up the plan id)
#   plan_name the plan's display_name exactly as it appears in `pl`
# The query is parameterized ($1 bound via dbBind) and restricted to years
# >= 1980 with data_source_id <> 1.
pullData <-
  function(pl, plan_name = "Texas Employees Retirement System") {
    require(RPostgres)
    require(httr)
    require(tidyverse)
    require(janitor)
    require(config)
    dw <- config::get("datawarehouse")
    con <- dbConnect(
      Postgres(),
      dbname = trimws(dw$path),
      host = dw$hostname,
      port = dw$port,
      user = dw$username,
      password = dw$password,
      sslmode = "require"
    )
    # define the query to retrieve the plan data
    query <- "select plan_annual_attribute.year,
  plan.id,
  plan.display_name,
  state.name as state,
  plan_attribute.name as attribute_name,
  plan_annual_attribute.attribute_value,
  data_source_id,
  data_source.name as data_source_name
  from plan_annual_attribute
  inner join plan
  on plan_annual_attribute.plan_id = plan.id
  inner join government
  on plan.admin_gov_id = government.id
  inner join state
  on government.state_id = state.id
  inner join plan_attribute
  on plan_annual_attribute.plan_attribute_id = plan_attribute.id
  inner join data_source
  on plan_attribute.data_source_id = data_source.id
  where cast(plan_annual_attribute.year as integer) >= 1980 and
  data_source_id <> 1 and
  plan_id = $1"
    plan_id <- pl$id[pl$display_name == plan_name]
    result <- dbSendQuery(con, query)
    dbBind(result, list(plan_id))
    all_data <- dbFetch(result) %>%
      clean_names()
    dbClearResult(result)
    dbDisconnect(con)
    # Long -> wide: one column per attribute_name. The temporary row_id keeps
    # duplicate (group, attribute) rows distinct so spread() does not error.
    all_data %>%
      group_by_at(vars(-attribute_value)) %>% # group by everything other than the value column.
      mutate(row_id = 1:n()) %>%
      ungroup() %>% # build group index
      spread(attribute_name, attribute_value, convert = TRUE) %>% # spread
      select(-row_id) %>% # drop the index
      clean_names()
  }
####################################################################
# Description: This function loads plan data from an Excel file
# Parameters: The filename including the path if in a subdirectory
# Usage: allWide <- loadData('data/NorthCarolina_PensionDatabase_TSERS.xlsx')
# Load plan data from an Excel workbook, forcing every column to numeric on
# read, and standardize the column names with janitor::clean_names().
# `filename` includes any subdirectory path, e.g. "data/plan.xlsx".
loadData <- function(filename) {
  require(tidyverse)
  require(janitor)
  require(readxl)
  raw <- read_excel(filename, col_types = "numeric")
  clean_names(raw)
}
####################################################################
# Description: This function selects the data used in the 'mountain of debt' graph
# Parameters:
# wideData = a datasource in wide format
# .year_var = the name of the column conatining the year
# .aal_var = the name of the column containing the AAL, default is Reason db column name
# .asset_var = the name of the column containing the Actuarial Assets, default to Reason db name.
# base: Does the plan report their numbers by the thousand dollar or by the dollar?
# default is 1000, change to 1 for plans that report by the dollar
# Usage: data <- modData(allWide,
# .year_var = 'Fiscal Year End',
# .aal_var = 'Actuarial Accrued Liability',
# .asset_var = 'Actuarial Value of Assets',
# base = 1)
# Select and derive the columns used by the "mountain of debt" graph.
# Parameters:
#   wide_data  plan data in wide format
#   .year_var  column name (string) holding the fiscal year
#   .aal_var   column name (string) holding actuarial accrued liabilities
#   .asset_var column name (string) holding actuarial assets
#   base       reporting unit: 1000 for plans reporting in thousands of
#              dollars (default), 1 for plans reporting in dollars
# Returns a tibble with year, actuarial_assets, aal, uaal (all scaled by
# `base`) and funded_ratio, with incomplete rows dropped.
modData <- function(wide_data,
                    .year_var = "year",
                    .aal_var = "actuarial_accrued_liabilities_under_gasb_standards",
                    .asset_var = "actuarial_assets_under_gasb_standards",
                    base = 1000) {
  require(tidyverse)
  # Turn the column-name strings into symbols for tidy evaluation.
  year_col <- sym(.year_var)
  aal_col <- sym(.aal_var)
  asset_col <- sym(.asset_var)
  wide_data %>%
    select(year = !!year_col, actuarial_assets = !!asset_col, aal = !!aal_col) %>%
    mutate(
      # UAAL = liabilities minus assets; funded ratio = assets / liabilities.
      uaal = as.numeric(aal) - as.numeric(actuarial_assets),
      funded_ratio = as.numeric(actuarial_assets) / as.numeric(aal),
      # Rescale the dollar figures by the reporting base (the ratio is
      # unitless, so it is left alone).
      actuarial_assets = as.numeric(actuarial_assets) * base,
      aal = as.numeric(aal) * base,
      uaal = uaal * base
    ) %>%
    drop_na()
}
####################################################################
# Description: This saves the theme for reuse in multiple plots
# must have ggplot2 require loaded
# Parameters: none
# Usage: ggplot(...) + reasonTheme
# Shared ggplot2 theme used by all plots in this file (requires ggplot2 to be
# loaded; see the require(ggplot2) call near the top of the file).
reasonTheme <- theme(
  # removes legend
  legend.position = "none",
  # details the x-axis text
  axis.text.x = element_text(
    face = "bold",
    size = 14,
    # 0.5 centers the label on the tick mark
    vjust = 0.5,
    angle = 90,
    color = "black"
  ),
  axis.title.x = element_blank(),
  # axis lines set to black
  axis.line.x = element_line(color = "black"),
  axis.line.y = element_line(color = "black"),
  # left and right y-axis title and text fonts set
  axis.title.y.left = element_text(face = "bold", size = 14, color = "black"),
  axis.text.y.left = element_text(face = "bold", size = 14, color = "black"),
  axis.title.y.right = element_text(face = "bold", size = 14, color = "black"),
  axis.text.y.right = element_text(face = "bold", size = 14, color = "black"),
  # sets the background to blank white
  panel.background = element_blank()
)
####################################################################
# Description: This function creates the mountain of debt graph
# Parameters:
# data: the dataframe created by the modData function
# Usage: modGraph(data)
# Draw the "mountain of debt" graph: UAAL as a filled area (red when
# positive, green when negative) with the funded ratio overlaid as a blue
# line on a secondary axis.
# Parameter: `data` is the data frame produced by modData() (needs columns
# year, uaal, funded_ratio).
# NOTE(review): the secondary-axis transform divides by max(uaal); it
# presumably assumes the plan's peak UAAL is positive — confirm for plans
# that were always overfunded.
modGraph <- function(data) {
  require(tidyverse)
  require(ggthemes)
  require(extrafont)
  require(scales)
  # extrapolate between years linearly (10,000 points smooths the area fill)
  extrapo <- approx(data$year, data$uaal, n = 10000)
  extrapo2 <- approx(data$year, data$funded_ratio, n = 10000)
  graph <-
    data.frame(
      year = extrapo$x,
      uaal = extrapo$y,
      funded_ratio = extrapo2$y
    )
  # create a "negative-positive" column for fill aesthetic
  graph$sign[graph$uaal >= 0] <- "positive"
  graph$sign[graph$uaal < 0] <- "negative"
  ggplot(graph, aes(x = year)) +
    # area graph using pos/neg for fill color
    geom_area(aes(y = uaal, fill = sign)) +
    # line tracing the area graph
    geom_line(aes(y = uaal)) +
    # funded ratio rescaled so 100% lines up with the peak UAAL
    geom_line(aes(y = funded_ratio * (max(graph$uaal))), color = "#3300FF", size = 1) +
    # axis labels
    labs(y = "Unfunded Accrued Actuarial Liabilities", x = NULL) +
    # colors assigned to pos, neg (green = surplus, red = debt)
    scale_fill_manual(values = c("negative" = "#669900", "positive" = "#CC0000")) +
    # sets the y-axis scale
    scale_y_continuous(
      # creates 10 break points for labels
      breaks = pretty_breaks(n = 10),
      # changes the format to be dollars, without cents, scaled to be in billions
      labels = dollar_format(
        prefix = "$",
        scale = (1e-9),
        largest_with_cents = 1
      ),
      # defines the right side y-axis as a transformation of the left side axis, maximum UAAL = 100%, sets the breaks, labels
      sec.axis = sec_axis(
        ~ . / (max(graph$uaal) / 100),
        breaks = pretty_breaks(n = 10),
        name = "Funded Ratio",
        labels = function(b) {
          paste0(round(b, 0), "%")
        }
      ),
      # removes the extra space so the fill is at the origin
      expand = c(0, 0)
    ) +
    # sets the x-axis scale
    scale_x_continuous( # sets the years breaks to be every 2 years
      breaks = round(seq(min(graph$year), max(graph$year), by = 2), 1),
      expand = c(0, 0)
    ) +
    # adds the Reason theme defined previously
    reasonTheme
}
####################################################################
# Description: This function creates a data table containing the data in the mountain of debt graph.
# Parameters:
# data: the dataframe created by the modData function
# Usage: modTable(data)
# Render the mountain-of-debt data as an interactive DT datatable with
# copy/csv/Excel/pdf export buttons, currency formatting on the dollar
# columns and percentage formatting on the funded ratio.
# Parameter: `data` is the data frame produced by modData().
modTable <- function(data) {
  require(DT)
  require(tidyverse)
  data <- data %>%
    # give the columns pretty names
    rename(
      "Year" = year,
      "Actuarial Assets" = actuarial_assets,
      "Actuarial Accrued Liabilities" = aal,
      "Unfunded Actuarial Accrued Liabilities" = uaal,
      "Funded Ratio" = funded_ratio
    )
  # create a datatable
  datatable(
    data,
    # add buttons for export, etc.
    extensions = c("Buttons"),
    # remove row names
    rownames = FALSE,
    # allow editing the table, experimenting with this one
    editable = TRUE,
    options = list(
      bPaginate = FALSE,
      scrollX = T,
      scrollY = "600px",
      # dom = "Brt": buttons + processing + table only (no search box/pager)
      dom = "Brt",
      buttons = list(
        "copy",
        list(
          extend = "csv",
          text = "csv",
          title = "MOD"
        ),
        list(
          extend = "excel",
          text = "Excel",
          title = "MOD"
        ),
        list(
          extend = "pdf",
          text = "pdf",
          title = "MOD"
        )
      )
    )
  ) %>%
    # columns 2-4 are dollars; column 5 is the funded ratio (2 decimals)
    formatCurrency(c(2:4)) %>%
    formatPercentage(5, 2)
}
####################################################################
# Description: This function creates a graph in the Gain/Loss format
# Parameters:
# filename: the name of the file containing the gain/loss data
# ylab: The y-axis label, default set
# Usage: glGraph(filename = 'data/Graph 1.csv')
# Draw a gain/loss bar chart from a one-row CSV whose column names are the
# category labels and whose values are the changes in unfunded liability.
# Bars are colored red (increase), green (decrease) or orange (the "Total"
# column), and ordered by sign group then value, descending.
# Parameters:
#   filename  path to the CSV of gain/loss categories
#   ylab      y-axis label (default provided)
glGraph <-
  function(filename, ylab = "Changes in Unfunded Liability (in Billions)") {
    require(ggplot2)
    require(tidyverse)
    graph1 <- read_csv(filename) %>% # load data from csv file
      gather("label", "value") %>% # put in long format with label-value pairs
      mutate(label = str_wrap(label, 8)) %>% # wrap the label names to clean up axis labels
      mutate(label = str_to_title(label)) %>% # properly capitalize the labels
      # assign pos/neg/total to the values for fill color
      mutate(
        sign = case_when(
          value >= 0 ~ "positive",
          value < 0 ~ "negative"
        )
      ) %>%
      mutate(sign = case_when(label == "Total" ~ "total", TRUE ~ sign)) %>%
      mutate(sign = factor(sign, levels = c("total", "negative", "positive"))) %>%
      # reorder the label factor so bars appear grouped by sign, then by value
      mutate(label = factor(label, levels = label[order(sign, value, label, decreasing = TRUE)], ordered = TRUE))
    # assign colors to go with signs (green = gain, red = loss, orange = total)
    fill_colors <- c(
      "negative" = "#669900",
      "positive" = "#CC0000",
      "total" = "#FF6633"
    )
    # create plot
    ggplot(graph1, aes(x = label, y = value)) +
      geom_col(width = 0.75, aes(fill = sign)) +
      geom_hline(yintercept = 0, color = "black") +
      scale_fill_manual(values = fill_colors) +
      scale_y_continuous(breaks = pretty_breaks(), labels = dollar_format(prefix = "$")) +
      ylab(ylab) +
      reasonTheme +
      theme(
        axis.line.x = element_blank(),
        axis.ticks.x = element_blank(),
        axis.text.x = element_text(angle = 0)
      )
    # ggsave("graph1.2.png", width = 9, height = 5.33)
  }
####################################################################
# Description: This function selects the data used in several graphs
# Parameters:
# wideData = a datasource in wide format
# .date_var = column name for valuation date. Default: 'Actuarial Valuation Date For GASB Assumptions',
# .aal_var = column name AAL. Default: 'Actuarial Accrued Liabilities Under GASB Standards',
# .asset_var = column name for Actuarial Assets. Default: 'Actuarial Assets under GASB standards',
# .adec_var = column name for ADEC. Default: 'Employer Annual Required Contribution',
# .emp_cont_var = column name for employer contributions. Default: 'Employer Contributions',
# .payroll_var = column name for payroll. Default: 'Covered Payroll'
# Usage: data <- selected_Data(wideData,
# date_var = 'Actuarial Valuation Date For GASB Assumptions',
# aal_var = 'Actuarial Accrued Liabilities Under GASB Standards',
# asset_var = 'Actuarial Assets under GASB standards',
# adec_var = 'Employer Annual Required Contribution',
# emp_cont_var = 'Employer Contributions',
# payroll_var = 'Covered Payroll')
# Select and derive the columns used by several graphs: funded ratio, UAAL,
# and ADEC vs. actual contribution rates (each contribution series divided by
# covered payroll).
# Parameters are column-name strings for the valuation date, AAL, assets,
# ADEC, employer contributions and payroll; defaults match the Reason
# database's cleaned column names.
# NOTE(review): the valuation date is assumed to be stored as an Excel serial
# number (hence janitor::excel_numeric_to_date) — confirm for non-Excel
# sources.
selectedData <- function(wide_data,
                         .date_var = "actuarial_valuation_date_for_gasb_assumptions",
                         .aal_var = "actuarial_accrued_liabilities_under_gasb_standards",
                         .asset_var = "actuarial_assets_under_gasb_standards",
                         .adec_var = "employer_annual_required_contribution",
                         .emp_cont_var = "employer_contributions",
                         .payroll_var = "covered_payroll") {
  require(tidyverse)
  require(lubridate)
  require(janitor)
  # Turn the column-name strings into symbols for tidy evaluation.
  date_var <- sym(.date_var)
  aal_var <- sym(.aal_var)
  asset_var <- sym(.asset_var)
  adec_var <- sym(.adec_var)
  emp_cont_var <- sym(.emp_cont_var)
  payroll_var <- sym(.payroll_var)
  wide_data %>%
    mutate(
      date = !!date_var
    ) %>%
    # Convert the Excel serial date to a Date, and extract the year.
    mutate(
      year = year(excel_numeric_to_date(as.numeric(date))),
      valuation_date = excel_numeric_to_date(as.numeric(date))
    ) %>%
    select(
      year,
      valuation_date,
      actuarial_assets = !!asset_var,
      aal = !!aal_var,
      adec = !!adec_var,
      emp_cont = !!emp_cont_var,
      payroll = !!payroll_var
    ) %>%
    # Derived measures: UAAL, funded ratio, and contribution rates as a
    # share of covered payroll.
    mutate(
      uaal = as.numeric(aal) - as.numeric(actuarial_assets),
      funded_ratio = as.numeric(actuarial_assets) / as.numeric(aal),
      adec_contribution_rates = as.numeric(adec) / as.numeric(payroll),
      actual_contribution_rates = as.numeric(emp_cont) / as.numeric(payroll)
    ) %>%
    drop_na()
}
####################################################################
# Description: This function creates a graph comparing 2 percentages
# Parameters:
# data: the dataframe created by the selected_Data function
# Usage: contGraph(data)
# Line graph comparing two (optionally three) percentage series over time,
# e.g. ADEC vs. actual contribution rates.
# Parameters:
#   data            data frame with a `year` column plus the series columns
#   y1, y2, y3      column names (strings) of the series; y3 optional
#   labelY          y-axis label
#   label1..label3  legend labels for the corresponding series
# NOTE(review): the y1/y2 defaults use display-style names; selectedData()
# produces snake_case columns — confirm which callers rely on the defaults.
# Fixes vs. the original:
#  * select()/rename() previously passed bare env variables (y1, y2, y3),
#    relying on deprecated tidyselect fallback, and rename(label3 = y3)
#    broke when y3 was NULL; columns are now selected and renamed via a
#    named all_of() vector, from which NULL entries drop automatically.
#  * geom_line() maps the colour aesthetic, so the ineffective
#    scale_fill_manual() is replaced by scale_color_manual().
#  * defunct funs() replaced by across().
contGraph <- function(data,
                      y1 = "ADEC Contribution Rates",
                      y2 = "Actual Contribution Rates (Statutory)",
                      y3 = NULL,
                      labelY = NULL,
                      label1 = NULL,
                      label2 = NULL,
                      label3 = NULL) {
  require(ggplot2)
  require(tidyverse)
  require(scales)
  # Internal series id -> user-supplied column name; NULLs drop out of c().
  series_cols <- c(label1 = y1, label2 = y2, label3 = y3)
  graph <- data %>%
    # Named all_of() both selects and renames the columns explicitly.
    select(year, all_of(series_cols)) %>%
    mutate(across(everything(), as.numeric)) %>%
    gather(key = keys, value = amount, -year)
  # Series colors keyed by the internal ids used in `keys`.
  line_colors <- c(
    label1 = "#FF6633",
    label2 = "#3300FF",
    label3 = "#333333"
  )
  legend_labs <- c(label1, label2, label3)
  if (is.null(legend_labs)) legend_labs <- waiver() # fall back to series ids
  ggplot(graph, aes(x = year)) +
    geom_line(aes(y = amount * 100, color = keys), size = 2) +
    scale_color_manual(values = line_colors, labels = legend_labs) +
    geom_hline(yintercept = 0, color = "black") +
    scale_y_continuous(
      breaks = pretty_breaks(10),
      labels = function(b) {
        paste0(round(b, 0), "%")
      }
    ) +
    scale_x_continuous(breaks = pretty_breaks(10)) +
    ylab(labelY) +
    reasonTheme +
    theme(
      legend.justification = c(1, 1),
      legend.position = c(0.5, 1),
      legend.title = element_blank()
    )
}
|
5bbdd87db74337d13c3785c629f442ea7b8ada74
|
1fba8b717eb4b471d268723e448a2fae9e2c514d
|
/4_dihaploid_pools/analysis/MM_parent_snps.R
|
91e1c2c131f5f973f83d24a70e103d3ccfa75ae3
|
[] |
no_license
|
kramundson/MM_manuscript
|
5cd2b93043aba74f45269204682ed19b07bbfdf6
|
fe5b212eaecb0144c61de4f6075fc2eebff3819f
|
refs/heads/master
| 2023-06-17T05:04:06.022695
| 2021-07-09T22:51:57
| 2021-07-09T22:51:57
| 301,579,338
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,651
|
r
|
MM_parent_snps.R
|
#' ---
#' title: "MM Parent SNPs"
#' author: Kirk Amundson
#' date: 2020_1007
#' output: html_notebook
#' ---
#'
#' Aim: Define parent-specific SNP loci and inducer/non-inducer specific alleles
#' at these loci for low-pass SNP analysis of MM dihaploid cohorts.
#'
#' Low quality sites were filtered out in the preceding step based on attributes
#' of that site across all samples. Here, I implement sample-specific filters
#' to generate a flat tsv of parent-informative SNP loci to use in binned genotyping
#' for chromosome parental origin tests.
#'
#' ## Packages:
## ------------------------------------------------------------------------------------------------------------------------------------------------------------
library(tidyverse)
#'
#' ## Functions:
## ------------------------------------------------------------------------------------------------------------------------------------------------------------
# separate sample-specific VCF attributes: split one sample's packed FORMAT
# column (colon-separated values) into one column per FORMAT attribute,
# named "<sample>_<attribute>".
# NOTE(review): relies on the global `attributes` list built later in this
# script from the VCF FORMAT string, and on the deprecated tidyr::separate_();
# dots[[2]] is assumed to be the sample column name — confirm call sites
# (used via Reduce over sample_vars below).
sep <- function(...) {
  dots <- list(...)
  separate_(..., into = paste(dots[[2]], attributes[[1]], sep = "_"), convert = T, sep = ":")
}
## ------------------------------------------------------------------------------------------------------------------------------------------------------------
# filter to retain only those loci with called homozygous genotypes for
# alternate alleles in two specified samples.
# Arguments:
#   tetraploid    unquoted GT column of the tetraploid parent (e.g. WA077_GT)
#   hi            unquoted GT column of the haploid-inducer parent
#   snplist       wide SNP table (e.g. snps2)
#   dp_threshold  minimum read depth required in each parent's DP column
# Side effects: prints a per-chromosome position histogram and a count of
# retained SNPs. Returns the filtered table.
# Fixes vs. the original:
#  * The DP column names were kept as character strings, so
#    `filter(!!nhi_dp >= dp_threshold)` injected a string literal and the
#    depth filter was a no-op; they are now converted to symbols.
#  * `!!x %in% ...` / `!!x == ...` injections are parenthesized so the
#    unquote unambiguously applies to the column symbol, not the whole
#    comparison expression.
filter_homozygous_vars <- function(tetraploid, hi, snplist, dp_threshold) {
  # ensym() captures the column names as symbols (erroring on non-symbols).
  nhi_gt <- ensym(tetraploid)
  hi_gt <- ensym(hi)
  # Matching depth columns, e.g. WA077_GT -> WA077_DP, as symbols.
  nhi_dp <- sym(str_replace(as.character(nhi_gt), "GT", "DP"))
  hi_dp <- sym(str_replace(as.character(hi_gt), "GT", "DP"))
  hom <- snplist %>%
    filter((!!nhi_gt) %in% c("0/0/0/0", "1/1/1/1")) %>%
    filter((!!nhi_dp) >= dp_threshold) %>%
    filter((!!hi_gt) %in% c("0/0", "1/1")) %>%
    filter((!!hi_dp) >= dp_threshold) %>%
    # Drop loci where both parents are fixed for the same allele.
    filter(!((!!nhi_gt) == "0/0/0/0" & (!!hi_gt) == "0/0")) %>%
    filter(!((!!nhi_gt) == "1/1/1/1" & (!!hi_gt) == "1/1"))
  plt <- hom %>%
    ggplot(., aes(x = POS)) +
    geom_histogram(binwidth = 1e6) +
    facet_wrap(~CHROM, strip.position = "r", nrow = 7) +
    scale_y_log10() +
    theme_bw()
  print(plt)
  print(paste(nrow(hom), "SNP retained", sep = "_"))
  return(hom)
}
#'
#' ## Read in data:
## ------------------------------------------------------------------------------------------------------------------------------------------------------------
# either download files to local or mount to server via, e.g., sshfs
# All pre-filtered per-chromosome call files for this analysis.
files <- dir(pattern = "-filtered-",
             path = "../data/calls/",
             full.names = T)
files
#'
## ------------------------------------------------------------------------------------------------------------------------------------------------------------
# Stack every filtered call file into one long table.
snps <- map_dfr(files, function(x) read_tsv(x, col_names = T, na = "NA"))
#'
## ------------------------------------------------------------------------------------------------------------------------------------------------------------
names(table(snps$FORMAT)) # should only have one entry. does.
# `attributes` holds the per-sample FORMAT fields (e.g. GT:DP:...) used by
# sep() to name the unpacked columns.
attributes <- str_split(names(table(snps$FORMAT[1])), ":", simplify = F)
attributes[[1]]
# Sample columns: everything after the first 49 fixed VCF columns, minus the
# trailing FORMAT column.
sample_vars <- colnames(snps)[-c(seq(1,49), ncol(snps))]
#'
## ------------------------------------------------------------------------------------------------------------------------------------------------------------
# Fold sep() over every sample column, with snps as the initial accumulator:
# each step unpacks one sample's packed FORMAT values into separate columns.
snps2 <- snps %>%
  Reduce(f = sep, x = sample_vars)
#'
#' WA.077 x IVP101 (n=50)
## ------------------------------------------------------------------------------------------------------------------------------------------------------------
# Parent-informative loci for the WA.077 (4x, GT "x/x/x/x") x IVP101 (2x,
# GT "x/x") cross: keep homozygous-divergent sites with depth >= 10 in both
# parents, translate each parent's genotype into its fixed allele, and write
# a flat TSV of Chrom/Pos/Ref plus one allele column per parent.
filter_homozygous_vars(WA077_GT, IVP101_GT, snps2, 10) %>%
  mutate(WA077 = ifelse(WA077_GT == "0/0/0/0", REF, ALT)) %>%
  mutate(IVP101 = ifelse(IVP101_GT == "0/0", REF, ALT)) %>%
  select(CHROM, POS, REF, IVP101, WA077) %>%
  rename(Chrom = CHROM, Pos = POS, Ref = REF) %>%
  write_tsv(., "WA077-IVP101-hom-SNP.tsv", col_names = T)
#'
#' WA.077 x IVP35 (n=134)
## ------------------------------------------------------------------------------------------------------------------------------------------------------------
# Same procedure for the WA.077 x IVP35 cross.
filter_homozygous_vars(WA077_GT, IVP35_GT, snps2, 10) %>%
  mutate(WA077 = ifelse(WA077_GT == "0/0/0/0", REF, ALT)) %>%
  mutate(IVP35 = ifelse(IVP35_GT == "0/0", REF, ALT)) %>%
  select(CHROM, POS, REF, IVP35, WA077) %>%
  rename(Chrom = CHROM, Pos = POS, Ref = REF) %>%
  write_tsv(., "WA077-IVP35-hom-SNP.tsv", col_names = T)
#'
#' WA.077 x PL4 (n=107)
## ------------------------------------------------------------------------------------------------------------------------------------------------------------
# Same procedure for the WA.077 x PL4 cross.
filter_homozygous_vars(WA077_GT, PL4_GT, snps2, 10) %>%
  mutate(WA077 = ifelse(WA077_GT == "0/0/0/0", REF, ALT)) %>%
  mutate(PL4 = ifelse(PL4_GT == "0/0", REF, ALT)) %>%
  select(CHROM, POS, REF, PL4, WA077) %>%
  rename(Chrom = CHROM, Pos = POS, Ref = REF) %>%
  write_tsv(., "WA077-PL4-hom-SNP.tsv", col_names = T)
#'
#' LR00.014 x IVP101 (n=30)
## ------------------------------------------------------------------------------------------------------------------------------------------------------------
# Parent-informative loci for the LR00.014 x IVP101 cross (same procedure as
# the WA.077 crosses above).
filter_homozygous_vars(LR00014_GT, IVP101_GT, snps2, 10) %>%
  mutate(LR00014 = ifelse(LR00014_GT == "0/0/0/0", REF, ALT)) %>%
  mutate(IVP101 = ifelse(IVP101_GT == "0/0", REF, ALT)) %>%
  dplyr::select(CHROM, POS, REF, IVP101, LR00014) %>%
  rename(Chrom = CHROM, Pos = POS, Ref = REF) %>%
  write_tsv(., "LR00014-IVP101-hom-SNP.tsv", col_names = T)
#'
#' LR00.014 x IVP35 (n=77)
## ------------------------------------------------------------------------------------------------------------------------------------------------------------
# Same procedure for the LR00.014 x IVP35 cross.
filter_homozygous_vars(LR00014_GT, IVP35_GT, snps2, 10) %>%
  mutate(LR00014 = ifelse(LR00014_GT == "0/0/0/0", REF, ALT)) %>%
  mutate(IVP35 = ifelse(IVP35_GT == "0/0", REF, ALT)) %>%
  select(CHROM, POS, REF, IVP35, LR00014) %>%
  rename(Chrom = CHROM, Pos = POS, Ref = REF) %>%
  write_tsv(., "LR00014-IVP35-hom-SNP.tsv", col_names = T)
#'
#' LR00.014 x PL4 (n=67)
## ----
# Export homozygous SNP differences between tetraploid LR00.014 and haploid
# inducer PL4 (minimum read depth 10 in both parents).
# BUG FIX: the original stanza was copy-pasted from the IVP35 block and still
# derived the PL4 allele from IVP35_GT, and selected the IVP35 column (which
# this pipeline never creates) instead of PL4.
filter_homozygous_vars(LR00014_GT, PL4_GT, snps2, 10) %>%
  mutate(LR00014 = ifelse(LR00014_GT == "0/0/0/0", REF, ALT)) %>%
  mutate(PL4 = ifelse(PL4_GT == "0/0", REF, ALT)) %>%
  select(CHROM, POS, REF, PL4, LR00014) %>%
  rename(Chrom = CHROM, Pos = POS, Ref = REF) %>%
  write_tsv(., "LR00014-PL4-hom-SNP.tsv", col_names = T)
#'
#' LR00.026 x IVP101 (n=4)
## ----
# Same homozygous-difference export pattern for the remaining tetraploid x
# inducer crosses (LR00.026, Atlantic, Desiree).
filter_homozygous_vars(LR00026_GT, IVP101_GT, snps2, 10) %>%
  mutate(LR00026 = ifelse(LR00026_GT == "0/0/0/0", REF, ALT)) %>%
  mutate(IVP101 = ifelse(IVP101_GT == "0/0", REF, ALT)) %>%
  select(CHROM, POS, REF, IVP101, LR00026) %>%
  rename(Chrom = CHROM, Pos = POS, Ref = REF) %>%
  write_tsv(., "LR00026-IVP101-hom-SNP.tsv", col_names = T)
#'
#' LR00.026 x IVP35 (n=36)
## ----
filter_homozygous_vars(LR00026_GT, IVP35_GT, snps2, 10) %>%
  mutate(LR00026 = ifelse(LR00026_GT == "0/0/0/0", REF, ALT)) %>%
  mutate(IVP35 = ifelse(IVP35_GT == "0/0", REF, ALT)) %>%
  select(CHROM, POS, REF, IVP35, LR00026) %>%
  rename(Chrom = CHROM, Pos = POS, Ref = REF) %>%
  write_tsv(., "LR00026-IVP35-hom-SNP.tsv", col_names = T)
#'
#' LR00.026 x PL4 (n=35)
## ----
filter_homozygous_vars(LR00026_GT, PL4_GT, snps2, 10) %>%
  mutate(LR00026 = ifelse(LR00026_GT == "0/0/0/0", REF, ALT)) %>%
  mutate(PL4 = ifelse(PL4_GT == "0/0", REF, ALT)) %>%
  select(CHROM, POS, REF, PL4, LR00026) %>%
  rename(Chrom = CHROM, Pos = POS, Ref = REF) %>%
  write_tsv(., "LR00026-PL4-hom-SNP.tsv", col_names = T)
#'
#' Atlantic x IVP35 (n=5)
## ----
filter_homozygous_vars(Atlantic_GT, IVP35_GT, snps2, 10) %>%
  mutate(Atlantic = ifelse(Atlantic_GT == "0/0/0/0", REF, ALT)) %>%
  mutate(IVP35 = ifelse(IVP35_GT == "0/0", REF, ALT)) %>%
  select(CHROM, POS, REF, IVP35, Atlantic) %>%
  rename(Chrom = CHROM, Pos = POS, Ref = REF) %>%
  write_tsv(., "Atlantic-IVP35-hom-SNP.tsv", col_names = T)
#'
#' Atlantic x PL4 (n=10)
## ----
filter_homozygous_vars(Atlantic_GT, PL4_GT, snps2, 10) %>%
  mutate(Atlantic = ifelse(Atlantic_GT == "0/0/0/0", REF, ALT)) %>%
  mutate(PL4 = ifelse(PL4_GT == "0/0", REF, ALT)) %>%
  select(CHROM, POS, REF, PL4, Atlantic) %>%
  rename(Chrom = CHROM, Pos = POS, Ref = REF) %>%
  write_tsv(., "Atlantic-PL4-hom-SNP.tsv", col_names = T)
#'
#' Desiree x IVP101 (n=2)
## ----
filter_homozygous_vars(Desiree_GT, IVP101_GT, snps2, 10) %>%
  mutate(Desiree = ifelse(Desiree_GT == "0/0/0/0", REF, ALT)) %>%
  mutate(IVP101 = ifelse(IVP101_GT == "0/0", REF, ALT)) %>%
  select(CHROM, POS, REF, IVP101, Desiree) %>%
  rename(Chrom = CHROM, Pos = POS, Ref = REF) %>%
  write_tsv(., "Desiree-IVP101-hom-SNP.tsv", col_names = T)
#'
#' Desiree x IVP35 (n=2)
## ----
# NOTE(review): the two Desiree stanzas below additionally restrict sites to
# the 12 main chromosomes (chr01..chr12); the other crosses do not. Confirm
# whether this asymmetry is intentional.
filter_homozygous_vars(Desiree_GT, IVP35_GT, snps2, 10) %>%
  mutate(Desiree = ifelse(Desiree_GT == "0/0/0/0", REF, ALT)) %>%
  mutate(IVP35 = ifelse(IVP35_GT == "0/0", REF, ALT)) %>%
  select(CHROM, POS, REF, IVP35, Desiree) %>%
  filter(CHROM %in% sprintf("chr%0.2d", 1:12)) %>%
  rename(Chrom = CHROM, Pos = POS, Ref = REF) %>%
  write_tsv(., "Desiree-IVP35-hom-SNP.tsv", col_names = T)
#'
#' Desiree x PL4 (n=6)
## ----
filter_homozygous_vars(Desiree_GT, PL4_GT, snps2, 10) %>%
  mutate(Desiree = ifelse(Desiree_GT == "0/0/0/0", REF, ALT)) %>%
  mutate(PL4 = ifelse(PL4_GT == "0/0", REF, ALT)) %>%
  select(CHROM, POS, REF, PL4, Desiree) %>%
  filter(CHROM %in% sprintf("chr%0.2d", 1:12)) %>%
  rename(Chrom = CHROM, Pos = POS, Ref = REF) %>%
  write_tsv(., "Desiree-PL4-hom-SNP.tsv", col_names = T)
#'
#' 93.003 x IVP101 (n=12)
## ----
# Dihaploid-population stanzas: parent_snps() selects informative sites from
# the dihaploid GT/DP matrices against one inducer; the dihaploid allele is
# then inferred as the opposite of the inducer's homozygous call (inducer
# "0/0" -> inducer carries REF, dihaploids carry ALT, and vice versa).
# NOTE(review): parent_snps() and the *_GT/*_DP objects come from earlier Rmd
# chunks not visible here.
clean_93003_dihaploids_IVP101 <- parent_snps(snps2, clean_93003_dihaploids_GT, clean_93003_dihaploids_DP, IVP101_GT, IVP101_DP) %>%
  mutate(IVP101 = ifelse(IVP101_GT == "0/0", REF, ALT),
         clean_93003_dihaploids = ifelse(IVP101_GT == "0/0", ALT, REF)) %>%
  select(Chrom = CHROM, Pos = POS, Ref = REF, IVP101, clean_93003_dihaploids)
write_tsv(clean_93003_dihaploids_IVP101, "clean_93003_dihaploids-IVP101-SNP.tsv", col_names = T)
#'
#' 93.003 x IVP35 (n=21)
## ----
clean_93003_dihaploids_IVP35 <- parent_snps(snps2, clean_93003_dihaploids_GT, clean_93003_dihaploids_DP, IVP35_GT, IVP35_DP) %>%
  mutate(IVP35 = ifelse(IVP35_GT == "0/0", REF, ALT),
         clean_93003_dihaploids = ifelse(IVP35_GT == "0/0", ALT, REF)) %>%
  select(Chrom = CHROM, Pos = POS, Ref = REF, IVP35, clean_93003_dihaploids)
write_tsv(clean_93003_dihaploids_IVP35, "clean_93003_dihaploids-IVP35-SNP.tsv", col_names = T)
#'
#' 93.003 x PL4 (n=49)
## ----
clean_93003_dihaploids_PL4 <- parent_snps(snps2, clean_93003_dihaploids_GT, clean_93003_dihaploids_DP, PL4_GT, PL4_DP) %>%
  mutate(PL4 = ifelse(PL4_GT == "0/0", REF, ALT),
         clean_93003_dihaploids = ifelse(PL4_GT == "0/0", ALT, REF)) %>%
  select(Chrom = CHROM, Pos = POS, Ref = REF, PL4, clean_93003_dihaploids)
write_tsv(clean_93003_dihaploids_PL4, "clean_93003_dihaploids-PL4-SNP.tsv", col_names = T)
#'
#' C91.640 x IVP101 (n=0)
## ----
clean_C91640_dihaploids_IVP101 <- parent_snps(snps2, clean_C91640_dihaploids_GT, clean_C91640_dihaploids_DP, IVP101_GT, IVP101_DP) %>%
  mutate(IVP101 = ifelse(IVP101_GT == "0/0", REF, ALT),
         clean_C91640_dihaploids = ifelse(IVP101_GT == "0/0", ALT, REF)) %>%
  select(Chrom = CHROM, Pos = POS, Ref = REF, IVP101, clean_C91640_dihaploids)
write_tsv(clean_C91640_dihaploids_IVP101, "clean_C91640_dihaploids-IVP101-SNP.tsv", col_names = T)
#'
#' C91.640 x IVP35 (n=1)
## ----
clean_C91640_dihaploids_IVP35 <- parent_snps(snps2, clean_C91640_dihaploids_GT, clean_C91640_dihaploids_DP, IVP35_GT, IVP35_DP) %>%
  mutate(IVP35 = ifelse(IVP35_GT == "0/0", REF, ALT),
         clean_C91640_dihaploids = ifelse(IVP35_GT == "0/0", ALT, REF)) %>%
  select(Chrom = CHROM, Pos = POS, Ref = REF, IVP35, clean_C91640_dihaploids)
write_tsv(clean_C91640_dihaploids_IVP35, "clean_C91640_dihaploids-IVP35-SNP.tsv", col_names = T)
#'
#' C91.640 x PL4 (n=86)
## ----
clean_C91640_dihaploids_PL4 <- parent_snps(snps2, clean_C91640_dihaploids_GT, clean_C91640_dihaploids_DP, PL4_GT, PL4_DP) %>%
  mutate(PL4 = ifelse(PL4_GT == "0/0", REF, ALT),
         clean_C91640_dihaploids = ifelse(PL4_GT == "0/0", ALT, REF)) %>%
  select(Chrom = CHROM, Pos = POS, Ref = REF, PL4, clean_C91640_dihaploids)
write_tsv(clean_C91640_dihaploids_PL4, "clean_C91640_dihaploids-PL4-SNP.tsv", col_names = T)
#'
#' C93.154 x IVP101 (n=24)
## ----
# CONSISTENCY FIX: every other stanza in this file writes the output columns
# as (Chrom, Pos, Ref, <inducer>, <dihaploid population>); this one had the
# last two columns swapped, presumably a copy-paste slip. Order normalized so
# downstream consumers see a uniform layout.
clean_C93154_dihaploids_IVP101 <- parent_snps(snps2, clean_C93154_dihaploids_GT, clean_C93154_dihaploids_DP, IVP101_GT, IVP101_DP) %>%
  mutate(IVP101 = ifelse(IVP101_GT == "0/0", REF, ALT),
         clean_C93154_dihaploids = ifelse(IVP101_GT == "0/0", ALT, REF)) %>%
  select(Chrom = CHROM, Pos = POS, Ref = REF, IVP101, clean_C93154_dihaploids)
write_tsv(clean_C93154_dihaploids_IVP101, "clean_C93154_dihaploids-IVP101-SNP.tsv", col_names = T)
#'
#' C93.154 x IVP35 (n=88)
## ----
# Remaining dihaploid-population stanzas (C93.154, LR00.022), same pattern as
# above: inducer allele from its homozygous call, dihaploid allele inferred as
# the opposite allele at the same site.
clean_C93154_dihaploids_IVP35 <- parent_snps(snps2, clean_C93154_dihaploids_GT, clean_C93154_dihaploids_DP, IVP35_GT, IVP35_DP) %>%
  mutate(IVP35 = ifelse(IVP35_GT == "0/0", REF, ALT),
         clean_C93154_dihaploids = ifelse(IVP35_GT == "0/0", ALT, REF)) %>%
  select(Chrom = CHROM, Pos = POS, Ref = REF, IVP35, clean_C93154_dihaploids)
write_tsv(clean_C93154_dihaploids_IVP35, "clean_C93154_dihaploids-IVP35-SNP.tsv", col_names = T)
#'
#' C93.154 x PL4 (n=161)
## ----
clean_C93154_dihaploids_PL4 <- parent_snps(snps2, clean_C93154_dihaploids_GT, clean_C93154_dihaploids_DP, PL4_GT, PL4_DP) %>%
  mutate(PL4 = ifelse(PL4_GT == "0/0", REF, ALT),
         clean_C93154_dihaploids = ifelse(PL4_GT == "0/0", ALT, REF)) %>%
  select(Chrom = CHROM, Pos = POS, Ref = REF, PL4, clean_C93154_dihaploids)
write_tsv(clean_C93154_dihaploids_PL4, "clean_C93154_dihaploids-PL4-SNP.tsv", col_names = T)
#'
#' LR00.022 x IVP101 (n=2)
## ----
clean_LR00022_dihaploids_IVP101 <- parent_snps(snps2, clean_LR00022_dihaploids_GT, clean_LR00022_dihaploids_DP, IVP101_GT, IVP101_DP) %>%
  mutate(IVP101 = ifelse(IVP101_GT == "0/0", REF, ALT),
         clean_LR00022_dihaploids = ifelse(IVP101_GT == "0/0", ALT, REF)) %>%
  select(Chrom = CHROM, Pos = POS, Ref = REF, IVP101, clean_LR00022_dihaploids)
write_tsv(clean_LR00022_dihaploids_IVP101, "clean_LR00022_dihaploids-IVP101-SNP.tsv", col_names = T)
#'
#' LR00.022 x IVP35 (n=2)
## ----
clean_LR00022_dihaploids_IVP35 <- parent_snps(snps2, clean_LR00022_dihaploids_GT, clean_LR00022_dihaploids_DP, IVP35_GT, IVP35_DP) %>%
  mutate(IVP35 = ifelse(IVP35_GT == "0/0", REF, ALT),
         clean_LR00022_dihaploids = ifelse(IVP35_GT == "0/0", ALT, REF)) %>%
  select(Chrom = CHROM, Pos = POS, Ref = REF, IVP35, clean_LR00022_dihaploids)
write_tsv(clean_LR00022_dihaploids_IVP35, "clean_LR00022_dihaploids-IVP35-SNP.tsv", col_names = T)
#'
#' LR00.022 x PL4 (n=59)
## ----
clean_LR00022_dihaploids_PL4 <- parent_snps(snps2, clean_LR00022_dihaploids_GT, clean_LR00022_dihaploids_DP, PL4_GT, PL4_DP) %>%
  mutate(PL4 = ifelse(PL4_GT == "0/0", REF, ALT),
         clean_LR00022_dihaploids = ifelse(PL4_GT == "0/0", ALT, REF)) %>%
  select(Chrom = CHROM, Pos = POS, Ref = REF, PL4, clean_LR00022_dihaploids)
write_tsv(clean_LR00022_dihaploids_PL4, "clean_LR00022_dihaploids-PL4-SNP.tsv", col_names = T)
#'
## ----
# Extract this plain R script from the source Rmd (this file is that output).
knitr::purl("MM_parent_snps.Rmd", documentation = 2)
|
59c6354851b8ec7b021b422f2985fa56a4fd4e73
|
63370a83deb0209002ede6dd85e8738cfdc1fd6a
|
/man/source_file.Rd
|
681aa8e3f887b6acaa302a0092de8b034880d9be
|
[] |
no_license
|
maurolepore/fgeo.build
|
4e68adcb5cad1710553bf3ef09666d4e7ac9f0cd
|
d1016c25c609ce136ce7961368b2f375cdacde8a
|
refs/heads/main
| 2021-11-10T01:54:15.697149
| 2018-11-25T18:19:24
| 2018-11-25T18:19:24
| 159,061,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 587
|
rd
|
source_file.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/update_fgeo_source.R
\name{source_file}
\alias{source_file}
\title{Update vector to schedule package installation in correct order.}
\usage{
source_file(file, dir = "../fgeo.install")
}
\arguments{
\item{file}{Path to a file in data-raw/.}
\item{dir}{Path to the directory where \strong{fgeo.install} lives.}
}
\value{
Character vector.
}
\description{
Update vector to schedule package installation in correct order.
}
\examples{
\dontrun{
source_file("scheduled_packages")
source_file("fgeo_packages")
}
}
|
3c9a31fa253ce0109c33ec487a45d17f20114962
|
c26126260131d5de42a198a991630037905d1362
|
/man/get_proportions.Rd
|
00ad98437e755e4adb616bd9bc803dbf6112a87f
|
[
"MIT"
] |
permissive
|
AlkemaLab/fpemlocal
|
88b3d777d3eaa1dd92a91621c89666c7462ca65d
|
3aa538c3329967af391223a169cba7e4adb78ca0
|
refs/heads/master
| 2023-04-12T03:28:17.323380
| 2023-04-04T11:44:10
| 2023-04-04T11:44:10
| 268,617,794
| 0
| 1
|
MIT
| 2023-04-04T11:44:11
| 2020-06-01T19:50:53
|
R
|
UTF-8
|
R
| false
| true
| 560
|
rd
|
get_proportions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fpemreporting.R
\name{get_proportions}
\alias{get_proportions}
\title{Get proportions}
\usage{
get_proportions(posterior_samples, first_year, transformer)
}
\arguments{
\item{posterior_samples}{\emph{\sQuote{Array}} The samples array from \code{\link{fit_fp_csub}}.}
\item{first_year}{`integer` Earliest year represented in the data}
\item{transformer}{`function` Computes the desired result}
}
\value{
`data.frame` Values by year and percentile
}
\description{
Get proportions
}
|
ec7d21786aa38405bed68ec01c9410c15d88a71c
|
0e1204a899a929f6f2087a2727de8f368cbdc6df
|
/R/RSrc/unipath/unipath.R
|
752dce44b030449b38df626399e400981bb90ac2
|
[] |
no_license
|
jvnguyen94/fi_sc_analysis
|
602af4446c40536b8a5336dc975cf0f51a2971e1
|
a1551918a1feb71fbb43fa8c590095a91f0fb747
|
refs/heads/master
| 2023-07-06T13:21:07.070171
| 2021-04-14T19:45:34
| 2021-04-14T19:45:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,390
|
r
|
unipath.R
|
# ------------------------------------------
# Read args
# ------------------------------------------
# Expected CLI: <umi.path> <species> <thresholds> <n> <k> <plotting> <brewer.name>
# NOTE(review): the length check below only message()s and does not stop(), so
# the script continues with NA arguments; furthermore, every one of these
# values is overwritten by hard-coded settings in the "user UMI" section
# further down — confirm which configuration is the intended one.
args = commandArgs(trailingOnly=T)
if(length(args) < 2) {
  message("Invalid number of passed arguments.")
}
umi.path <- args[1]
species <- args[2]
thresholds <- args[3]  # NOTE(review): later code uses `threshold` (singular), not this variable
n <- args[4]
k <- args[5]
plotting <- args[6]
brewer.name <- args[7]  # NOTE(review): later code uses `color.brewer.name`, not this variable
# ------------------------------------------
# seed
# ------------------------------------------
# Fixed seed for reproducibility of clustering / KNN steps below.
set.seed(1234)
# ------------------------------------------
# install UniPath via GitHub
# https://reggenlab.github.io/UniPathWeb/
# library(devtools)
# install_github("reggenlab/UniPath")
# ------------------------------------------
usePackage <- function(p)
{
  # Install package `p` from CRAN if it is not already available, then attach
  # it. Returns TRUE if the package could be attached (the value of require()).
  # FIX: probe availability with requireNamespace() instead of scanning the
  # full installed.packages() matrix, which is slow and documented as
  # unreliable for this purpose.
  if (!requireNamespace(p, quietly = TRUE))
    install.packages(p, dep = TRUE)
  require(p, character.only = TRUE)
}
usePackage("pacman")  # pacman provides the p_load()/p_load_gh() helpers used below
# ------------------------------------------
# install dependencies
# ------------------------------------------
if (!requireNamespace("BiocManager", quietly = TRUE))
  install.packages("BiocManager")
# BiocManager::install("netbiov")
# BiocManager::install("GenomicRanges")
p_load("vegan")
p_load("FNN")
p_load("igraph")
p_load("preprocessCore")
p_load("GenomicRanges")
p_load("netbiov")
p_load("GenomicRanges")  # NOTE(review): duplicate of the p_load("GenomicRanges") two lines up
p_load("RColorBrewer")
# --------------------------------------------
# https://anndata.dynverse.org/index.html
p_load("anndata")
p_load("reticulate")
# Pin the python interpreter anndata's reticulate bridge should use.
reticulate::use_python("/opt/anaconda3/bin/python", required = TRUE) # python path
reticulate::py_config()
# --------------------------------------------
# load and install unipath from GitHub
# ------------------------------------------
p_load_gh("reggenlab/UniPath")
# --------------------------------------------
# user UMI
# --------------------------------------------
# NOTE(review): everything below hard-codes the inputs and settings, silently
# overriding the command-line arguments parsed at the top of this script.
ad <- read_h5ad("E17_adult_anndata.h5ad")
# genes x cells matrix expected by UniPath (AnnData stores cells x genes)
umi_expression <- t(as.data.frame(as.matrix(ad$X)))
species <- "mouse"
threshold <- 3 # genesets having number of genes greater than the threshold value provided
n <- 4 # number of clusters corresponding to type of cells
k <- 5 # top k nearest neighbor computation
plotting <- T
color.brewer.name <- "Set2"
# --------------------------------------------
# mouse/human
# load null model data matrix
# load symbols/markers
# --------------------------------------------
# For the selected species: load the pre-annotated null-model matrix and gene
# sets, combine p-values (Brown's method) for both the null model and the
# user expression matrix.
if (species == "mouse"){
  data("mouse_null_model")
  data("c5.bp.v6.0.symbols")
  # ---------------------------------------
  # browns method to combine p-values of null model data matrix (pre-annotated)
  # --------------------------------------
  # message("Combining p-values...")
  Pval <- binorm(mouse_null_data)
  combp_ref <- combine(c5.bp.v6.0.symbols, mouse_null_data, rownames(mouse_null_data), Pval, thr=threshold)
  # ---------------------------------------
  # User-defined expression
  # --------------------------------------
  Pval1 <- binorm(umi_expression)
  combp <- combine(c5.bp.v6.0.symbols, umi_expression ,rownames(umi_expression), Pval1, thr=threshold)
} else if (species == "human") {
  data("human_null_model")
  data("human_markers")
  # ---------------------------------------
  # browns method to combine p-values of null model data matrix (pre-annotated)
  # --------------------------------------
  # message("Combining p-values...")
  Pval <- binorm(human_null_data)
  combp_ref <- combine(human_markers, human_null_data, rownames(human_null_data), Pval, thr=threshold)
  # ---------------------------------------
  # User-defined expression
  # --------------------------------------
  Pval1 <- binorm(umi_expression)
  combp <- combine(human_markers, umi_expression ,rownames(umi_expression), Pval1, thr=threshold)
} else {
  # NOTE(review): execution continues after this message even though combp /
  # combp_ref are undefined for an unknown species.
  message("Provide a species of interest.")
}
# ---------------------------------------
# The adjusted p-value matrix (scores$adjpvalog) is referred to as pathway scores.
# --------------------------------------
scores <- adjust(combp, combp_ref)
# save(scores, file = "scores.RData")
# NOTE(review): this load() immediately overwrites the `scores` object computed
# on the line above with a cached copy from disk — confirm the cache is current
# or remove one of the two.
load("scores.RData")
# ---------------------------------------
# Pseudo temporal ordering
# TODO: save/return results
# ---------------------------------------
distclust <- dist_clust(scores$adjpvalog, n=n)
dist <- distclust$distance
clusters <- distclust$clusters # cell clusters
index <- index(scores$adjpvalog, k=k)
KNN <- KNN(scores$adjpvalog, index, clusters)
node_class <- class1(clusters, KNN)
distance <- distance(dist, node_class, clusters)
corr_mst <- minimum_spanning_tree(distance) # igraph object
# ---------------------------------------
# plotting
# ---------------------------------------
if (plotting == T){
  vertex_color <- brewer.pal(n = n, name = color.brewer.name)
  # TODO: fetch cell_labels from anndata
  # NOTE(review): the group sizes (82/44/46/23) are hard-coded for one specific
  # dataset and must match the column order of the expression matrix.
  cell_labels <- data.frame(c(rep("E18.5",82), rep("E14.5",44), rep("Adult",46), rep("E16.5",23)))
  # mst.plot.mod(corr_mst, vertex.color = vertex_color, mst.edge.col="black",
  #              bg="white", layout.function="layout.kamada.kawai")
  # Note: bug fix but edges don't draw (ok to move on)
  UniPath::mst.plot.mod(corr_mst, vertex.color = vertex_color[as.factor(cell_labels[,1])], mst.edge.col="black", bg="white", layout.function="layout.kamada.kawai", v.size = 3, e.size=0.005, mst.e.size = 0.005)
  legend("top", legend = sort(unique(cell_labels[,1])), col = vertex_color,pch=20, box.lty=0, cex=0.6, pt.cex=1.5, horiz=T)
  # TODO: save/return plot
}
|
0b3be71031f12b42dd1aaef8b65ccb7f1ae47a14
|
6dd8aafec0785a0fb0b1e16b6f70bbb83e3545a5
|
/Machine Learning/Matrix Factorization.R
|
e9459a018c40ed01c9252224b7ee0d2e8b2f7858
|
[] |
no_license
|
jwwikstrom/DS-HarvardX
|
22904bc8390a869504153b086b1e60d20f85a480
|
e8dde57671864e8edd58ac2f113cd29ad6276ea4
|
refs/heads/master
| 2021-07-20T09:37:48.769002
| 2020-05-19T13:13:42
| 2020-05-19T13:13:42
| 165,712,485
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,911
|
r
|
Matrix Factorization.R
|
# HarvardX Machine Learning SVD / matrix-factorization exercise.
# Simulate grades for 100 students on 24 tests (8 each in Math, Science, Arts)
# from a correlated three-factor model.
set.seed(1987)
n <- 100
k <- 8
# 3x3 covariance of the latent subject abilities (variance 64, Math/Science
# correlated 0.75, each correlated 0.5 with Arts).
Sigma <- 64 * matrix(c(1, .75, .5, .75, 1, .5, .5, .5, 1), 3, 3)
m <- MASS::mvrnorm(n, rep(0, 3), Sigma)
m <- m[order(rowMeans(m), decreasing = TRUE),]
# Kronecker product replicates each factor column k times; iid N(0,1) noise
# is added per test. (The inner matrix() around n*k*3 is redundant but
# harmless — rnorm() only uses its length.)
y <- m %x% matrix(rep(1, k), nrow = 1) + matrix(rnorm(matrix(n*k*3)), n, k*3)
colnames(y) <- c(paste(rep("Math",k), 1:k, sep="_"),
                 paste(rep("Science",k), 1:k, sep="_"),
                 paste(rep("Arts",k), 1:k, sep="_"))
my_image <- function(x, zlim = range(x), ...){
  # Draw a heat map of matrix `x` with row 1 at the top, a grid line between
  # every cell, and column names along the bottom axis. Extra arguments are
  # forwarded to image().
  palette_rdbu <- rev(RColorBrewer::brewer.pal(9, "RdBu"))
  col_idx <- seq_len(ncol(x))
  row_idx <- seq_len(nrow(x))
  # transpose after flipping rows so image() renders in reading order
  flipped <- t(x[rev(row_idx), , drop = FALSE])
  image(col_idx, row_idx, flipped, xaxt = "n", yaxt = "n",
        xlab = "", ylab = "", col = palette_rdbu, zlim = zlim, ...)
  abline(h = row_idx + 0.5, v = col_idx + 0.5)
  axis(side = 1, col_idx, colnames(x), las = 2)
}
# Visualize the raw grade matrix and the test-by-test correlations.
my_image(y)
my_image(cor(y), zlim = c(-1,1))
range(cor(y))
axis(side = 2, 1:ncol(y), rev(colnames(y)), las = 2)

# SVD: y = U D V'; reconstruction error should be ~ machine precision.
s <- svd(y)
names(s)
y_svd <- s$u %*% diag(s$d) %*% t(s$v)
max(abs(y - y_svd))

# Column sums of squares are preserved under the rotation V.
ss_y <- colSums((y)^2)
y_yv <- y_svd %*% s$v
ss_yv <- colSums((y_yv)^2)
sum(ss_y)
sum(ss_yv)
plot(ss_y,1:24)
plot(ss_yv,1:24)
plot(sqrt(ss_yv),s$d)  # sqrt of the SS of YV equals the singular values

# U %*% diag(d) can be computed column-wise with sweep().
identical(s$u %*% diag(s$d), sweep(s$u, 2, s$d, FUN = "*"))
# BUG FIX: the original repeated this check with `s` (the whole svd() list) as
# the STATS argument, which errors at run time; `s$d` was clearly intended.
identical(s$u %*% diag(s$d), sweep(s$u, 2, s$d, FUN = "*"))

# The first column of U*D tracks the overall student means.
student_mean <- rowMeans(y)
ud <- s$u %*% diag(s$d)
plot(student_mean,ud[,1])
image(s$v)
my_image(s$v)

# Rank-1 approximation, then residual correlations after removing 1, 2 and 3
# singular components — structure disappears as components are removed.
u1d1v1 <- s$u[,1] %*% t(s$v[,1]) * s$d[1]
my_image(u1d1v1)
resid <- y - with(s,(u[, 1, drop=FALSE]*d[1]) %*% t(v[, 1, drop=FALSE]))
my_image(cor(resid), zlim = c(-1,1))
axis(side = 2, 1:ncol(y), rev(colnames(y)), las = 2)
resid <- y - with(s,sweep(u[, 1:2], 2, d[1:2], FUN="*") %*% t(v[, 1:2]))
my_image(cor(resid), zlim = c(-1,1))
axis(side = 2, 1:ncol(y), rev(colnames(y)), las = 2)
resid <- y - with(s,sweep(u[, 1:3], 2, d[1:3], FUN="*") %*% t(v[, 1:3]))
my_image(cor(resid), zlim = c(-1,1))
axis(side = 2, 1:ncol(y), rev(colnames(y)), las = 2)
|
79d37c011a8aecd3f058c2c2221256e1892816db
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/astroFns/examples/ut2lst.Rd.R
|
727d6ec74f01c862ff387797303597045ebc700b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 792
|
r
|
ut2lst.Rd.R
|
# Generated by genthat from the examples of astroFns::ut2lst — do not
# hand-edit; regenerate from the package's Rd file instead. Requires the
# third-party astroFns package.
library(astroFns)
### Name: ut2lst
### Title: Universal time to local sidereal time or hour angle
### Aliases: ut2lst ut2ha
### Keywords: chron
### ** Examples
# LST at UT1 midnight on the first of every month for Green Bank, WV, USA
midLST <- ut2lst(yr = 2012, mo = 1:12, dy = 1, hr = 0, mi = 0, se = 0,
                 lon.obs="W 79d 50.5m")
str(midLST)
midLST
# LST at EST midnight on the first of every month for Green Bank, WV, USA
# (EST = UT1-5 hours)
midLST <- ut2lst(yr = 2012, mo = 1:12, dy = 1, hr = -5, mi = 0, se = 0,
                 lon.obs="W 79d 50.5m")
str(midLST)
midLST
# LST in Green Bank, WV, USA, now, and 12 hours from now.
ut2lst(Sys.time())
ut2lst(Sys.time() + 12*3600)
# Hour angle of 3C286 in Green Bank now (using function defaults)
ut2ha(Sys.time())
|
b9f933830357bc9eef8724f62dc131ef2582917c
|
ab5f335d1dfc44c2f16f4adc7e80a9220e9b4097
|
/rscripts/data_sharing_network_dat.R
|
6dec12a6dcd7a4afe98bd3c6cc842ab2a1703e41
|
[] |
no_license
|
CoWy-ASA/RiverWatch
|
857bbe2229730fcdb29a6aa8193e67784f7a06f3
|
1147adb9965215843e60c44f7703e79a84740495
|
refs/heads/master
| 2021-01-22T23:16:26.662295
| 2015-08-04T20:09:48
| 2015-08-04T20:09:48
| 33,345,992
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,591
|
r
|
data_sharing_network_dat.R
|
### look at data from data sharing network
### mjp
library(ggplot2)
# FIX: dplyr verbs (as.tbl, group_by, summarize, filter, ...) are used
# throughout this script but the package was never attached.
library(dplyr)
dat <- read.csv("data/dsn_pull/rf-1990-2015.csv", header = TRUE, as.is = TRUE)
sites <- read.csv("data//dsn_pull/rf_sites.csv", header = TRUE, as.is = TRUE )
dat$date <- as.Date(dat$Activity.Start.Date, format = "%m/%d/%Y")
dat$Result.Value <- as.numeric(dat$Result.Value)
dat_dt <- as.tbl(dat)
# FIX: the original line here had unbalanced parentheses
#   select(dat_dt, distinct(as.character(Monitoring.Location.ID) )
# which is a parse error that aborts sourcing of the whole script. The intent
# appears to be a quick look at the distinct monitoring locations:
distinct(select(dat_dt, Monitoring.Location.ID))
#events <- unique(fields[, names(fields) %in% c( "Event.", "Org.Name", "Stn.", "date" )] )
# One row per sampling event (activity x date x location).
events <- unique( dat[ , names(dat) %in% c("Activity.ID", "date", "Monitoring.Location.ID", "Monitoring.Location.Name" ) ] )
events <- as.tbl(events)
# NOTE(review): this first events_by_sites2 is immediately overwritten by the
# post-2000 version below, so it is dead code — confirm before deleting.
events_by_sites2 <- group_by( events, Monitoring.Location.ID )%>%
  summarize( n_events = n_distinct(date),
             earliest = min(date),
             latest = max(date)) %>%
  arrange( desc(n_events))
## eliminate sites with fewer than 25 samples
events_by_sites2 <- filter(events,date >= as.Date("1/1/2000", format = "%m/%d/%Y") ) %>% group_by(Monitoring.Location.ID )%>%
  summarize( n_events = n_distinct(date),
             earliest = min(date),
             latest = max(date)) %>%
  arrange( desc(n_events))
main_sites <- filter(events_by_sites2, n_events > 25 ) %>% select(Monitoring.Location.ID)
### examine date range with plot
filter(events, Monitoring.Location.ID %in% main_sites$Monitoring.Location.ID) %>% ggplot( aes(x = date, y = Monitoring.Location.Name )) + geom_point()
### after viewing, focus on date collected after 2000 more than 25 events
filter(events, Monitoring.Location.ID %in% main_sites$Monitoring.Location.ID & date >= as.Date("1/1/2000", format = "%m/%d/%Y") ) %>%
  ggplot( aes(x = date, y = Monitoring.Location.Name )) + geom_point()
### so what is measured during these events?
### filter dat for only main sites and data after 2000
# Per-analyte summary: sample count, value range, zero and detect counts.
analytes <- filter(dat_dt, Monitoring.Location.ID %in% main_sites$Monitoring.Location.ID & date >= as.Date("1/1/2000", format = "%m/%d/%Y") ) %>%
  group_by(Characteristic.Name) %>%
  summarise(n = n(), min = min(Result.Value), max = max(Result.Value), n_zero = sum(Result.Value == 0 ), n_detects = sum(Result.Value > 0 ) ) %>%
  arrange(desc(n))
# Iron time series per site (log scale, free y axes).
filter(dat_dt, Monitoring.Location.ID %in% main_sites$Monitoring.Location.ID & date >= as.Date("1/1/2000", format = "%m/%d/%Y") & Characteristic.Name == "Iron" ) %>%
  ggplot( aes(x = date, y = log(Result.Value)) ) + facet_wrap( ~ Monitoring.Location.Name, scales = "free_y") + geom_point()
|
ab22b4d5a07203b68867afb4ea96638573a67191
|
104b494275bfbcdc9aa690846cb5136709765a29
|
/treeStats.R
|
857cfe214ed7f76480125cc48e781ebeeff43728
|
[] |
no_license
|
sellisd/IES
|
d3634f487e27aec90ebe7107503ce17355a1b118
|
04b6f1dc7a0be8c0e48899f4acf3ce7a352bd91e
|
refs/heads/master
| 2021-01-17T04:01:50.070493
| 2018-06-01T21:17:23
| 2018-06-01T21:17:23
| 24,942,238
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,212
|
r
|
treeStats.R
|
# Calculate per-cluster tree statistics: total branch length, number of genes
# (tips) and whether the cluster has an IES character matrix with >= 1 IES.
library(ape)
treePath <- "~/data/IES_data/msas/phyldog/results/"
load("~/data/IES_data/rdb/charMats")  # provides `charMats`
clusters <- dir(path = treePath, pattern = "*.ReconciledTree$")
clusters <- gsub(pattern = ".ReconciledTree", replacement = "", clusters, fixed = TRUE)
n <- length(clusters)
# Preallocate the full result frame; rows are filled in place below.
treeStatsDF <- data.frame(cluster = character(n), branchLength = numeric(n),
                          ngenes = numeric(n), hasCharMat = logical(n),
                          stringsAsFactors = FALSE)
# Hoisted loop invariant: clusters whose character matrix contains >= 1 IES.
clustersWithIES <- charMats$cluster[charMats$ies != 0]
for (counter in seq_along(clusters)) {
  cluster <- clusters[counter]
  cat(counter, "/", n, "\r")  # lightweight progress indicator
  tr <- read.tree(file = paste0(treePath, cluster, ".ReconciledTree"))
  treeStatsDF[counter, "cluster"] <- cluster
  treeStatsDF[counter, "branchLength"] <- sum(tr$edge.length)
  treeStatsDF[counter, "ngenes"] <- length(tr$tip.label)
  # Direct logical assignment replaces the original if/else on membership.
  treeStatsDF[counter, "hasCharMat"] <- cluster %in% clustersWithIES
}
save(treeStatsDF, file = "~/data/IES_data/rdb/treeStats")
|
bb6167b9aae565d887a1c13db3f2ed565a27a8f4
|
802fc356f77e6e0f7ade3c14b00218821b253101
|
/plot4.R
|
4b7d38a559289f2472d67d3542ad8aabb7cb8e01
|
[] |
no_license
|
mbcmn/ExData_Plotting1
|
4bfd79a27a2f94757dbee46bf2331d7deafde766
|
872e8ef7134cb194b4a2d644924406c486de34d2
|
refs/heads/master
| 2020-08-06T12:26:21.165639
| 2019-12-30T17:34:45
| 2019-12-30T17:34:45
| 212,974,888
| 0
| 0
| null | 2019-10-05T09:33:23
| 2019-10-05T09:33:22
| null |
UTF-8
|
R
| false
| false
| 1,542
|
r
|
plot4.R
|
# Build the 2x2 panel for Plot 4 of the Exploratory Data Analysis course
# project from the UCI household power consumption data, saved to plot4.png.
# Read txt file into R ("?" marks missing values in this dataset)
powercons <- read.csv("household_power_consumption.txt", header = TRUE, sep =";", na.strings = "?")
# Convert date and time columns into date-time format
powercons$datetime <- strptime(paste(powercons$Date, powercons$Time), format = "%d/%m/%Y %H:%M:%S")
# Subset for two first days of February 2007
powercons <- powercons[powercons$Date == "1/2/2007" | powercons$Date == "2/2/2007", ]
# Create Plot 4 and save as png.
# FIXES vs. original: removed attach() (left attached, shadows globals) in
# favor of explicit powercons$ references; removed a stray
# lines(powercons$Global_active_power) call that drew the series against its
# row index instead of time; removed leftover axis(at = c(0, 1441, 2881))
# calls whose tick positions are meaningless on a date-time axis.
png("plot4.png", width = 480, height = 480, units = "px")
par(mfcol = c(2, 2))
# Top-left: global active power over time
plot(powercons$datetime, powercons$Global_active_power, xlab = "",
     ylab = "Global Active Power (kilowatts)", type = "l")
# Bottom-left: the three sub-metering series overlaid
plot(powercons$datetime, powercons$Sub_metering_1, xlab = "",
     ylab = "Energy sub metering", type = "n")
lines(powercons$datetime, powercons$Sub_metering_1, col = "grey")
lines(powercons$datetime, powercons$Sub_metering_2, col = "red")
lines(powercons$datetime, powercons$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("grey", "red", "blue"), lty = 1)
# Top-right: voltage over time
plot(powercons$datetime, powercons$Voltage, xlab = "datetime",
     ylab = "Voltage", type = "l")
# Bottom-right: global reactive power over time
plot(powercons$datetime, powercons$Global_reactive_power, xlab = "datetime",
     ylab = "Global_reactive_power", type = "l")
dev.off()
|
9afed3145b6e47eed12ce4671153d6fd46820430
|
dd953a24d6aba1c5d5a6e81c04fc3c91cc9b5ae4
|
/R/Create.actor.youtube.R
|
4a3421509f4779f8a4c4031c016e4a9acb1a07d2
|
[] |
no_license
|
cran/vosonSML
|
e63e665d01fc0bba576beebc3f13d73676b38eba
|
c8d486b70237e725ca232f06e97c07ea4eefe2c8
|
refs/heads/master
| 2022-09-07T13:04:54.440490
| 2022-08-16T12:00:01
| 2022-08-16T12:00:01
| 145,896,556
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,614
|
r
|
Create.actor.youtube.R
|
#' @title Create YouTube actor network
#'
#' @description Creates a YouTube actor network from comment threads on YouTube videos. Users who have made comments to
#' a video (top-level comments) and users who have replied to those comments are actor nodes. The comments are
#' represented as directed edges between the actors. The video id is also included as an actor node, representative of
#' the videos publisher with top-level comments as directed edges towards them.
#'
#' @param datasource Collected social media data with \code{"datasource"} and \code{"youtube"} class names.
#' @param type Character string. Type of network to be created, set to \code{"actor"}.
#' @param ... Additional parameters passed to function. Not used in this method.
#'
#' @return Network as a named list of two dataframes containing \code{$nodes} and \code{$edges}.
#'
#' @examples
#' \dontrun{
#' # create a YouTube actor network graph
#' actorNetwork <- youtubeData |> Create("actor")
#'
#' # network
#' # actorNetwork$nodes
#' # actorNetwork$edges
#' }
#'
#' @export
Create.actor.youtube <- function(datasource, type, ...) {
  # msg() is a package-internal status printer (defined elsewhere in vosonSML)
  msg("Generating YouTube actor network...\n")
  # nodes are authors and videos, edges are comments and self-loops

  # Lookup table: for each comment id, who wrote it. Joining a reply's
  # ParentID against this gives the author the reply is directed at.
  parent_authors <-
    datasource |> dplyr::select(.data$CommentID, .data$AuthorChannelID) |>
    dplyr::distinct(.data$CommentID, .keep_all = TRUE) |>
    dplyr::rename("ParentID" = .data$CommentID,
                  "ParentAuthorID" = .data$AuthorChannelID)

  # Edge table: a comment with a ParentID is a "reply-comment" edge to the
  # parent comment's author; a top-level comment is a "comment" edge to the
  # synthetic "VIDEOID:<id>" node.
  df_relations <- datasource |>
    dplyr::left_join(parent_authors, by = c("ParentID")) |>
    dplyr::select(
      .data$AuthorChannelID,
      .data$ParentID,
      .data$ParentAuthorID,
      .data$VideoID,
      .data$CommentID
    ) |>
    dplyr::mutate(edge_type = dplyr::case_when((!is.na(.data$ParentID)) ~ "reply-comment", TRUE ~ "comment")) |>
    dplyr::mutate(
      to = dplyr::if_else(
        .data$edge_type == "reply-comment",
        .data$ParentAuthorID,
        dplyr::if_else(
          .data$edge_type == "comment",
          paste0("VIDEOID:", .data$VideoID),
          as.character(NA)
        )
      )
    ) |>
    dplyr::rename(
      "from" = .data$AuthorChannelID,
      "video_id" = .data$VideoID,
      "comment_id" = .data$CommentID
    ) |>
    dplyr::select(.data$from,
                  .data$to,
                  .data$video_id,
                  .data$comment_id,
                  .data$edge_type)

  # Node table: one "actor" node per distinct author channel id.
  df_nodes <-
    datasource |> dplyr::select(.data$AuthorChannelID, .data$AuthorDisplayName) |>
    dplyr::distinct(.data$AuthorChannelID, .keep_all = TRUE) |>
    dplyr::mutate(node_type = "actor") |>
    dplyr::rename("id" = .data$AuthorChannelID,
                  "screen_name" = .data$AuthorDisplayName)

  # Synthetic video nodes ("VIDEOID:<id>") standing in for each video's
  # publisher.
  video_ids <-
    datasource |> dplyr::distinct(.data$VideoID) |> dplyr::mutate(id = paste0("VIDEOID:", .data$VideoID)) |>
    dplyr::rename(video_id = .data$VideoID)

  # Add a self-loop edge per video node (keeps isolated videos in the graph).
  df_relations <- dplyr::bind_rows(
    df_relations,
    video_ids |> dplyr::mutate(
      from = .data$id,
      to = .data$id,
      edge_type = "self-loop",
      id = NULL
    )
  )

  # Append video nodes not already present among the actor nodes.
  video_ids <- video_ids |> dplyr::select(-.data$video_id)
  if (nrow(video_ids)) {
    video_ids <- video_ids |> dplyr::mutate(node_type = "video")
    df_nodes <-
      dplyr::bind_rows(df_nodes, dplyr::anti_join(video_ids, df_nodes, by = "id"))
  }

  # Tag the result so downstream S3 generics (e.g. Graph()) dispatch on it.
  net <- list("nodes" = df_nodes, "edges" = df_relations)
  class(net) <- append(class(net), c("network", "actor", "youtube"))
  msg("Done.\n")
  net
}
|
18fc8ab927461ade5461f3f9a979a209978e439f
|
44cf65e7ab4c487535d8ba91086b66b0b9523af6
|
/data/Newspapers/2002.02.21.editorial.79636.0823.r
|
23a7f81c0a1c0783a308e3e304a5d8fdb8f0516c
|
[] |
no_license
|
narcis96/decrypting-alpha
|
f14a746ca47088ec3182d610bfb68d0d4d3b504e
|
5c665107017922d0f74106c13d097bfca0516e66
|
refs/heads/master
| 2021-08-22T07:27:31.764027
| 2017-11-29T12:00:20
| 2017-11-29T12:00:20
| 111,142,761
| 0
| 1
| null | null | null | null |
MacCentralEurope
|
R
| false
| false
| 3,579
|
r
|
2002.02.21.editorial.79636.0823.r
|
institutiile n - au nici blana , nici coada , n - au nici bataturi in talpa si nu fac nici diaree .
sint doar niste cladiri mai fatoase si niste conventii intre noi , biete fiinte trecatoare .
si atunci , vrind - nevrind , iti vine sa te intrebi , oare de ce a sarit Parchetul General ( de pe linga Curtea Suprema de Justitie ) ca un magar caruia cineva i - a strecurat un chistoc aprins in pilnia urechii ?
una - doua , ca la " Foc ! " , Parchetul a dat un comunicat prin care neaga ca ar fi " fost autorizata inregistrarea convorbirilor telefonice ale ziaristilor " .
n - am afirmat niciodata ca institutia cu ureche lunga ar fi autorizat o astfel de operatiune .
atita ar mai trebui !
dar faptul ca n - a fost emis nici un mandat pentru interceptarea telefoanelor ziaristilor nu inseamna citusi de putin ca acestea nu au fost inregistrate si raportate primului - ministru .
de ce ?
pentru ca Parchetul General nu afirma nicaieri ca , in urmarirea posibililor autori ai raportului Armagedon II , nu s - ar fi autorizat nici o ascultare de telefoane .
am convingerea ca acest lucru s - a produs .
de ce ?
pentru ca imediat dupa ce am facut declaratiile legate de ascultarea telefoanelor , institutia domnului Joita a spus ca totul a fost legal ( trec zimbitor peste acuzatia ca as incerca discreditarea institutiilor statului ) .
daca era curata si fara musca pe caciula , procuratura spunea frumos " noi nu am autorizat nimic si vom verifica serios daca o asemenea interceptare s - a facut de catre vreo alta institutie sau persoana " .
dar a raspuns fuga - fuga fara sa cerceteze nimic .
si asta pentru ca " procurorii " stiau foarte bine ca nu s - a emis nici un mandat pentru ziaristi .
insa au fost ascultati altii , iar ziaristii , in exercitarea profesiei lor , i - au sunat pe acestia .
cei care au raportat despre interceptarea urmaritilor i - au mentionat si pe jurnalisti si au trimis raportul pe filiera , pina a ajuns si la primul - ministru Adrian Nastase .
numai o comisie parlamentara ne - ar putea confirma sau infirma daca in cazul Armagedon II au fost emise mandate de ascultare .
abia atunci am fi in situatia de a discuta daca procurorii au respectat legea ori s - a comis un abuz .
acest lucru trebuie limpezit , dupa cum trebuie limpezit inca un fapt .
cum de s - a grabit atunci Parchetul domnului Joita " sa livreze " catre atitea televiziuni banda video cu inregistrarea lui Ovidiu Iane la politie ?
o fi Joita mai destept decit coana Joitica a lui Caragiale , dar de pe vremea cind Parchetul era bratul de otel al lui Nicolae Ceausescu nu ne - au mai fost oferite asemenea dovezi acuzatoare .
in acele zile de stingu - n dreptuí , Parchetul a functionat pe post de instrument de propaganda pentru apararea primului - ministru .
fie si numai din acest comportament straniu si tot am fi fost obligati sa banuim Parchetul de comunicate gogonate , daramite acum , cind mai apare si codita telefoanelor .
povestea ascultarii telefoanelor pune in discutie un drept fundamental al romanilor .
au fost ascultati cei urmariti si cu ce drept ?
daca nu s - a semnat nici un mandat pentru interceptarea convorbirilor lui Mugur Ciuvica , Ovidiu Iane si apropiatilor acestora , atunci spaima mea este si mai mare .
orice Bula cu niste grade prin cine stie ce servicii speciale poate asculta pe oricine cind are chef .
daca s - au emis totusi mandate de interceptare pe numele unor " impricinati " , domnul Joita si ai sai trebuie sa dea socoteala .
indiferent cite exemplare ar mai exista , vremea coanei Joitica a cam trecut !
|
af0c084a45b70c41ad474592653b8f1578c27563
|
bdb80bd3620d159911090501977e63e2c16fc7a2
|
/plot2.R
|
74e7005af63b2cba83d019f442a6052c76f00c79
|
[] |
no_license
|
EMCE777/ExData_Plotting1
|
e1ff9c82dce7e49036a65d0371fc5a94a1c7a93b
|
9b53aceb7313ea35ecc11bd85b7429b99efd9458
|
refs/heads/master
| 2020-04-01T07:54:46.900548
| 2018-10-14T23:24:55
| 2018-10-14T23:24:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,053
|
r
|
plot2.R
|
# plot2.R — Global Active Power for 2007-02-01..2007-02-02 as a line chart
# (examines how household energy usage varies over a 2-day period).

# Load the raw data; "?" encodes missing values
power_data <- read.table(file = "./Project1/household_power_consumption.txt",
                         header = TRUE, sep = ";", na.strings = "?")

# Parse the Date column into Date objects
power_data$Date <- as.Date(power_data$Date, format = "%d/%m/%Y")

# Keep only the two target days (which() drops NA matches, like subset())
keep <- which(power_data$Date >= "2007-02-01" & power_data$Date <= "2007-02-02")
power_data <- power_data[keep, ]

# Combine Date and Time into a single timestamp column
power_data$DateTime <-
  strptime(paste(power_data$Date, power_data$Time), "%Y-%m-%d %H:%M:%S")

# Empty frame first, then draw the line over it
plot(x = power_data$DateTime, y = power_data$Global_active_power,
     type = "n", ylab = "Global Active Power (kilowatts)", xlab = "")
lines(x = power_data$DateTime, y = power_data$Global_active_power, type = "l")

# Copy the screen device to a PNG file and close it
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
|
f45e16d533bef17a171d54de885536656473fa66
|
98f11f4107c2fd916e6285bb685dc508e6a47133
|
/ui.R
|
5dad9a8785a4fa5eae08eec167e05ea21982dc9c
|
[
"MIT"
] |
permissive
|
peppy/2019-ncov-japan
|
b121682f8e9021506a1edd4cece0b2fbcd0ca403
|
aa56daf6eb4016ba76818592fa982988ebf0a3eb
|
refs/heads/master
| 2022-07-27T14:37:14.328844
| 2020-05-14T00:54:46
| 2020-05-14T00:54:46
| 263,784,571
| 2
| 0
|
MIT
| 2020-05-14T01:36:07
| 2020-05-14T01:29:13
| null |
UTF-8
|
R
| false
| false
| 6,872
|
r
|
ui.R
|
# Load shared globals (i18n, PAGE_PATH, languageSetting, ...) into this scope.
source("global.R", local = TRUE, encoding = "UTF-8")
# UI definition for the COVID-19 Japan dashboard (shinydashboardPlus).
# Layout: header/title, sidebar menu (one entry per page), body with one
# tabItem per page (each sourced from its own UI file), and a footer.
# NOTE(review): enable_rightsidebar = F / collapsible = F use the T/F
# shorthands — prefer TRUE/FALSE (left unchanged in this doc-only pass).
shinyUI(
  dashboardPagePlus(
    skin = "red",
    title = i18n$t("新 型 コ ロ ナ ウ イ ル ス 感 染 速 報"),
    header = dashboardHeaderPlus(
      title = paste0("🦠 ", i18n$t("新 型 コ ロ ナ ウ イ ル ス 感 染 速 報")),
      titleWidth = 600,
      enable_rightsidebar = F
    ),
    # TODO: add a language-selection setting
    # Sidebar: one menuItem per dashboard page; badges flag page status
    sidebar = dashboardSidebar(sidebarMenu(
      id = "sideBarTab",
      menuItem(
        i18n$t("感染速報"),
        tabName = "japan",
        icon = icon("tachometer-alt"),
        badgeLabel = i18n$t("実況中"),
        badgeColor = "red"
      ),
      menuItem(
        i18n$t("感染ルート"),
        tabName = "route",
        icon = icon("project-diagram"),
        badgeLabel = i18n$t("開発中"),
        badgeColor = "black"
      ),
      # Per-prefecture sub-pages
      menuItem(
        i18n$t("自治体状況"),
        tabName = "prefStatus",
        icon = icon("city"),
        menuSubItem(
          text = i18n$t("北海道"),
          tabName = "hokkaido",
          icon = icon("fish")
        ),
        menuSubItem(
          text = i18n$t("青森県"),
          tabName = "aomori",
          icon = icon("apple-alt")
        ),
        menuSubItem(
          text = i18n$t("岩手県"),
          tabName = "iwate" # ,
          # icon = icon('apple-alt')
        ),
        menuSubItem(
          text = i18n$t("宮城県"),
          tabName = "miyagi" # ,
          # icon = icon('apple-alt')
        ),
        menuSubItem(
          text = i18n$t("茨城県"),
          tabName = "ibaraki" # ,
          # icon = icon('apple-alt')
        ),
        menuSubItem(
          text = i18n$t("神奈川県"),
          tabName = "kanagawa" # ,
          # icon = icon('apple-alt')
        )
      ),
      menuItem(
        i18n$t("事例マップ"),
        tabName = "caseMap",
        icon = icon("map-marked-alt"),
        badgeLabel = i18n$t("破棄"),
        badgeColor = "black"
      ),
      menuItem(
        i18n$t("状況分析"),
        tabName = "academic",
        icon = icon("eye"),
        badgeLabel = "V 0.1",
        badgeColor = "black"
      ),
      menuItem(
        # Google mobility ("stay-home effect") tab
        i18n$t("自粛効果"),
        tabName = "google",
        icon = icon("google"),
        badgeLabel = "V 0.1",
        badgeColor = "black"
      ),
      menuItem(
        i18n$t("サイトについて"),
        tabName = "about",
        icon = icon("readme"),
        badgeLabel = i18n$t("開発中"),
        badgeColor = "black"
      )
    )),
    dashboardBody(
      # <head>: favicon plus Twitter-card / Open Graph social-sharing metadata
      tags$head(
        tags$link(rel = "icon", href = "favicon.ico"),
        tags$meta(name = "twitter:card", content = "summary_large_image"),
        # tags$meta(property = 'og:url', content = 'https://covid-2019.live/'),
        tags$meta(property = "og:title", content = "🦠新型コロナウイルス感染速報"),
        tags$meta(property = "og:description", content = "日本における新型コロナウイルスの最新感染・罹患情報をいち早く速報・まとめるサイトです。"),
        tags$meta(property = "og:image", content = "https://repository-images.githubusercontent.com/237152814/77329f80-917c-11ea-958c-731c8433c504")
      ),
      # Body: each tab's UI lives in its own file under PAGE_PATH and is
      # sourced at app start ($value extracts the sourced expression's result)
      tabItems(
        tabItem(
          tabName = "japan",
          source(
            file = paste0(PAGE_PATH, "Main/Main.ui.R"),
            local = T,
            encoding = "UTF-8"
          )$value
        ),
        tabItem(
          tabName = "route",
          source(
            file = paste0(PAGE_PATH, "Route.R"),
            local = T,
            encoding = "UTF-8"
          )$value
        ),
        tabItem(
          tabName = "hokkaido",
          source(
            file = paste0(PAGE_PATH, "Pref/Hokkaido-UI.R"),
            local = T,
            encoding = "UTF-8"
          )$value
        ),
        tabItem(
          tabName = "aomori",
          source(
            file = paste0(PAGE_PATH, "Pref/Aomori-UI.R"),
            local = T,
            encoding = "UTF-8"
          )$value
        ),
        tabItem(
          tabName = "iwate",
          source(
            file = paste0(PAGE_PATH, "Pref/Iwate-UI.R"),
            local = T,
            encoding = "UTF-8"
          )$value
        ),
        tabItem(
          tabName = "miyagi",
          source(
            file = paste0(PAGE_PATH, "Pref/Miyagi-UI.R"),
            local = T,
            encoding = "UTF-8"
          )$value
        ),
        tabItem(
          tabName = "ibaraki",
          source(
            file = paste0(PAGE_PATH, "Pref/Ibaraki-UI.R"),
            local = T,
            encoding = "UTF-8"
          )$value
        ),
        tabItem(
          tabName = "kanagawa",
          source(
            file = paste0(PAGE_PATH, "Pref/Kanagawa-UI.R"),
            local = T,
            encoding = "UTF-8"
          )$value
        ),
        tabItem(
          tabName = "caseMap",
          source(
            file = paste0(PAGE_PATH, "CaseMap.R"),
            local = T,
            encoding = "UTF-8"
          )$value
        ),
        tabItem(
          tabName = "academic",
          source(
            file = paste0(PAGE_PATH, "/Academic/Academic.ui.R"),
            local = T,
            encoding = "UTF-8"
          )$value
        ),
        tabItem(
          tabName = "google",
          source(
            file = paste0(PAGE_PATH, "/Google/PrefMobility.ui.R"),
            local = T,
            encoding = "UTF-8"
          )$value
        ),
        # "About" tab: renders the README matching the current language
        tabItem(
          tabName = "about",
          fluidRow(
            column(
              width = 12,
              boxPlus(
                width = 12,
                collapsible = F,
                fluidRow(
                  column(
                    width = 12,
                    tagList(
                      includeMarkdown(paste0("README", ifelse(languageSetting == "ja", "", paste0(".", languageSetting)), ".md"))
                    )
                  )
                )
              )
            )
          )
        )
      )
    ),
    # Footer: author card on the left, social links on the right
    footer = dashboardFooter(
      left_text = tagList(userPost(
        id = 1,
        src = "profile.png",
        author = tagList(
          tags$small("Developed by"),
          "Su Wei"
        ),
        collapsible = F,
        description = "Front-End Engineer | ex-Bioinformatician"
      )),
      right_text = tagList(
        tags$div(
          style = "font-size:22px;letter-spacing: .3rem;",
          tags$a(href = "https://github.com/swsoyee/2019-ncov-japan", icon("github")),
          tags$a(href = "https://twitter.com/swsoyee", icon("twitter")),
          tags$a(href = "https://www.linkedin.com/in/infinityloop/", icon("linkedin"))
        )
      )
    )
  )
)
|
eee15ef8c435a81c72edf776c440dcf5bd4eb1de
|
d859174ad3cb31ab87088437cd1f0411a9d7449b
|
/autonomics.find/man/infer_contrast_names.Rd
|
927ad661ff2f2a0f58dd56ba72af4239e70f2fbe
|
[] |
no_license
|
bhagwataditya/autonomics0
|
97c73d0a809aea5b4c9ef2bf3f886614eceb7a3c
|
c7ca7b69161e5181409c6b1ebcbeede4afde9974
|
refs/heads/master
| 2023-02-24T21:33:02.717621
| 2021-01-29T16:30:54
| 2021-01-29T16:30:54
| 133,491,102
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 463
|
rd
|
infer_contrast_names.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/infer_contrasts.R
\name{infer_contrast_names}
\alias{infer_contrast_names}
\title{Infer contrast names}
\usage{
infer_contrast_names(object)
}
\arguments{
\item{object}{eset}
}
\value{
character vector
}
\description{
Infer contrast names
}
\examples{
require(magrittr)
if (require(subramanian.2016)){
subramanian.2016::exiqon \%>\%
autonomics.find::infer_contrast_names()
}
}
|
cf7b86b998d4d1eba74bdba51164796e17e266f1
|
2b2eb91afad071c939bbb1c251e5ee87dca7e709
|
/inst/unitTests/nodeAndEdgeData_test.R
|
0aea9cbd35bc461e77d1e5703946414c429ded33
|
[] |
no_license
|
vgpprasad91/graph
|
7a255c5652bf43722e16020aa4e82f01c43533fa
|
f8cad8e67dffc73106c918ace0904f109255ea1f
|
refs/heads/master
| 2021-01-24T09:01:12.583974
| 2017-06-05T12:19:34
| 2017-06-05T12:19:34
| 93,400,581
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,721
|
r
|
nodeAndEdgeData_test.R
|
#
# Test setup
#
## Build the 4x4 symmetric adjacency matrix used as the undirected test
## fixture:
##        a   b
##        |\ /|
##        | c |
##        |/ \|
##        d---+   (a-c, a-d, b-c, b-d, c-d)
simpleInciMat <- function() {
    labels <- letters[1:4]
    adj <- rbind(c(0, 0, 1, 1),
                 c(0, 0, 1, 1),
                 c(1, 1, 0, 1),
                 c(1, 1, 1, 0))
    dimnames(adj) <- list(labels, labels)
    adj
}
## Build the directed test fixture as a graphAM object:
##   a -> c, a -> d, b -> c, d -> b, d -> c
## (rows are edge sources, columns are edge targets)
simpleDirectedGraph <- function() {
    labels <- letters[1:4]
    adj <- matrix(0, nrow = 4, ncol = 4, dimnames = list(labels, labels))
    adj["a", c("c", "d")] <- 1
    adj["b", "c"] <- 1
    adj["d", c("b", "c")] <- 1
    new("graphAM", adjMat = adj, edgemode = "directed")
}
## nodeDataDefaults(): empty list before any attribute is defined; round-trip
## of a named default list; per-attribute get/set; errors for unknown
## attribute names and for unnamed default lists.
testNodeDataDefaults <- function() {
    mat <- simpleInciMat()
    g1 <- new("graphAM", adjMat=mat)

    ## If no attributes have been defined, empty list.
    checkEquals(list(), nodeDataDefaults(g1))

    ## Can assign a named list
    myEdgeAttributes <- list(foo=1, bar="blue")
    nodeDataDefaults(g1) <- myEdgeAttributes
    checkEquals(myEdgeAttributes, nodeDataDefaults(g1))
    checkEquals(myEdgeAttributes$foo, nodeDataDefaults(g1, attr="foo"))

    nodeDataDefaults(g1, attr="size") <- 400
    checkEquals(400, nodeDataDefaults(g1, attr="size"))

    checkException(nodeDataDefaults(g1, attr="NOSUCHATTRIBUTE"), silent=TRUE)
    checkException(nodeDataDefaults(g1) <- list(1, 3, 4),
                   silent=TRUE) ## must have names
}
## edgeDataDefaults(): same contract as nodeDataDefaults(), but for edge
## attributes.
testEdgeDataDefaults <- function() {
    mat <- simpleInciMat()
    g1 <- new("graphAM", adjMat=mat)

    ## If no attributes have been defined, empty list.
    checkEquals(list(), edgeDataDefaults(g1))

    ## Can assign a named list
    myEdgeAttributes <- list(foo=1, bar="blue")
    edgeDataDefaults(g1) <- myEdgeAttributes
    checkEquals(myEdgeAttributes, edgeDataDefaults(g1))
    checkEquals(myEdgeAttributes$foo, edgeDataDefaults(g1, attr="foo"))

    edgeDataDefaults(g1, attr="size") <- 400
    checkEquals(400, edgeDataDefaults(g1, attr="size"))

    checkException(edgeDataDefaults(g1, attr="NOSUCHATTRIBUTE"), silent=TRUE)
    checkException(edgeDataDefaults(g1) <- list(1, 3, 4),
                   silent=TRUE) ## must have names
}
## nodeData() getters: single node/attr, multiple nodes, all nodes for one
## attr, all attrs for one node, and the full attribute table.
testNodeDataGetting <- function() {
    mat <- simpleInciMat()
    g1 <- new("graphAM", adjMat=mat)
    myAttributes <- list(size=1, dim=c(3, 3), name="fred")
    nodeDataDefaults(g1) <- myAttributes

    checkEquals("fred", nodeData(g1, "a", attr="name")[[1]])

    someNodes <- c("a", "b")
    expect <- as.list(c(1, 1))
    names(expect) <- someNodes
    checkEquals(expect, nodeData(g1, n=someNodes, attr="size"))

    ## omitting n returns the attribute for every node
    expect <- as.list(rep("fred", length(nodes(g1))))
    names(expect) <- nodes(g1)
    checkEquals(expect, nodeData(g1, attr="name"))

    checkEquals(myAttributes, nodeData(g1, n="a")[[1]])

    ## no arguments: full per-node attribute lists, all equal to the defaults
    everything <- nodeData(g1)
    for (alist in everything)
      checkEquals(myAttributes, alist)
}
## nodeData() setters: scalar assignment, recycling over several nodes,
## vectorized assignment, assignment to all nodes, and error cases for
## unknown node / unknown attribute.
testNodeDataSetting <- function() {
    mat <- simpleInciMat()
    g1 <- new("graphAM", adjMat=mat)
    myAttributes <- list(size=1, dim=c(3, 3), name="fred")
    nodeDataDefaults(g1) <- myAttributes

    ## unknown node is error
    checkException(nodeData(g1, n="UNKNOWN_NODE", attr="size") <- 5, silent=TRUE)
    ## unknown attr is error
    checkException(nodeData(g1, n="a", attr="UNKNOWN") <- 5, silent=TRUE)

    nodeData(g1, n="a", attr="size") <- 5
    checkEquals(5, nodeData(g1, n="a", attr="size")[[1]])

    ## a scalar RHS is recycled across all addressed nodes
    nodeData(g1, n=c("a", "b", "c"), attr="size") <- 50
    expect <- myAttributes
    expect[["size"]] <- 50
    checkEquals(list(a=expect, b=expect, c=expect),
                nodeData(g1, n=c("a", "b", "c")))

    ## a vector RHS is assigned element-wise
    nodeData(g1, n=c("a", "b", "c"), attr="size") <- c(1, 2, 3)
    checkEquals(c(1, 2, 3),
                as.numeric(nodeData(g1, n=c("a", "b", "c"), attr="size")))

    ## omitting n assigns the attribute on every node
    nodeData(g1, attr="name") <- "unknown"
    expect <- as.list(rep("unknown", length(nodes(g1))))
    names(expect) <- nodes(g1)
    checkEquals(expect, nodeData(g1, attr="name"))
}
## edgeData() getters: single edge, vectors of from/to, all edges for one
## attr, from-only and to-only addressing; result names use "from|to".
testEdgeDataGetting <- function() {
    mat <- simpleInciMat()
    g1 <- new("graphAM", adjMat=mat)
    myAttributes <- list(size=1, dim=c(3, 3), name="fred")
    edgeDataDefaults(g1) <- myAttributes

    checkEquals("fred", edgeData(g1, from="a", to="d", attr="name")[[1]])

    fr <- c("a", "b")
    to <- c("c", "c")
    expect <- as.list(c(1, 1))
    names(expect) <- c("a|c", "b|c")
    checkEquals(expect, edgeData(g1, fr, to, attr="size"))

    ## one value per (directed) edge occurrence
    expect <- rep("fred", sum(sapply(edges(g1), length)))
    checkEquals(expect, as.character(edgeData(g1, attr="name")))

    checkEquals(myAttributes, edgeData(g1, from="a", to="c")[[1]])

    everything <- edgeData(g1)
    for (alist in everything)
      checkEquals(myAttributes, alist)

    ## from-only: all edges leaving "d"
    got <- edgeData(g1, from="d", attr="size")
    checkEquals(3, length(got))
    checkEquals(rep(1, 3), as.numeric(got))

    ## to-only: all edges entering "d"
    got <- edgeData(g1, to="d", attr="size")
    checkEquals(3, length(got))
    checkEquals(rep(1, 3), as.numeric(got))

    expect <- c("a|c", "a|d", "d|a", "d|b", "d|c")
    checkEquals(expect, names(edgeData(g1, from=c("a", "d"), attr="name")))
}
## to-only addressing on an undirected graph: edges into "a" and "b" are
## reported with the neighbor as the "from" endpoint.
testEdgeDataToOnlyUndir <- function() {
    mat <- simpleInciMat()
    ## give two edges non-default weights via the adjacency matrix values
    mat[1, 3] <- mat[3, 1] <- 100
    mat[1, 4] <- mat[4, 1] <- 200
    g1 <- new("graphAM", adjMat=mat, values=list(weight=1))
    got <- edgeData(g1, to=c("a", "b"), attr="weight")
    expect <- c("c|a", "d|a", "c|b", "d|b")
    checkEquals(expect, names(got))
}
## to-only addressing on a directed graph: only true in-edges are returned
## (the only edge into {a, b} in the fixture is d -> b).
testEdgeDataToOnlyDir <- function() {
    g1 <- simpleDirectedGraph()
    edgeDataDefaults(g1, attr="weight") <- 1
    edgeData(g1, from=c("a", "b"), to=c("c", "c"), attr="weight") <- c(10, 20)
    got <- edgeData(g1, to=c("a", "b"), attr="weight")
    expect <- c("d|b")
    checkEquals(expect, names(got))
}
## edgeData() setters on a directed graph: scalar and vector assignment,
## plus from-only and to-only assignment (affecting only existing edges).
testEdgeDataSettingDirected <- function() {
    g1 <- simpleDirectedGraph()
    myAttributes <- list(size=1, dim=c(3, 3), name="fred")
    edgeDataDefaults(g1) <- myAttributes

    edgeData(g1, from="a", to="d", attr="name") <- "Joe"
    expect <- myAttributes
    expect[["name"]] <- "Joe"
    checkEquals(expect, edgeData(g1, from="a", to="d")[[1]])

    fr <- c("a", "b")
    to <- c("c", "c")
    ## scalar RHS is recycled over the addressed edges
    expect <- as.list(c(5, 5))
    names(expect) <- c("a|c", "b|c")
    edgeData(g1, fr, to, attr="size") <- 5
    checkEquals(expect, edgeData(g1, fr, to, attr="size"))

    ## vector RHS is assigned element-wise
    expect <- as.list(c(10, 20))
    names(expect) <- c("a|c", "b|c")
    edgeData(g1, fr, to, attr="size") <- c(10, 20)
    checkEquals(expect, edgeData(g1, fr, to, attr="size"))

    edgeData(g1, from="a", attr="size") <- 555
    checkEquals(rep(555, 2), as.numeric(edgeData(g1, from="a", attr="size")))

    edgeData(g1, to="b", attr="size") <- 111
    checkEquals(111, as.numeric(edgeData(g1, to="b", attr="size")))
}
## edgeData() setters on an undirected graph: every assignment must also be
## visible on the reciprocal edge (x|y and y|x share attributes).
testEdgeDataSettingUndirected <- function() {
    mat <- simpleInciMat()
    g1 <- new("graphAM", adjMat=mat)
    myAttributes <- list(size=1, dim=c(3, 3), name="fred")
    edgeDataDefaults(g1) <- myAttributes

    edgeData(g1, from="a", to="d", attr="name") <- "Joe"
    expect <- myAttributes
    expect[["name"]] <- "Joe"
    checkEquals(expect, edgeData(g1, from="a", to="d")[[1]])
    ## verify reciprocal edge data was set
    checkEquals("Joe", edgeData(g1, from="d", to="a", attr="name")[[1]])

    fr <- c("a", "b")
    to <- c("c", "c")
    expect <- as.list(c(5, 5))
    names(expect) <- c("a|c", "b|c")
    edgeData(g1, fr, to, attr="size") <- 5
    checkEquals(expect, edgeData(g1, fr, to, attr="size"))
    names(expect) <- c("c|a", "c|b")
    checkEquals(expect, edgeData(g1, to, fr, attr="size"))

    expect <- as.list(c(10, 20))
    names(expect) <- c("a|c", "b|c")
    edgeData(g1, fr, to, attr="size") <- c(10, 20)
    checkEquals(expect, edgeData(g1, fr, to, attr="size"))
    names(expect) <- c("c|a", "c|b")
    checkEquals(expect, edgeData(g1, to, fr, attr="size"))

    ## from-only / to-only assignments also propagate to reciprocal edges
    edgeData(g1, from="a", attr="size") <- 555
    checkEquals(rep(555, 2), as.numeric(edgeData(g1, from="a", attr="size")))
    checkEquals(555, edgeData(g1, from="c", to="a", attr="size")[[1]])

    edgeData(g1, to="b", attr="size") <- 111
    checkEquals(rep(111, 2), as.numeric(edgeData(g1, to="b", attr="size")))
    checkEquals(111, edgeData(g1, from="c", to="b", attr="size")[[1]])
}
## from-only assignment: setting an attribute for all edges leaving a set of
## nodes updates every matching edge.
testEdgeDataSettingFromOnly <- function() {
    mat <- simpleInciMat()
    g1 <- new("graphAM", adjMat=mat)
    myAttributes <- list(size=1, dim=c(3, 3), name="fred")
    edgeDataDefaults(g1) <- myAttributes

    expect <- rep("fred", 5)
    got <- unlist(edgeData(g1, from=c("a", "d"), attr="name"), use.names=FALSE)
    checkEquals(expect, got, "precondition check")

    edgeData(g1, from=c("a", "d"), attr="name") <- "Sam"
    expect <- rep("Sam", 5)
    got <- unlist(edgeData(g1, from=c("a", "d"), attr="name"), use.names=FALSE)
    checkEquals(expect, got, "use from only in assign")
}
## graph:::.normalizeEdges recycles a length-1 from/to against the other
## vector and errors on incompatible lengths.
## NOTE(review): local `t` shadows base::t within this function — harmless
## here, but worth renaming.
testNormalizeEdges <- function() {
    checkException(graph:::.normalizeEdges(c("b", "d"), c("a", "b", "c")), silent=TRUE)
    checkException(graph:::.normalizeEdges(c("a", "b", "c"), c("a", "e")), silent=TRUE)
    f <- letters[1:10]
    t <- letters[11:20]
    checkEquals(list(from=f, to=t), graph:::.normalizeEdges(f, t))
    checkEquals(list(from=c("a", "a", "a"), to=c("a", "b", "c")),
                graph:::.normalizeEdges("a", c("a", "b", "c")))
    checkEquals(list(from=c("a", "b", "c"), to=c("d", "d", "d")),
                graph:::.normalizeEdges(c("a", "b", "c"), "d"))
}
|
1835f6c9f0c8ed607a1dd0b492cccd81919f1885
|
f5bbcd5d0436d05c52ec56378f3f77152f739535
|
/R/EMSfin.r
|
cdfb95d7f1f743a63087eee8d3fc953ec870d067
|
[] |
no_license
|
cran/varcompci
|
931e354dd5f86588398fda731311979744f6b050
|
e5d9a6d909762e9b9ecef462b2ec9c660a8ff58d
|
refs/heads/master
| 2020-04-14T23:46:07.129750
| 2011-02-14T00:00:00
| 2011-02-14T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,634
|
r
|
EMSfin.r
|
# S4 container for Expected Mean Squares (EMS) results. All slots are
# matrices holding different renderings of the same EMS computation.
# Uses the `slots =` argument (representation() is superseded).
setClass("EMSc", slots = c(
  EMSpretty    = "matrix",  # human-readable EMS table (shown by `show`)
  result_EMS   = "matrix",  # raw EMS coefficient matrix
  namesdesc    = "matrix",  # term names / descriptions
  result_EMSlF = "matrix",  # EMS with fixed-factor terms resolved
  final_EMS    = "matrix"   # final EMS used downstream
))
# Slot accessor sugar: x["slotname"] returns the corresponding slot so
# callers can read results without touching @ directly.
setMethod(
  f = "[",
  signature = c("EMSc", "character", "missing", "missing"),
  def = function(x, i, j, drop) {
    # switch on the slot name; pass `i` positionally instead of the former
    # `EXP = i`, which only worked via partial matching of `EXPR`
    switch(i,
      EMSpretty    = return(x@EMSpretty),
      result_EMS   = return(x@result_EMS),
      namesdesc    = return(x@namesdesc),
      result_EMSlF = return(x@result_EMSlF),
      final_EMS    = return(x@final_EMS),
      stop("Error: ", i, " is not an EMSc slot")
    )
  }
)
# Default display for EMSc objects: print the pretty-formatted EMS table
# (retrieved through the "[" accessor defined above).
.EMSc.show = function(object) {
  cat("Expected Mean Square in nice format\n")
  print(object["EMSpretty"])
}
setMethod(f = "show", signature = "EMSc", definition = .EMSc.show)
# Compute Expected Mean Squares for a variance-components design and wrap
# the results in an "EMSc" object.
#
# totvar  - vector of factor names in the model (length nv)
# Matrix  - design description consumed by the package-internal helpers
# dsn     - design specification
#
# NOTE(review): infoNLST, combinposs, EMSmatrix, EMS, EMSwdesc, EMSlF and
# last_EMS are package-internal helpers defined elsewhere — their exact
# contracts are assumed here; verify against the rest of varcompci.
prettyEMSf <- function(totvar, Matrix, dsn, ...) {
  nv <- length(totvar)
  # enumerate subscript/nesting possibilities for the nv factors
  possibilities = infoNLST(totvar, nv, Matrix, dsn)
  countper = combinposs(nv)
  # EMSmatrix returns: [[1]] EMS matrix, [[2]] factor subscripts,
  # [[3]] factor types (fixed/random), [[4]] term names
  EMSmat = EMSmatrix(possibilities, Matrix, nv, countper, totvar)
  matrix_EMS = EMSmat[[1]]
  subscripfact = EMSmat[[2]]
  typefact = EMSmat[[3]]
  matrixnameslnw = EMSmat[[4]]
  # number of terms (rows of the EMS matrix)
  # NOTE(review): the original comment said "number of columns", but nrow()
  # counts rows — confirm intended meaning.
  nvari = nrow(matrix_EMS)
  # Expected Mean Square ("99999" is SAS Q(Vx))
  rEMS = EMS(subscripfact, typefact, nvari, matrix_EMS, matrixnameslnw, nv)
  object = new(Class = "EMSc")
  object@result_EMS = rEMS[[1]]
  object@namesdesc = rEMS[[2]]
  object@EMSpretty = EMSwdesc(object@result_EMS, object@namesdesc, nvari, matrixnameslnw)
  object@result_EMSlF = as.matrix(EMSlF(object@result_EMS, typefact, nvari))
  object@final_EMS = last_EMS(nvari, object@result_EMS)
  return(object)
}
|
b5cbd8f3951009aded55048635d37b85354819a1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MEMSS/examples/Oats.Rd.R
|
fc015a8988179f72ef561563a03db07e542ac1be
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 158
|
r
|
Oats.Rd.R
|
# Example script extracted from the MEMSS help page for the `Oats` dataset.
library(MEMSS)
### Name: Oats
### Title: Split-plot Experiment on Varieties of Oats
### Aliases: Oats
### Keywords: datasets

### ** Examples

# Display the structure of the Oats data frame
str(Oats)
|
caaaee4e7f71ea58c02a0dd46c794622d6bcf695
|
092dac9550c38286881f11957d6afdee4232253b
|
/man/simumix.Rd
|
d4e266048a3829a1dd3c411a1ae53d0222386955
|
[] |
no_license
|
hindantation/forensim
|
b60016864c7cfa058a0958fd02fe0c5588b8393d
|
0eca44897e6ea6b5bec99217c18362da8b266262
|
refs/heads/master
| 2022-10-11T20:04:39.176056
| 2022-10-04T07:39:12
| 2022-10-04T07:39:12
| 220,780,908
| 0
| 1
| null | 2022-10-04T07:39:13
| 2019-11-10T11:35:19
|
R
|
UTF-8
|
R
| false
| false
| 1,757
|
rd
|
simumix.Rd
|
\encoding{UTF-8}
\name{simumix}
\alias{simumix-class}
\alias{names,simumix-method}
\alias{print,simumix-method}
\alias{show,simumix-method}
\title{forensim class for DNA mixtures}
\description{The S4 \code{simumix} class is used to store DNA mixtures of individual genotypes
along with information about the individuals' populations and the loci used to simulate the genotypes.}
\section{Slots}{
\describe{
\item{\code{ncontri}:}{ integer vector giving the number of contributors to the DNA mixture. If there are
several populations, \code{ncontri} gives the number of contributors per population}
\item{\code{mix.prof}:}{ matrix giving the contributors genotypes (in rows) for each locus (in columns).
The genotype of a homozygous individual carrying the allele "12" is coded "12/12". A heterozygous
individual carrying alleles "12" and "13" is coded "12/13" or "13/12".}
\item{\code{mix.all}:}{list giving the alleles present in the mixture for each locus}
\item{\code{which.loc}:}{ character vector giving the locus names}
\item{\code{popinfo}:}{ factor giving the population of each contributor }
}
}
\section{Methods}{
\describe{
\item{names}{\code{signature(x = "simumix")}: gives the names of the attributes of a simumix object }
\item{show}{\code{signature(object = "simumix")}: shows a simumix object}
\item{print}{\code{signature(object = "simumix")}: prints a simumix object }
}
}
\seealso{ \code{\linkS4class{simugeno}}, \code{\link{as.simumix}}, \code{\link{is.simumix}}, \code{\link{simugeno}} and \code{\link{tabfreq}}}
\author{ Hinda Haned \email{h.haned@nfi.minvenj.nl} }
\examples{
\dontrun{
showClass("simumix")
data(strusa)
}
}
\keyword{classes}
\keyword{manip}
\keyword{datagen}
|
46f3f21869e786ee78adb594315136e844d1adbd
|
bf52e409724bea2c2098ed611f879ab5e767f562
|
/plot2.R
|
2e73249fe142e4f441ca0cd8c8f8a8ad0edf95c0
|
[] |
no_license
|
hayesn22/ExData_Plotting1
|
7fc7a4f5e5428f64907921fb0325b8538ff08152
|
2b7e9d87c6f9b24dbf8084ee2b9b8d93a596a956
|
refs/heads/master
| 2020-06-25T14:45:02.836233
| 2019-07-29T00:15:19
| 2019-07-29T00:15:19
| 199,341,019
| 0
| 0
| null | 2019-07-28T21:57:13
| 2019-07-28T21:57:13
| null |
UTF-8
|
R
| false
| false
| 1,005
|
r
|
plot2.R
|
# plot2.R — Global Active Power over 2007-02-01/02 (line chart).
#
# Fixes in this revision:
#  * timestamps were previously rebuilt by hard-coded row ranges (1:1440 and
#    1441:2880), which silently breaks if any minute is missing; the
#    timestamp is now built directly from each row's Date + Time.
#  * avoids storing a POSIXlt (strptime) column in a data.frame; uses
#    POSIXct instead.

# skip = 1 drops the header row, so assign column names explicitly
householdpower <- read.table("household_power_consumption.txt", skip = 1, sep = ";",
                             na.strings = "?",
                             colClasses = c("character", "character", rep("numeric", 7)))
names(householdpower) <- c("Date", "Time", "Global_active_power", "Global_reactive_power",
                           "Voltage", "Global_intensity", "Sub_metering_1",
                           "Sub_metering_2", "Sub_metering_3")

# Keep only the two target days (Date is still in d/m/Y string form here)
subhousepower <- subset(householdpower,
                        householdpower$Date == "1/2/2007" | householdpower$Date == "2/2/2007")

# Full timestamp per row, built from the row's own date and time
subhousepower$Time <- as.POSIXct(paste(subhousepower$Date, subhousepower$Time),
                                 format = "%d/%m/%Y %H:%M:%S")
subhousepower$Date <- as.Date(subhousepower$Date, format = "%d/%m/%Y")

plot(subhousepower$Time, subhousepower$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.copy(png, "plot2.png", width = 480, height = 480)
dev.off()
|
90d07b9fc6bd09a5d55cf7227d9f237c389eb242
|
67777576fda46de12cc276f08470d0001666b521
|
/R/sample_intercept.R
|
08f8314047d9eb68eed136882c4a07075168a7b7
|
[] |
no_license
|
yjustc2019/WeibullHM
|
e883690d702e6cb3e13f79ee565b1489883bb2c0
|
357b12921507b11deebe65264a04923deedf5e3c
|
refs/heads/master
| 2021-12-22T13:33:08.547347
| 2017-10-15T01:05:06
| 2017-10-15T01:05:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,149
|
r
|
sample_intercept.R
|
#
# sample_intercept.R
#
# Created by Zhifei Yan
# Last update 2017-4-22
#

#' Sample intercept effects
#'
#' Produce a Gibbs sample of intercept effects of all states
#'
#' @param y a matrix of current update of Weibull log scale parameters
#' @param alpha a matrix of current update of subject random effects
#' @param var_logscale a vector of current update of variances of Weibull log scale parameters
#' @param m_mu a vector of means of multivariate normal prior of intercept effects
#' @param sigma_mu_inv inverse covariance matrix of multivariate normal prior
#' of intercept effects
#' @param nsubj total number of subjects
#'
#' @return A Gibbs sample of intercept effects of all states
#' @export
sample_intercept <- function(y, alpha, var_logscale, m_mu,
                             sigma_mu_inv, nsubj) {
  # Precision-weighted sufficient statistics of the normal likelihood for
  # the intercepts: column sums of the residuals y - alpha, scaled by the
  # per-state variances.
  xty <- apply(y - alpha, 2, sum) / var_logscale
  xtx <- nsubj * diag(1 / var_logscale)
  # Conjugate multivariate-normal posterior: covariance and mean
  cov_post <- solve(sigma_mu_inv + xtx)
  m_post <- cov_post %*% (sigma_mu_inv %*% m_mu + xty)
  # Rejection sampling: redraw until the vector is nondecreasing across
  # states (draw == cummax(draw) iff monotone nondecreasing), enforcing the
  # ordered-intercept constraint.
  # NOTE(review): mvrnorm is presumably MASS::mvrnorm (loaded by the
  # package); the loop has no iteration cap and can run long if the
  # posterior puts little mass on the ordered region — consider a guard.
  draw <- mvrnorm(1, m_post, cov_post)
  while (! all(draw == cummax(draw))) {
    draw <- mvrnorm(1, m_post, cov_post)
  }
  draw
}
|
1d29204bbdb0c90fab17ca129b9f6705287e5fd4
|
7e0e64d363b4dde2bce1840bf5dd0f2199f0b88e
|
/send_mail.R
|
568faad8f7f68cd347ecfadcc061b01db40e970f
|
[] |
no_license
|
krzyslom/auto_mail
|
2e6a5c6433ee8d25359df087803048316de5e6d9
|
d5803f0b1fdfd88a6cc6ac578e4049e7844c8778
|
refs/heads/master
| 2021-01-22T07:57:25.271867
| 2017-05-30T14:17:44
| 2017-05-30T14:17:44
| 92,586,919
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,530
|
r
|
send_mail.R
|
# Personalised bulk-mail script: renders one HTML mail per form respondent
# from an R Markdown template and sends it over SMTP.
# devtools::install_github("rpremraj/mailR") # requires a Java installation
library(dplyr)
library(knitr)
library(mailR)
library(rmarkdown)
# library(readr)
# library(tidyr)

# Mail account / server parameters (placeholder values — fill in before use)
from <- "user@domena.dom"
subject <- "Temat"
smtp <- list(host.name = "Adres servera SMTP",
             port = 465,
             user.name = "Użytkownik",
             passwd = "Hasło",
             ssl = TRUE)

# Recipient data: a Google Spreadsheet export backed by a Google Form
# (File -> Download as -> csv); columns used below: Nazwisko (surname),
# Imię (first name), email
spreadsheet <- readr::read_csv("ścieżka/do/spreadsheet.csv")

# For each respondent: render a personalised HTML body, then send it.
# NOTE(review): render() writes "<Nazwisko>_<Imię>.html" into output_dir
# "email", but send.mail() reads "docs/mail_content.html" — verify these
# paths are intentional.
for (i in 1:nrow(spreadsheet)) {
  rmarkdown::render(input = "mail_content.Rmd",
                    output_format = "html_document",
                    output_file = paste0(
                      paste(spreadsheet[i, ]$Nazwisko,
                            spreadsheet[i, ]$Imię,
                            sep = "_"),
                      ".html"),
                    output_dir = "email",
                    params = list(form = spreadsheet[i, ]),
                    encoding = "utf-8")

  # send = FALSE builds the message object; it is dispatched explicitly below
  email <- mailR::send.mail(from = from,
                            to = spreadsheet[i, ]$email,
                            subject = subject,
                            body = "docs/mail_content.html",
                            encoding = "utf-8",
                            html = TRUE,
                            smtp = smtp,
                            authenticate = TRUE,
                            send = FALSE)
  email$send() # alternatively set send = TRUE above
}
|
a2d0b25dfe4029a266b556af9baa55643e505b53
|
c4b88246c20acd36790f988b3fc1aa59170a9cd1
|
/48674698/spawn.R
|
0b22a6a6894a141735b440ddfe37216315c51257
|
[] |
no_license
|
dewittpe/so
|
48d6ef6b75a851c831fe78a1829f5acd264caed9
|
f11721642a1ad765e3bc382659dd2b7d0de21160
|
refs/heads/master
| 2021-06-07T11:47:39.334548
| 2020-03-12T15:22:29
| 2020-03-12T15:22:29
| 132,039,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 376
|
r
|
spawn.R
|
rp <- processx::run("R", "--vanilla")
rp <- processx::process$new("R", "--vanilla")
rp$is_alive()
servr::httd(daemon = TRUE, browser = FALSE, port = 4321)
R.utils::withTimeout(
{
s <- rvest::html_session("http://127.0.0.1:4321")
},
timeout = 3,
onTimeout = "error")
s
|
c75993d4d21ac7b93de22e2c29d4a0f228eea491
|
ec9ad043b7eb8c868e972fc21011b89e32a0433e
|
/man/make_filename.Rd
|
3a90ff945be773c28f9a98f57c7ec06ddef7dfad
|
[] |
no_license
|
olb1605/FARS_Package_Coursera
|
ef65a1063435af897926a4780b2518ef1409481b
|
f16459798041f02cef306f24d67bf77763a29310
|
refs/heads/master
| 2020-03-14T03:59:55.807754
| 2018-04-28T21:13:33
| 2018-04-28T21:13:33
| 131,431,675
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 805
|
rd
|
make_filename.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{make_filename}
\alias{make_filename}
\title{Function "make_filename" - creates a file name in a specified format}
\usage{
make_filename(year)
}
\arguments{
\item{year}{A string or an integer}
}
\value{
This function returns a string with the CSV file name for a given
year
}
\description{
Function creates a string to be used as a name of CSV file
using a given year and a string "accident_" ... ".csv.bz2".
}
\details{
This function contributes to the functions fars_read_years,
fars_map_state
}
\note{
The returned string of this function used in the function fars_read
and as an input in the function fars_map_state to make an input parameter
filename
}
\examples{
\dontrun{
make_filename(2015)}
}
|
d0bc5d423534a62eb7afe4be98807f005c50be14
|
c234c1b9ee9fd67821c552a86dbe1ae65c3ab77d
|
/app.R
|
af0f433d45db0ac1c867d73d61b54a80c7d9d37d
|
[
"MIT"
] |
permissive
|
mick14731/Econ-dashboard
|
4cbf535a152dbeeccbf2f12688afe51f24204933
|
fbd258cb46b133d195bb0b21aab92dd6d76d0fcc
|
refs/heads/master
| 2020-04-20T12:52:54.560548
| 2019-05-21T23:55:04
| 2019-05-21T23:55:04
| 168,854,235
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,179
|
r
|
app.R
|
library(shiny)
source("functions.R")
source("cpi maker.R")
source("Lab_force.R")
ui <- shinyUI(fluidPage(
tabsetPanel(
tabPanel("CPI without",
sidebarLayout(
sidebarPanel(
width = 3,
selectizeInput("choice_Ex","Sectors to exclude:",
choices = colnames(CPI_INDEX.xts),
selected = colnames(CPI_INDEX.xts)[5],
multiple=TRUE,
options = list(placeholder = "Componant",maxItems = 10)),
selectizeInput("choice_add","Sectors to combine:",
choices = colnames(CPI_INDEX.xts),
selected = colnames(CPI_INDEX.xts)[5],
multiple=TRUE,
options = list(placeholder = "Componant",maxItems = 10))),
mainPanel(
width = 9,
fluidRow(column(width = 9,dygraphOutput("excluded"))),
fluidRow(column(width = 9, verbatimTextOutput("exTest"))),
fluidRow(column(width = 9,dygraphOutput("constructed"))),
fluidRow(column(width = 9, verbatimTextOutput("addTest"))))
)
),
tabPanel("Labour market",
sidebarLayout(
sidebarPanel(
witdh=3,
selectizeInput("labStat","Measure to view:",
choices = lab_chars,
selected = "Unemployment rate",
multiple=FALSE,
options = list(placeholder = "Labour measure")),
sliderInput("year",h3("Select year:"),
min = 1976, max = 2018, step = 1, value = 2000,
animate = FALSE)),
mainPanel(leafletOutput("labour"))
))
)
))
server <- shinyServer(function(input, output,session) {
#####
graph_data_ex <- reactive(
ind_sub(base = CPI_INDEX.xts$`All-items`*CPI_Weights.xts$`All-items`/RV$`All-items`,
choices = input$choice_Ex,
indexs = CPI_INDEX.xts,
refValues = RV,
Weights = CPI_Weights.xts)
)
#####
graph_data_add <- reactive(
ind_add(choices = input$choice_add,
indexs = CPI_INDEX.xts,
refValues = RV,
Weights = CPI_Weights.xts)
)
#####
output$constructed <- renderDygraph(
dygraph(graph_data_add())%>%
dyRangeSelector()
)
#####
output$excluded <- renderDygraph(
dygraph(graph_data_ex())%>%
dyRangeSelector()
)
#####
output$exText <- renderText( c("CPI inflation excluding:",paste(input$choice_Ex, collapse = ", " )))
output$addText <- renderText( c("CPI inflation of:",paste(input$choice_add, collapse = ", " )))
#####
lng.center <- -99
lat.center <- 55
zoom.def <- 3
get_data <- reactive({
lab_data[which(lab_data$year == input$year & lab_data$`Labour force characteristics`==input$labStat),]
})
pal <- reactive({
colorNumeric("viridis", domain = legend_values())
})
legend_values <- reactive(
switch(input$labStat,ranges[[input$labStat]])
)
output$labour <- renderLeaflet({
leaflet(data = data.p) %>%
addProviderTiles("OpenStreetMap.Mapnik", options = providerTileOptions(opacity = 1), group = "Open Street Map") %>%
setView(lng = lng.center, lat = lat.center, zoom = zoom.def) %>%
addPolygons(group = 'base',
fillColor = 'transparent',
color = 'black',
weight = 1.5) %>%
addLegend(pal = pal(),
values = legend_values(),
opacity = 0.7,
title = NULL,
position = "topright")
})
observe({
l_data <- get_data()
leafletProxy('labour', data = l_data) %>%
clearGroup('polygons') %>%
addPolygons(group = 'polygons',
fillColor = ~pal()(VALUE),
fillOpacity = 0.9,
color = 'black',
weight = 1.5)
})
#####
})
#####
# Run the application
shinyApp(ui = ui, server = server)
|
4f39306430b8ea1cf0b105d96a634e6f44e31cfe
|
9b6f4de24c64ddc70e4ec59bd2f030dd58436a94
|
/TS/ts3.R
|
470c32ecf1f64486db8065a7f0675d4235a4be84
|
[] |
no_license
|
GopalKrishna-P/analytics
|
e1c5207fc1f8132db371886a83f80b7272786ee0
|
232a32b7317ede1897794745acd6f2b7b0ac393d
|
refs/heads/master
| 2023-04-26T18:24:14.978599
| 2021-05-29T09:50:16
| 2021-05-29T09:50:16
| 129,134,381
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,769
|
r
|
ts3.R
|
# Time Series
#dataset
AirPassengers
class(AirPassengers)
JohnsonJohnson
nhtemp
Nile
sunspots
ds = list(AirPassengers,JohnsonJohnson,nhtemp,Nile,sunspots)
sapply(ds, class)
# Sales TS Data
sales = c(18, 33, 41, 7, 34, 35, 24, 25, 24, 21, 25, 20,
22, 31, 40, 29, 25, 21, 22, 54, 31, 25, 26, 35)
tsales = ts(sales, start=c(2003, 1), frequency=12)
tsales
plot(tsales)
start(tsales)
end(tsales)
frequency(tsales)
(tsales.subset = window(tsales, start=c(2003, 5), end=c(2004, 6)))
tsales.subset
#SMA
Nile
library(forecast)
opar = par(no.readonly = T)
par(mfrow=c(2,2))
(ylim = range(Nile))
plot(Nile, main='Original TS')
head(Nile)
head(ma(Nile,3))
mean(Nile[1:3])
(1120+1160+963)/3
plot(ma(Nile,3), main='SMA k=3', ylim=ylim)
plot(ma(Nile,7), main='SMA k=7', ylim=ylim)
plot(ma(Nile,15),main='SMA k=15', ylim=ylim)
par(opar)
# Listing 15.4 - Simple exponential smoothing
library(forecast)
nhtemp
par(mfrow=c(1,1))
plot(nhtemp)
(fitse = ets(nhtemp, model='ANN'))
(fitse2 = ses(nhtemp))
forecast(fitse,3)
plot(forecast(fitse,c(3)))
accuracy(fitse)
#Holt Exponential Smoothening
TS = level + slope * t + irregular
plot(AirPassengers)
#log model to use additive model
plot(log(AirPassengers))
(fithe = ets(log(AirPassengers), model='AAA'))
(pred = forecast(fithe, 5))
plot(pred, main='Forecast for Air Travel', ylab='Log (Air Passengers)', xlab='Time')
#since log was used, use exp to get predicted values
pred$mean
(pred$mean = exp(pred$mean))
(pred$lower = exp(pred$lower))
(pred$upper = exp(pred$upper))
(p = cbind(pred$mean, pred$lower, pred$upper))
(pred$mean = exp(pred$mean))
#Holt Winters Exponential Smoothening
TS = level + slope * t + s(t) + irregular
fit <- HoltWinters(nhtemp, beta=FALSE, gamma=FALSE)
fit
forecast(fit, 1)
plot(forecast(fit, 1), xlab="Year", ylab=expression(paste("Temperature (", degree*F,")",)), main="New Haven Annual Mean Temperature")
accuracy(fit)
# Listing 15.5 - Exponential smoothing with level, slope, and seasonal components
fit <- HoltWinters(log(AirPassengers))
fit
accuracy(fit)
pred <- forecast(fit, 5)
pred
plot(pred, main="Forecast for Air Travel",
ylab="Log(AirPassengers)", xlab="Time")
pred$mean <- exp(pred$mean)
pred$lower <- exp(pred$lower)
pred$upper <- exp(pred$upper)
p <- cbind(pred$mean, pred$lower, pred$upper)
dimnames(p)[[2]] <- c("mean", "Lo 80", "Lo 95", "Hi 80", "Hi 95")
p
# Listing 15.6 - Automatic exponential forecasting with ets()
library(forecast)
fit <- ets(JohnsonJohnson)
fit
plot(forecast(fit), main="Johnson and Johnson Forecasts",
ylab="Quarterly Earnings (Dollars)", xlab="Time")
# Listing 15.7 - Transforming the time series and assessing stationarity
library(forecast)
library(tseries)
plot(Nile)
ndiffs(Nile)
dNile <- diff(Nile)
plot(dNile)
adf.test(dNile)
|
a9135555cf42d844d1466741aeae0903fc2b7505
|
a06c3e5453bc4ef2f882b6e71c803a0e9498afe7
|
/class5/041818.R
|
863ea03ffbd6bb91dadd4fc0ffd2ba8e0c610445
|
[] |
no_license
|
clocheltree/bggn213
|
df81d6f60913622d1342d49a7ddc158fe3b0e515
|
7a387939f9b1d2d2c93cab6ab6c2192fb97669b1
|
refs/heads/master
| 2020-03-15T14:24:11.646519
| 2018-06-06T21:30:25
| 2018-06-06T21:30:25
| 132,189,301
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 839
|
r
|
041818.R
|
# Bioinformatics Class 5
# Plots
x <- rnorm(1000,0)
summary(x)
# let's see this data a graph
boxplot(x)
hist(x)
# Section 1 from lab sheet
baby <- read.table("bggn213_05_rstats/weight_chart.txt", header = T)
plot(baby, type = "b", pch = 19, cex = 0.5, lwd = 0.5, ylim=c(2,10), xlab="Age (months)", ylab="Weight (kg)" )
# Section 1B
feat <- read.table("bggn213_05_rstats/feature_counts.txt", sep = "\t", header = T)
par(mar = c(5,11,4,2))
barplot(feat$Count, names.arg = feat$Feature, horiz = T, las = 2)
# Section 2
rawr <- read.delim("bggn213_05_rstats/male_female_counts.txt")
barplot(rawr$Count, col = "000000")
#Expression analysis
palette(c("red", "black", "blue"))
booty <- read.delim("bggn213_05_rstats/up_down_expression.txt")
plot(booty$Condition1, booty$Condition2, col=booty$State, cex=0.5, pch=19)
table(booty$State)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.