blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f5aaf3b3a5e061180f96f91f23fc2a6bd42ca6b9
|
08b7728c2120413ad2266a5e7ae3403e6b67471b
|
/cryptoJNS/R/hashtools.R
|
572bdb1e4c335a3757da30798257d95d91f215de
|
[] |
no_license
|
JamesSolum/Codes-and-Encryption
|
8241438fa8fa6c1e36ecfc293d6c1bf02cc3f36b
|
59df5d2ca7505795bd876db2b532351a28a9a122
|
refs/heads/master
| 2020-05-29T21:04:36.541728
| 2017-02-21T00:09:00
| 2017-02-21T00:09:00
| 82,615,292
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,051
|
r
|
hashtools.R
|
#' Miniature SHA hash
#'
#' Implements a 32-bit hash function by taking the first 32 bits of the SHA-1 hash.
#' NOTE(review): the returned value is four hex characters, i.e. the first
#' two bytes of the digest -- confirm whether 32 bits is really intended.
#'
#' @param x A character string.
#'
#' @return A four-character hash string.
#' @export
#' @import openssl
#'
#' @examples
#' miniSHA("Corned Beef and Haggis") # should be "de0d"
miniSHA <- function(x) {
  digest <- sha1(charToRaw(x))
  # Concatenate the leading digest bytes as their hex representation.
  paste0(digest[1], digest[2])
}
#' MD5 block cipher
#'
#' This block cipher encrypts in 128-bit blocks. Encrypts a raw vector using a variation of
#' OFB mode. (Certainly not very secure.) The initial vector \code{iv} is converted to
#' a raw vector representing its MD5 hash. This raw vector becomes X_1. If the blocks of
#' \code{m} are P_1, P_2, ..., P_k, then O_i is the MD5 hash of X_i using \code{ky} as the
#' HMAC key (as a string), where X_(i+1) = O_i. The ciphertext
#' block C_i is the exclusive-or of O_i and P_i. Because the keystream is
#' independent of the plaintext, applying the function twice with the same
#' key and iv decrypts.
#'
#' @param m A raw vector, representing the message to be encrypted/decrypted.
#' Assumed to have length a multiple of 16 (i.e., the message length is a multiple
#' of 128-bits).
#' @param ky A string, used as the symmetric key.
#' @param iv A string, used as an initial vector.
#'
#' @return A raw vector of the same length as \code{m}
#'
#' @export
#' @import openssl
#'
#' @examples
#' testct <- "This block cipher is for educational purposes only. Learn well!!"
#' ct <- md5Cipher(charToRaw(testct), "Runner4567", "init vector") # should end in: 78 be 24 73
#' rawToChar(md5Cipher(ct, "Runner4567", "init vector")) # decryption
md5Cipher <- function(m, ky, iv){
# Reject messages that do not fill whole 16-byte (128-bit) blocks.
if((length(m) %% 16) != 0)
stop("Message length is not a multiple of 128-bits.")
numOfBlks <- length(m)/16
# X_1 is the MD5 hash of the initial vector.
x <- md5(charToRaw(iv))
# Keystream (o) and ciphertext (ct) buffers, one byte per message byte.
o <- raw(length = length(m))
ct <- raw(length = length(m))
# O_1: keyed MD5 of X_1 (passing `ky` makes openssl::md5 compute an HMAC).
o[1:16] <- md5(x, ky)
# Generate the rest of the keystream: X_(i+1) = O_i, O_(i+1) = HMAC-MD5(X_(i+1)).
# NOTE(review): iteration i writes block i+1, so the final pass writes 16
# bytes past length(m); R silently grows `o` and the surplus block is never
# used by the XOR step below. Appears harmless but worth confirming.
for (i in 1:numOfBlks){
x <- o[((16*(i-1))+1):(16*i)]
o[(((16*(i-1))+1)+16):((16*i)+16)] <- md5(x, ky)
}
# C_i = P_i XOR O_i, block by block.
for (i in 1:numOfBlks){
ct[((16*(i-1))+1):(16*i)] <- xor((m[((16*(i-1))+1):(16*i)]), (o[((16*(i-1))+1):(16*i)]))
}
return(ct)
}
|
e734ef8ccd20a4ecb250e4ad1ced3ca4534c09c4
|
d24f41fc305f63fb86132cd18cab8907be132d7e
|
/cachematrix.R
|
c78c77610fc52fea9a832a5c73d9293c98b1d757
|
[] |
no_license
|
a-d-m/ProgrammingAssignment2
|
9782a9798b1f0801b411227c3b1490c754684f52
|
07cad30a526dadd37cb66a45349ae51000993ac5
|
refs/heads/master
| 2021-01-18T10:45:04.489270
| 2016-02-07T10:09:50
| 2016-02-07T10:09:50
| 51,242,937
| 0
| 0
| null | 2016-02-07T09:57:22
| 2016-02-07T09:57:21
| null |
UTF-8
|
R
| false
| false
| 1,313
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Entering a comment to notate the severe disconnect between
## the material studied, depth of swirl exercises, and the actual
## assignments expected to be completed. This should also break up
## the monotony for the viewers of the Week 3 assignment.
## Creates a special matrix populated via a list
## using 'set' and 'get' to enter and retrieve values
## Build a matrix wrapper that caches its inverse.
##
## Returns a list of four accessor closures sharing this function's
## environment: set/get for the matrix itself and setInverse/getInverse for
## the cached inverse. Re-setting the matrix clears the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## Calculates the inverse of matrix created in the makeCacheMatrix
## function.
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix held by `x` (a makeCacheMatrix object).
  # A previously cached inverse is reused; otherwise the inverse is computed
  # with solve(), stored back into the cache, and returned.
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
|
058f303e1282987a556805128446e491e40e29dd
|
d2129c74ed601e23dc34ab451f2f9337af3dc588
|
/man/grapes-greater-than-grapes.Rd
|
7d09daf50d85fb5cbc06fc4a03204a243cef456c
|
[
"MIT"
] |
permissive
|
kferris10/rcdimple
|
9aa69f530c0958043c34f57b281478f0b1d0604e
|
9dfc80320c745d45b522528432e78e013eb055de
|
refs/heads/master
| 2021-01-23T20:55:51.743348
| 2015-10-02T16:35:51
| 2015-10-02T16:35:51
| 32,642,581
| 1
| 0
| null | 2015-03-21T17:37:58
| 2015-03-21T17:37:57
| null |
UTF-8
|
R
| false
| false
| 274
|
rd
|
grapes-greater-than-grapes.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/utils.R
\name{\%>\%}
\alias{\%>\%}
\title{pipe with magrittr importFrom; uses dplyr technique}
\usage{
lhs \%>\% rhs
}
\description{
pipe with magrittr importFrom; uses dplyr technique
}
|
5e1e5100c6b0e62cc223935205e0a2afba06d2fc
|
38d59286ad65e9fbb89182bb058e648be936828f
|
/FinalResearchProject/archive/old.ema.test.R
|
e401b6722f967fd2220c1fd8cad8b6cd1023d388
|
[] |
no_license
|
navdeepsingh8/BirkbeckStats
|
c40ed7e250545bde3cbd09a264aaf66d790da4b3
|
c1d2f2dcfc6cae6961ee359ef88930f52644ca80
|
refs/heads/master
| 2021-01-20T19:34:54.202876
| 2016-07-01T18:17:47
| 2016-07-01T18:17:47
| 61,050,071
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,208
|
r
|
old.ema.test.R
|
#Tests in this script:
# Compute PL for multiple parameterisations of the 2-EMA crossover rule
# Calculate Sharpe ratio and drawdown statistics for each one
#load packages
source("preamble.R")
source("functions.R")
#load data
load("FedData.Rdata")
#two ema crossover rule
#parameters: params[[1]] holds fast-EMA lengths, params[[2]] slow-EMA lengths
params <- vector("list",2)
params[[1]] <- seq(5,10,5)
params[[2]] <- seq(50,60,25)
# One PL series per (fast, slow) parameter pair.
pl <- vector("list",length(params[[1]])*length(params[[2]]))
i <- 1
# rule.pl is defined in functions.R; tc is the transaction cost per trade.
for (p1 in params[[1]]) {
for (p2 in params[[2]]) {
pl[[i]] <- rule.pl(FedData$CAD,"twoEMA",params=c(p1,p2),return.EMA=FALSE,lag.pl=1,tc=0.002)
i <- i+1
# Crude progress indicator (fraction of parameter grid done).
cat(i/length(pl),"\t")
}
}
#figure out date convention for FX data
#plot(cumprod(1+pl$net)-1)
#ann return, sharpe, dd ratio
#260*mean(pl)
# Annualised Sharpe ratio per parameterisation (260 trading days/year).
sr <- matrix(NA,length(pl),2)
dd <- sr
for (i in 1:length(pl)) {
sr[i,] <- sqrt(260)*apply(pl[[i]]$pl,2,mean)/apply(pl[[i]]$pl,2,sd)
#dd[i,] <- 260*apply(pl[[i]]$pl,2,mean)/apply(pl[[i]]$pl,2,ddstat)
}
# NOTE(review): matrix(sr[,2],9,9) assumes a 9x9 parameter grid, but the
# params above produce far fewer combinations; the axis labels also look
# like leftovers from a persp() example. Verify before relying on this plot.
persp(params[[1]], params[[2]], matrix(sr[,2],9,9), phi = 30, theta = 45,
xlab = "X Coordinate (feet)", ylab = "Y Coordinate (feet)",
main = "Surface elevation data",ticktype="detailed"
)
|
bab7e531406145dea572bdbd93bd0036ea63eb94
|
4590ff8f7115ac49c657a863df78d042611359d8
|
/savio/pi.r
|
72903e9a0818afea6f312ad8f3e636823bdcaa73
|
[] |
no_license
|
BerkeleyBiostats/tl-app-experiments
|
c36b28e3c414bdb1357502c9bf84e1c4175b1a37
|
59b53021f45aefbee1a9fb09d16fe0f988170cc8
|
refs/heads/master
| 2021-01-01T16:09:08.550126
| 2017-07-30T08:00:15
| 2017-07-30T08:00:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 692
|
r
|
pi.r
|
# Estimate pi by Monte Carlo, distributing the work over a Slurm cluster
# (Berkeley Savio) with batchtools.
library(batchtools)
# Registry directory and Slurm template are hard-coded to a user home on
# the cluster; this script is not portable as-is.
reg = makeRegistry(file.dir = '/global/home/users/marcpare/pi', seed = 1)
reg$cluster.functions = makeClusterFunctionsSlurm(template="/global/home/users/marcpare/batchtools.slurm.tmpl",
clusters = NULL, array.jobs = TRUE, scheduler.latency = 1,
fs.latency = 65)
saveRegistry(reg=reg)
# Fraction of n uniform points in the unit square that fall inside the
# quarter circle, scaled by 4, approximates pi.
piApprox = function(n) {
nums = matrix(runif(2 * n), ncol = 2)
d = sqrt(nums[, 1]^2 + nums[, 2]^2)
4 * mean(d <= 1)
}
# Quick local sanity check before submitting.
piApprox(1000)
# Ten jobs of 1e5 samples each.
batchMap(fun = piApprox, n = rep(1e5, 10))
names(getJobTable())
submitJobs(resources = list(walltime = 3600, memory = 1024, ncpus=1, partition='savio2'))
waitForJobs()
# Average the ten estimates -- two equivalent reductions.
mean(sapply(1:10, loadResult))
reduceResults(function(x, y) x + y) / 10
|
f9790325ea06a4dd6f545f0b5c4f2f704cfe9e07
|
8fd3836f4292a8a3d917e9737f037afabb5db502
|
/man/six_hourly_precip.Rd
|
5766deca2c8ce6f65269ca8ff0e581814ca63ad3
|
[] |
no_license
|
everydayduffy/climvars
|
beb8202b60db00012273a6fac50c8548274fcc1e
|
886a5d2642cc132642563ab3fffcc6b9994706cf
|
refs/heads/master
| 2023-06-12T15:42:48.936770
| 2021-07-09T08:59:09
| 2021-07-09T08:59:09
| 256,446,786
| 0
| 0
| null | 2020-04-17T08:32:12
| 2020-04-17T08:32:11
| null |
UTF-8
|
R
| false
| true
| 511
|
rd
|
six_hourly_precip.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{six_hourly_precip}
\alias{six_hourly_precip}
\title{Six-hourly precipitation for 2019.}
\format{A numeric vector of precipitation (mm/six-hour).}
\source{
\url{https://www.ecmwf.int/en/era5-land}
}
\usage{
six_hourly_precip
}
\description{
A dataset containing six-hourly precipitation for 2019 from the
ERA-5 Land dataset. Data for a location near Kinlochewe, Scotland (-5.2,
57.6).
}
\keyword{datasets}
|
d80bfdd683ede461eb3cecb487e2d0bc7d442de0
|
3ec118f35ddf7983639c3b3f3345689b85241729
|
/plot6.R
|
b7756d14f85c92472966a5e1d48b8497214891fc
|
[] |
no_license
|
ryanniemann/Exploratory-Data-Analysis-Project-2
|
4a368c5d0318490e226efeca31d9c93dc0e2eb41
|
95877d557a8c11163bf922275cb829081641093a
|
refs/heads/master
| 2020-04-15T14:31:51.790910
| 2015-12-18T04:38:22
| 2015-12-18T04:38:22
| 48,208,454
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,384
|
r
|
plot6.R
|
#6.Compare emissions from motor vehicle sources in Baltimore City
#with emissions from motor vehicle sources in Los Angeles County,
#California (fips == "06037"). Which city has seen greater
#changes over time in motor vehicle emissions?
#Read the data (NEI: emissions records, SCC: source classification table)
NEI <- readRDS("data/summarySCC_PM25.rds")
SCC <- readRDS("data/Source_Classification_Code.rds")
#Subset the motor vehicles: any SCC whose Level Two description mentions
#"vehicle" (case-insensitive), then keep NEI rows with those codes.
vehicles <- grepl("vehicle", SCC$SCC.Level.Two, ignore.case=TRUE)
vehiclesSCC <- SCC[vehicles,]$SCC
vehiclesNEI <- NEI[NEI$SCC %in% vehiclesSCC,]
#Subset the motor vehicles for
#Baltimore City, Maryland (fips "24510")
baltimoreVehiclesNEI <- vehiclesNEI[vehiclesNEI$fips=="24510",]
baltimoreVehiclesNEI$city <- "Baltimore City, Maryland"
#Subset the motor vehicles for
#Los Angeles County, California (fips "06037")
vehiclesLANEI <- vehiclesNEI[vehiclesNEI$fips=="06037",]
vehiclesLANEI$city <- "Los Angeles County, California"
#Combine both cities into one frame for faceted plotting
combinedData <- rbind(baltimoreVehiclesNEI, vehiclesLANEI)
#Create plot: stacked yearly totals, one facet per city
library(ggplot2)
ggp <- ggplot(combinedData, aes(x=factor(year), y=Emissions, fill=city)) +
geom_bar(stat="identity") +
facet_grid(scales="free", space="free", .~city) +
guides(fill=FALSE) + theme_bw() +
labs(x="Year", y=expression("Amount of PM2.5 Emitted in Tons")) +
ggtitle("
Total Emissions from PM2.5
Motor Vehicle Sources")
print(ggp)
|
9564fe181976ac72ad953f102104f334f896dae2
|
a63a05179f9d4494356cd43b58f632d80b21dabd
|
/R/internals.R
|
9df45287fc1a636bd57c8ed83162b667c170cb14
|
[] |
no_license
|
cran/rasciidoc
|
6d524dc8b1fdb44759a2c560dfe8d9b18cf174df
|
4fa4e1ed55487ca8735a2346017c2438359eecfc
|
refs/heads/master
| 2023-06-28T06:53:38.425600
| 2023-06-14T07:20:02
| 2023-06-14T07:20:02
| 173,755,762
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,131
|
r
|
internals.R
|
.onAttach <- function(...) { # Exclude Linting
    # On attach, remind the user that output goes to tempdir() unless the
    # "write_to_disk" option has been set to TRUE.
    writing_enabled <- isTRUE(getOption("write_to_disk"))
    if (!writing_enabled) {
        packageStartupMessage("\n", hint_writing())
    }
}
write_default_output <- function(msg, adoc_file) {
    # Render the packaged default.html template with `msg` (lines joined by
    # <br>) substituted for the DEFAULT_TEXT placeholder, writing the result
    # next to `adoc_file` with the extension swapped for ".html".
    template_path <- system.file("files", "default.html",
                                 package = "rasciidoc")
    template <- readLines(template_path)
    rendered <- sub("DEFAULT_TEXT", paste(msg, collapse = "<br>"), template)
    target_file <- sub("\\.[A-z]*$", ".html", adoc_file)
    writeLines(rendered, con = target_file)
    # Always reports FALSE so callers can flag that only the fallback
    # output was produced.
    return(FALSE)
}
hint_writing <- function(path = "the input file") {
    # User-facing notice explaining why output lands in tempdir() and how
    # to opt out via options("write_to_disk" = TRUE). The pieces below are
    # concatenated verbatim; do not reword without checking callers' tests.
    pieces <- c(
        "Due to the CRAN policy of not writing \"anywhere else on the",
        " file system apart from the R session's temporary directory\"",
        " we work on a temporary copy of ", path, ".",
        "\n", "Thus all internal sourcing and internal links will be",
        " broken and any output is written to ", tempdir(), ".",
        " Set the option \"write_to_disk\" to TRUE (using",
        "\n", "\toptions(\"write_to_disk\" = TRUE)", "\n",
        ") to bypass this. You may want to include the above line into",
        " your ~/.Rprofile.")
    return(paste(pieces, collapse = ""))
}
# Locate python interpreters on this machine.
#
# Checks the PATH (python, python2, python3) plus a list of common install
# locations adapted from reticulate::py_discover_config().
#
# @param first_only Return only the first interpreter found?
# @param stop_on_error Throw if no interpreter is found?
# @return A character vector of paths (possibly length > 1 when
#   `first_only = FALSE`); `NA` when nothing is found and
#   `stop_on_error = FALSE`.
discover_python <- function(first_only = TRUE, stop_on_error = TRUE) {
    candidates <- vapply(c("python", "python2", "python3"),
                         function(x) return(as.character(Sys.which(x))),
                         character(1))
    ## <<--- Adapted from reticulate(1.16)::py_discover_config()
    # provide other common locations
    if (is_windows()) {
        candidates <- c(candidates,
                        reticulate::py_versions_windows()$executable_path)
    } else {
        candidates <- c(candidates,
                        "/usr/bin/python3",
                        "/usr/local/bin/python3",
                        "/opt/python/bin/python3",
                        "/opt/local/python/bin/python3",
                        "/usr/bin/python",
                        "/usr/local/bin/python",
                        "/opt/python/bin/python",
                        "/opt/local/python/bin/python",
                        path.expand("~/anaconda3/bin/python"),
                        path.expand("~/anaconda/bin/python")
                        )
    }
    candidates <- unique(candidates)
    # filter locations by existence; `python_versions` is always defined,
    # possibly as character(0) (the original left it undefined when the
    # candidate list was empty).
    python_versions <- candidates[file.exists(candidates)]
    ## --->>
    if (isTRUE(first_only)) python_versions <- python_versions[1]
    # Guard with length()/anyNA() so the scalar `&&` below never sees a
    # zero-length or length > 1 condition (an error since R 4.3, which the
    # original hit when first_only = FALSE).
    found_none <- length(python_versions) == 0 || anyNA(python_versions)
    if (found_none && isTRUE(stop_on_error))
        throw("Found no python installation!")
    return(python_versions)
}
get_python_version <- function(python) {
    # Ask the interpreter itself; "--version" historically prints to stderr
    # (python2) or stdout (python3), so capture both streams.
    raw_output <- system2(python, "--version", stderr = TRUE, stdout = TRUE)
    version_string <- sub("Python ", "", raw_output)
    # NOTE: release-candidate markers (e.g. "2.7.18rc1", seen on some CRAN
    # machines such as r-devel-linux-x86_64-debian-clang) are stripped
    # because package_version() cannot parse them. Release candidates only
    # carry reviewed bugfixes, so dropping the suffix is safe enough, even
    # though the last stable version before an rc is unknowable here.
    version_string <- sub("rc.*$", "", version_string)
    return(version_string)
}
get_python_major <- function(python_version, use2 = FALSE) {
    # First component of the parsed version string, e.g. "3" for "3.8.2".
    major <- as.character(package_version(python_version)[[c(1, 1)]])
    # asciidoc was originally written in python2, so python2 wins when the
    # caller allows it and a python2 interpreter is installed.
    # TODO: if python2 is available, but the version is not sufficient,
    # should we fall back to python3?
    if (isTRUE(use2) && major == "3" &&
        fritools::is_installed("python2")) {
        major <- "2"
    }
    return(major)
}
# Obtain a working asciidoc source tree, cloning and pinning it on first
# use and caching the result in a config file under tempdir().
#
# NOTE: Matthew Peveler messes with the current asciidoc
# python3-implementation, so we use a historic working version.
# pass NULL to use the current one.
#
# Arguments:
#   version - python major version to use ("2" or "3"); NA = autodetect.
#   tag     - git tag to pin the clone to; NA = version-dependent default,
#             NULL = newest numeric tag.
#   clean   - remove the clone on exit (testing only).
# Returns a list with python_cmd, python_version, asciidoc_source, url
# and tag (either freshly built or re-loaded from the cached config file).
get_asciidoc <- function(version = NA,
tag = NA,
clean = FALSE # only for testing!
) {
# Resolve the interpreter: autodetect, or look up pythonN on the PATH.
if (is.na(version)) {
python <- discover_python(stop_on_error = FALSE)
if (is.na(python)) {
version <- "2"
} else {
version <- get_python_major(get_python_version(python))
}
} else {
python <- Sys.which(paste0("python", version))
}
local_asciidoc_path <- file.path(tempdir(), "asciidoc")
local_asciidoc_path <- normalizePath(local_asciidoc_path,
mustWork = FALSE)
config_file <- normalizePath(file.path(local_asciidoc_path,
"rasciidoc_config.R"),
mustWork = FALSE)
if (file.exists(config_file)) {
# Cached setup from a previous call; sourcing defines `res` locally.
source(config_file, local = TRUE)
} else {
# Fresh setup: wipe any partial clone and start over.
unlink(local_asciidoc_path, recursive = TRUE, force = TRUE)
if (isTRUE(clean)) on.exit(unlink(local_asciidoc_path, recursive = TRUE,
force = TRUE))
dir.create(local_asciidoc_path)
url <- switch(version,
"2" = "https://github.com/asciidoc-py/asciidoc-py2",
"3" = "https://github.com/asciidoc-py/asciidoc-py",
throw(paste("Could not find python version 2",
"nor python version 3."))
)
if (fritools::is_installed("git")) {
# gert fails to clone on some machines, so try to use a system
# installation of git first.
if (fritools::is_running_on_fvafrcu_machines() &&
fritools::is_windows()) {
# FVAFR messes with its proxies...
# this is a private local setting.
# Don't bother.
url <- sub("^(http)s", "\\1", url)
}
system(paste("git clone", url, local_asciidoc_path))
} else {
gert::git_clone(url = url, path = local_asciidoc_path)
}
# reset to the a tagged release: we don't want any unfunctional
# devel stuff in there.
if (is.null(tag)) {
the_tag <- get_current_tag(local_asciidoc_path)
} else {
if (is.na(tag)) {
# Default pin: 9.1.0 for python3, newest numeric tag for python2.
if (identical(version, "3")) {
the_tag <- "9.1.0"
} else {
the_tag <- get_current_tag(local_asciidoc_path)
}
} else {
the_tag <- tag
}
}
gert::git_reset_hard(repo = local_asciidoc_path,
ref = as.character(the_tag))
asciidoc_source <- normalizePath(list.files(local_asciidoc_path,
pattern =
"^asciidoc.py$",
recursive = TRUE,
full.names = TRUE))
# Verify the interpreter satisfies asciidoc's declared minimum version.
python_version <- get_python_version(python)
min_py_version <- query_min_py_version(file = asciidoc_source,
python_version =
version)
if (!is_version_sufficient(python_version, min_py_version))
throw(paste0("Could find not find python >= ", min_py_version,
"."))
res <- list("python_cmd" = python,
"python_version" = python_version,
"asciidoc_source" = asciidoc_source,
"url" = url,
"tag" = the_tag
)
# Persist the result so subsequent calls take the cached branch above.
dump("res", config_file)
}
return(res)
}
get_current_tag <- function(local_asciidoc_path) {
    # Newest purely-numeric release tag of the cloned asciidoc repository.
    all_tags <- gert::git_tag_list(repo = local_asciidoc_path)[["name"]]
    # Drop tags containing letters (e.g. release candidates) so that
    # package_version() can parse the remainder.
    lettered <- grepl("[[:alpha:]]", all_tags)
    if (any(lettered)) {
        all_tags <- all_tags[!lettered]
    }
    # Highest version wins.
    return(sort(package_version(all_tags), decreasing = TRUE)[1])
}
query_min_py_version <- function(file, python_version) {
    # asciidoc.py declares its minimum interpreter version in a line like
    #   MIN_PYTHON_VERSION = '2.7'    (python2 sources)
    #   MIN_PYTHON_VERSION = (3, 5)   (python3 sources)
    declaration <- grep("^MIN_PYTHON_VERSION", readLines(file), value = TRUE)
    if (identical(python_version, "2")) {
        # Strip the assignment prefix and trailing quote: '2.7' -> "2.7".
        stripped <- sub("^MIN_PYTHON_VERSION = '", "", declaration)
        min_py_version <- sub("'.*", "", stripped)
    } else if (identical(python_version, "3")) {
        # Pull the tuple out of the parentheses and dot-join: (3, 5) -> "3.5".
        tuple <- sub(".*\\((.*)\\).*", "\\1", declaration)
        min_py_version <- sub(", ", ".", tuple)
    } else {
        throw(paste("Could not find python version 2",
                    "nor python version 3."))
    }
    return(min_py_version)
}
# Knit `file_name` with knitr when requested or detected, returning the
# path to the resulting file (the input path unchanged when not knitting).
#
# Arguments:
#   knit          - TRUE/FALSE, or NA to autodetect from the file contents
#                   ("//begin.rcode" chunks) or an R-ish file extension.
#   write_to_disk - write next to the input (TRUE) or into tempdir().
#   envir         - environment in which knitr evaluates chunks.
run_knit <- function(file_name, knit = NA,
write_to_disk = getOption("write_to_disk"),
envir = parent.frame()) {
# Autodetect 1: asciidoc-style R code chunks in the file body.
if (is.na(knit)) {
r_code_pattern <- "//begin.rcode"
if (any(grepl(r_code_pattern, readLines(file_name)))) {
knit <- TRUE
warning("Setting option knit to TRUE based on the file contents!")
}
}
# Autodetect 2: file extension starting with .R (e.g. .Rasciidoc).
if (is.na(knit)) {
if (grepl("\\.R.*$", file_name)) {
knit <- TRUE
warning("Setting option knit to TRUE based on the file name given!")
}
}
if (isTRUE(knit)) {
# Drop the leading R from the extension: foo.Radoc -> foo.adoc.
output_basename <- sub("\\.[Rr](.*)", ".\\1", basename(file_name))
if (isTRUE(write_to_disk)) {
knit_out_file <- file.path(dirname(file_name), output_basename)
} else {
message(hint_writing(file_name))
knit_out_file <- file.path(tempdir(), output_basename)
}
ops <- options() ## TODO: knitr changes the options?!
file_name <- knitr::knit(file_name, output = knit_out_file,
envir = envir)
options(ops) ## restore old options
}
return(file_name)
}
# Render an R/asciidoc input with knitr, dispatching between knitr::spin()
# (for spin-style R scripts) and run_knit() (for chunked documents).
# Temporarily installs asciidoc-specific knit hooks and restores the
# previous hooks on exit; evaluation happens inside `working_directory`.
# Returns the normalized path of the produced file.
run_knitr <- function(file_name, working_directory = dirname(file_name),
knit = NA,
hooks = NULL,
write_to_disk = getOption("write_to_disk"),
replacement = NULL,
envir = parent.frame()) {
# Swap in asciidoc hooks; restore the caller's hooks no matter what.
current_hooks <- knitr::knit_hooks$get()
adjust_asciidoc_hooks(hooks = hooks, replacement = replacement)
on.exit(knitr::knit_hooks$set(current_hooks))
file_name <- normalizePath(file_name)
withr::with_dir(working_directory, {
if (is_spin_file(file_name)) {
# Spin-style script: roxygen text + chunk-option comments.
content <- knitr::spin(text = readLines(file_name),
knit = TRUE,
report = FALSE,
envir = envir)
output_basename <- sub("\\.[Rr]", ".asciidoc",
basename(file_name))
if (isTRUE(write_to_disk)) {
out_file <- file.path(dirname(file_name),
output_basename)
} else {
message(hint_writing(file_name))
out_file <- file.path(tempdir(),
output_basename)
}
writeLines(content, out_file)
} else {
# Chunked document (or plain file): delegate to run_knit().
out_file <- run_knit(file_name, knit = knit,
envir = envir,
write_to_disk = write_to_disk)
}
out_file <- normalizePath(out_file)
})
return(out_file)
}
# Is `file_name` input for knitr::spin()?
#
# TRUE when the file is an R script (.r/.R) containing roxygen-style text
# lines ("#'"), or when it contains spin chunk-option lines ("#-"/"#+")
# regardless of extension. Reads the file once (the original read it twice).
is_spin_file <- function(file_name) {
    is_r_file <- grepl("^.*\\.[rR]$", file_name)
    file_lines <- readLines(file_name)
    has_roxygen_comment <- any(grepl("^#'", file_lines))
    has_spin_knitr_chunk_options <- any(grepl("^#-|^#\\+", file_lines))
    # Precedence kept from the original:
    # (is_r_file && has_roxygen_comment) || has_spin_knitr_chunk_options
    is_spin <- is_r_file && has_roxygen_comment || has_spin_knitr_chunk_options
    return(is_spin)
}
# Extract a tagged excerpt of `file_name` into a new file and return the
# new file's path.
#
# Lines between `begin_pattern` and `end_pattern` tags are selected via
# fritools::get_lines_between_tags(), lines matching `exclusion_pattern`
# are dropped, and everything from `inclusion_pattern` to end-of-line is
# stripped (keeping the text before the marker). The output lands next to
# the input when `write_to_disk` is TRUE, otherwise in tempdir().
excerpt_to_file <- function(file_name,
begin_pattern, end_pattern,
exclusion_pattern, inclusion_pattern,
write_to_disk = getOption("write_to_disk"),
output_name = NA) {
if (is.na(output_name))
output_name <- basename(tempfile(fileext = ".Rasciidoc"))
if (isTRUE(write_to_disk)) {
output_directory <- dirname(file_name)
} else {
message(hint_writing(file_name))
output_directory <- tempdir()
}
glbt <- fritools::get_lines_between_tags
excerpt <- glbt(file_name = file_name, keep_tagged_lines = FALSE,
begin_pattern = begin_pattern,
end_pattern = end_pattern,
from_first_line = TRUE, to_last_line = TRUE)
# Order matters: drop excluded lines first, then strip inclusion markers.
excerpt <- grep(exclusion_pattern, excerpt, invert = TRUE, value = TRUE)
excerpt <- sub(paste0(inclusion_pattern, ".*"), "", excerpt)
# The asciidoc file has to be _here_ for sourcing to work!
excerpt_file <- file.path(output_directory, output_name)
writeLines(excerpt, excerpt_file)
return(excerpt_file)
}
excerpt_no_slides <- function(file_name,
                              write_to_disk = getOption("write_to_disk")) {
    # Wrapper around excerpt_to_file() with the no-slides marker patterns;
    # the output file is the input's basename with "_ex" appended.
    call_args <- list(file_name = file_name,
                      begin_pattern = "^// *end_only_slide",
                      end_pattern = "^// *begin_only_slide",
                      inclusion_pattern = "// *no_slide",
                      exclusion_pattern = "// *slide_only",
                      write_to_disk = write_to_disk,
                      output_name = paste0(basename(file_name), "_ex"))
    return(do.call(excerpt_to_file, call_args))
}
excerpt_slides <- function(file_name,
                           write_to_disk = getOption("write_to_disk")) {
    # Wrapper around excerpt_to_file() with the slides-only marker patterns;
    # the output name gets "_slides" inserted before the extension.
    call_args <- list(file_name = file_name,
                      begin_pattern = "^// *end_no_slide",
                      end_pattern = "^// *begin_no_slide",
                      inclusion_pattern = "// *slide_only",
                      exclusion_pattern = "// *no_slide",
                      write_to_disk = write_to_disk,
                      output_name = sub("(^.*)(\\.[rR]?asc.*)$",
                                        "\\1_slides\\2",
                                        basename(file_name)))
    return(do.call(excerpt_to_file, call_args))
}
|
8d99b94c54b9a99520aaed98d25d8e1190006567
|
a72b056bec6bd2fc62e9bdd77c4ce45652fb1b28
|
/run_analysis.R
|
a3428fbb5d333fa1bcc97710a93f1686453b2ba6
|
[] |
no_license
|
gbprime/GetAndCleanDataP2
|
dad548ce4ecd6723c5848c87a54951b85677356d
|
756985f195bda68f3ff980b1f5c990ccbaa8d703
|
refs/heads/master
| 2021-01-01T05:32:37.991174
| 2015-07-26T15:58:05
| 2015-07-26T15:58:05
| 39,521,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,293
|
r
|
run_analysis.R
|
# *********************************************************************************************************
# The purpose of this project is to collect, work with, and clean a data set. The goal is to prepare tidy
# data that can be used for later analysis.
#
# This R script gets, cleans and performs certain operations on data obtained from the accelerometers
# used in the Samsung Galaxy S smartphone.
#
# Project # 2: Getting & Cleaning Data
# *********************************************************************************************************
library(plyr)
# Creates a directory in the specified location IFF it doesn't already exist. Once the folder is ready,
# the file (UCI HAR Dataset zip) is downloaded and unzipped. Both steps are
# skipped when their results already exist, so repeated calls are cheap.
#
# TODO find a script to determine whether or not a user is using OSX/Linux or Windows
getData <- function() {
# Checks for data directory and creates one if it doesn't exist
if (!file.exists("data")) {
message("Creating data directory...")
dir.create("data")
}
if (!file.exists("data/UCI HAR Dataset")) {
# download the data
message("Downloading & unzipping the data...")
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zipfile <- "data/UCI_HAR_data.zip"
# method="curl" is needed for https on some platforms (see TODO above).
download.file(fileURL, destfile=zipfile, method="curl")
# download.file(fileURL, destfile=zipfile)
unzip(zipfile, exdir="data")
}
}
# Merges training and test datasets.
# returns a list of data frames accessible by name (e.g. object$name):
#   x       - feature measurements (X_train + X_test, row-bound)
#   y       - activity codes (y_train + y_test)
#   subject - subject identifiers (subject_train + subject_test)
# Rows are stacked train-first, so the three frames stay aligned.
mergeCoordinateDataSets <- function() {
# Read data
training_XCoord <- read.table("data/UCI HAR Dataset/train/X_train.txt")
training_YCoord <- read.table("data/UCI HAR Dataset/train/y_train.txt")
trainingSubject <- read.table("data/UCI HAR Dataset/train/subject_train.txt")
test_XCoord <- read.table("data/UCI HAR Dataset/test/X_test.txt")
test_YCoord <- read.table("data/UCI HAR Dataset/test/y_test.txt")
testSubject <- read.table("data/UCI HAR Dataset/test/subject_test.txt")
# Merge Results
mergedResultsForX <- rbind(training_XCoord, test_XCoord)
mergedResultsForY <- rbind(training_YCoord, test_YCoord)
mergedResultsSubjects <- rbind(trainingSubject, testSubject)
# merge train and test datasets and return
list(x=mergedResultsForX, y=mergedResultsForY, subject=mergedResultsSubjects)
}
# Extracts only the measurements on the mean and standard deviation for each
# measurement contained in the dataFrame, naming the kept columns after the
# corresponding entries of features.txt.
extractMeanAndStandardDeviation <- function(dataFrame) {
	# Read the feature list file (column 1: index, column 2: feature name)
	features <- read.table("data/UCI HAR Dataset/features.txt")
	# grepl() is vectorised over the name column, so no sapply() loop is
	# needed. fixed = TRUE matches the literal "()" suffix, which keeps
	# e.g. "-mean()" while excluding "-meanFreq()".
	meanColumn <- grepl("mean()", features[, 2], fixed = TRUE)
	standardDeviationColumn <- grepl("std()", features[, 2], fixed = TRUE)
	keep <- meanColumn | standardDeviationColumn
	# Extract the matching measurement columns and label them
	extractedInfo <- dataFrame[, keep]
	colnames(extractedInfo) <- features[keep, 2]
	return(extractedInfo)
}
# Applies descriptive labels to the numeric activity codes (1-6) in the
# single-column dataframe, renaming the column to "activity".
# TODO read the activity file and apply names instead of this hack with
# enumerators.
applyDescriptors <- function(dataFrame) {
	colnames(dataFrame) <- "activity"
	activity_labels <- c("1" = "WALKING",
	                     "2" = "WALKING_UPSTAIRS",
	                     "3" = "WALKING_DOWNSTAIRS",
	                     "4" = "SITTING",
	                     "5" = "STANDING",
	                     "6" = "LAYING")
	# Replace each code in turn; unmatched values are left untouched.
	for (code in names(activity_labels)) {
		dataFrame$activity[dataFrame$activity == code] <- activity_labels[[code]]
	}
	return(dataFrame)
}
# Alternative to applyDescriptors(): label activities from the dataset's
# own activity_labels.txt instead of hard-coded enumerators. Labels are
# normalised to lower case with underscores removed
# (e.g. "WALKING_UPSTAIRS" -> "walkingupstairs").
applyDescriptors2 <- function(dataFrame) {
	activities <- read.table("data/activity_labels.txt")
	activities[, 2] <- gsub("_", "", tolower(as.character(activities[, 2])))
	# BUG FIX: the original indexed an undefined global `Y` (and returned
	# nothing usable); operate on the `dataFrame` argument instead.
	dataFrame[, 1] <- activities[dataFrame[, 1], 2]
	names(dataFrame) <- "activity"
	return(dataFrame)
}
# Combine mean & st. deviation datasets (datasetA), activities (datasetB)
# and subjects (datasetC) into one dataframe; rows are assumed to line up.
#
# returns a newly created dataframe
bindDataSets <- function(datasetA, datasetB, datasetC) {
	do.call(cbind, list(datasetA, datasetB, datasetC))
}
# Creates a tidy dataframe with the average of each variable for each activity and each subject.
#
# returns the tidy dataframe that is then exported as a .CSV file
createTidyDataSet <- function(dataFrame) {
# NOTE(review): assumes the first 60 columns are the measurement columns
# and that `subject`/`activity` columns follow -- confirm against the
# output of bindDataSets() before changing column counts upstream.
tidy <- ddply(dataFrame, .(subject, activity), function(x) colMeans(x[,1:60]))
return(tidy)
}
# Entry point: download the UCI HAR data, merge train/test sets, keep the
# mean/std measurements, label activities and subjects, and write the tidy
# per-subject/per-activity averages to tidyDataSet.txt.
main <- function() {
# Get the information
getData()
# Merge training and test datasets
mergedResults <- mergeCoordinateDataSets()
# For each measurement, the mean and standard deviation are extracted
measurementsDataframe <- extractMeanAndStandardDeviation(mergedResults$x)
# Applying descriptors to the activities
descriptorsDataframe <- applyDescriptors(mergedResults$y)
# Applying descriptive names to columns
colnames(mergedResults$subject) <- c("subject")
# Combining all data frames
dataframes <- bindDataSets(measurementsDataframe, descriptorsDataframe, mergedResults$subject)
# Creating the tidy dataset
tidy <- createTidyDataSet(dataframes)
# Create a .TXT file containing the newly created tidy dataset
write.table(tidy, "tidyDataSet.txt", row.names = FALSE)
}
|
9a364174aec6656384373b827099195560fa2f2d
|
6be70ffdb95ed626d05b5ef598b842c5864bac4d
|
/old/tests/make_senate_party_calls_keep_very_lopsided_new_match.R
|
a79b9d912351f840ccfa590ed4a9040e5038ddfe
|
[] |
no_license
|
Hershberger/partycalls
|
c4f7a539cacd3120bf6b0bfade327f269898105a
|
8d9dc31dd3136eae384a8503ba71832c78139870
|
refs/heads/master
| 2021-09-22T17:54:29.106667
| 2018-09-12T21:16:56
| 2018-09-12T21:16:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 404
|
r
|
make_senate_party_calls_keep_very_lopsided_new_match.R
|
# Generate Senate party-call codings for Congresses 93-112, keeping very
# lopsided votes (drop_very_lopsided_votes = FALSE, matching the output
# filename), and save the result for use in tests.
library(partycalls)
# L'Ecuyer RNG for reproducible (and parallel-safe) streams.
set.seed(347611200, kind = "L'Ecuyer")
senate_party_calls <- lapply(93:112, code_party_calls_by_congress_number,
chamber = "senate", pval_threshold = 0.05, sim_annealing = FALSE,
use_new_match_check = TRUE, drop_very_lopsided_votes = FALSE,
hybrid = FALSE, reassign_flip_flop = FALSE)
save(senate_party_calls,
file = "test_data/senate_party_calls_keep_very_lopsided.RData")
|
bab5196665ee3cc05cab6d9f4425971d51373b4e
|
6052995864c5362b030ec1f418518e57b565ba29
|
/cachematrix.R
|
d568fa369d187bbf76c9eec6c8a021325731ebaf
|
[] |
no_license
|
robuchowski/ProgrammingAssignment2
|
cd1309e1ddfdc61280bcc02505e2371e39dfd68a
|
810b0cc4bb0ae4dedb6f830512c931cc0a63ac61
|
refs/heads/master
| 2020-08-30T07:11:42.505166
| 2019-10-29T15:21:43
| 2019-10-29T15:21:43
| 218,301,856
| 0
| 0
| null | 2019-10-29T14:06:09
| 2019-10-29T14:06:08
| null |
UTF-8
|
R
| false
| false
| 1,715
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
##The purpose of the functions in this assignment is to write a pair of functions named
##"makeCacheMatrix" and cacheSolve" that cache the inverse of a matrix. Matrix inversion is usually
## a costly computation and therefore can be beneficial to cache the inverse of a matrix rather than computing it repeatedly.
## Write a short comment describing this function
## Below, I created "makeCacheMatrix", which is a function that creates a special object that stores the matrix and caches its inverse.
##The "makeCacheMatrix" creates a special "matrix" object that can cache its inverse for the input.
## Construct a cache-aware matrix holder.
##
## The returned list exposes four closures over this call's environment:
## set/get replace and read the stored matrix, setInverse/getInverse store
## and read the cached inverse. Replacing the matrix clears the cache.
makeCacheMatrix <- function(x = matrix()) {
    inverse_cache <- NULL
    replace <- function(y) {
        x <<- inverse_cache
        # (see below -- assignments kept in the conventional order)
    }
    list(
        set = function(y) {
            x <<- y
            inverse_cache <<- NULL
        },
        get = function() x,
        setInverse = function(value) inverse_cache <<- value,
        getInverse = function() inverse_cache
    )
}
## Write a short comment describing this function
## The function "cacheSolve" below computes the inverse of the special "matrix" I created above
## called "makeCacheMatrix". If the inverse has already been calculated, and the matrix has not changed,
## then it should compute the inverse from the cache.
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x', reusing the cached
    ## inverse when makeCacheMatrix already holds one.
    cached <- x$getInverse()
    if (is.null(cached)) {
        # Cache miss: invert the stored matrix and remember the result.
        fresh <- solve(x$get(), ...)
        x$setInverse(fresh)
        return(fresh)
    }
    message("getting cached result")
    cached
}
|
78be9fc44a99401b3db7c4b5d399f4f1e60341d9
|
5c4bff6c12024a093130e925d8240aa1b426b312
|
/plyr_to_purrr1.R
|
5dd9a667caa0e7fc707604ac4cfbd625a8b00d60
|
[] |
no_license
|
mrtnj/rstuff
|
81ef05fb813c19abfede0ea18ef447c9bf9d1081
|
2f8e1939b7009fb4f01dfa17fc8d09aa7550f875
|
refs/heads/master
| 2021-11-23T05:09:17.874029
| 2021-11-13T16:31:56
| 2021-11-13T16:31:56
| 7,859,226
| 6
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,781
|
r
|
plyr_to_purrr1.R
|
## Demonstrate different ways to do plyr-like operations with modern tidyverse
## Needs to be loaded first to not conflict with dplyr
library(plyr)
library(broom)
library(dplyr)
library(magrittr)
library(purrr)
library(tidyr)
# Fixed seed so the simulated dataset is reproducible.
set.seed(20210807)
n_groups <- 10
# Poisson-distributed group sizes (mean 10 observations per group).
group_sizes <- rpois(n_groups, 10)
n <- sum(group_sizes)
# One row per observation: id, group membership, and a Uniform(0, 1) predictor.
fake_data <- tibble(id = 1:n,
                    group = rep(1:n_groups,
                                times = group_sizes),
                    predictor = runif(n, 0, 1))
# Random per-group intercepts; response = 10 * predictor + group intercept + noise.
group_intercept <- rnorm(n_groups, 0, 1)
fake_data$response <- fake_data$predictor * 10 +
    group_intercept[fake_data$group] +
    rnorm(n)
## Fit a simple linear regression of response on predictor.
## `data` must contain numeric columns `response` and `predictor`;
## returns the fitted "lm" object.
fit_model <- function(data) {
  lm(response ~ predictor, data = data)
}
## plyr
# dlply: split fake_data by "group" and fit one lm per group (list of models);
# ldply: tidy each model and row-bind the coefficient tables back together.
models <- dlply(fake_data,
                "group",
                fit_model)
result <- ldply(models, tidy)
print(result)
## purr without/with pipe
# Same split-apply-combine expressed with purrr::map / map_df; the .id
# argument keeps the group label as a column.
models <- map(split(fake_data,
                    fake_data$group),
              fit_model)
result <- map_df(models,
                 tidy,
                 .id = "group")
print(result)
# Equivalent, written as a single magrittr pipeline.
result <- fake_data %>%
    split(.$group) %>%
    map(fit_model) %>%
    map_df(tidy, .id = "group")
print(result)
## List column approach
# nest() packs each group's rows into a list-column; models and their tidied
# coefficient tables are then stored alongside the data and unnested at the end.
fake_data_nested <- nest(group_by(fake_data, group),
                         data = c(id, predictor, response))
fake_data_models <- mutate(fake_data_nested,
                           model = map(data,
                                       fit_model),
                           estimates = map(model,
                                           tidy))
result <- unnest(fake_data_models, estimates)
print(result)
# Same list-column workflow as one pipeline (note the right-assign into result).
fake_data %>%
    group_by(group) %>%
    nest(data = c(id, predictor, response)) %>%
    mutate(model = map(data, fit_model),
           estimates = map(model, tidy)) %>%
    unnest(estimates) ->
    result
print(result)
## Linear mixed model
# Compare the per-group lm fits above against a single mixed model with a
# random intercept per group.
library(ggplot2)
library(lme4)
model <- lmer(response ~ (1|group) + predictor,
              fake_data)
# Reshape the per-group lm coefficients (from `result`) to one row per group.
lm_coef <- pivot_wider(result,
                       names_from = term,
                       values_from = estimate,
                       id_cols = group)
# Group-level coefficients (BLUPs + fixed effects) from the mixed model.
lmm_coef <- cbind(group = levels(model@flist$group),
                  coef(model)$group)
# Stack both sets of coefficients, tagged by model type, for plotting.
model_coef <- rbind(transform(lm_coef, model = "lm"),
                    transform(lmm_coef, model = "lmm"))
colnames(model_coef)[2] <- "intercept"
# Scatter of the raw data with one fitted line per group and model;
# linetype distinguishes lm from lmm (shrinkage should be visible).
ggplot() +
    geom_point(aes(x = predictor,
                   y = response,
                   colour = factor(group)),
               data = fake_data) +
    geom_abline(aes(slope = predictor,
                    intercept = intercept,
                    colour = factor(group)
                    ,linetype = model),
                data = model_coef) +
    theme_bw() +
    theme(panel.grid = element_blank())
|
51c24e34a201429c9ecb532ce6456e6c12cacef1
|
f5cbb7ed7532f5fee7e51d9187e6ab5e5d30b624
|
/plot2.R
|
74bd17924d2ecbe5643e87915f515ae4cbb5121d
|
[] |
no_license
|
cdesouza2/ExData_Plotting1
|
886e774aa6e9a73ed2dcad62c20ace7d6bc7a737
|
57ccf9cdf145019f80989370febd157c5a7ffadc
|
refs/heads/master
| 2020-04-18T06:54:19.910044
| 2019-01-24T11:54:26
| 2019-01-24T11:54:26
| 167,341,517
| 0
| 0
| null | 2019-01-24T09:35:05
| 2019-01-24T09:35:04
| null |
UTF-8
|
R
| false
| false
| 517
|
r
|
plot2.R
|
# Contains the code to produce plot2.png
# Assumes the file loadData.R has previously been run in order to load the data
# from the file "household_power_consumption.txt" into the dat variable,
# and where the type converted columns are prefixed with "N",
# and only the rows of interest have been included.
# source("loadData.R")
# Open a 480x480 PNG graphics device, draw the Global Active Power time
# series as a line plot, then close the device so the file is written.
png(file = "plot2.png", width = 480, height = 480)
with(dat, plot(NDateTime, NGlobal_active_power, xlab = "", ylab = "Global Active Power (kilowatts)", type = "l"))
dev.off()
|
8a3a108d77069939382384ea137c2bde05b72231
|
148ebcd8b769e2e6b1d779dbce0512381a0bfde8
|
/bin/sequence_url_update_nextseq.R
|
d19d9c4cb27040861a43b5af2ca5574e5a163a4e
|
[] |
no_license
|
jdhayes/hts_pipeline
|
f99bd3c3f09d812ec499f37aa19dd5546af9397a
|
72d57fe2bca9ed6a9fea95f4282935bc97aa96aa
|
refs/heads/master
| 2023-04-10T14:54:37.956716
| 2019-12-20T21:39:57
| 2019-12-20T21:39:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 53
|
r
|
sequence_url_update_nextseq.R
|
../post_demultiplex/bin/sequence_url_update_nextseq.R
|
b95e8f40465518d1f095d1b0d98d23502fb06e29
|
b7d91b7c0b71597dff66d10d9852fb5ec42979fe
|
/a.R
|
82a731fdef1a2171ebb76072ee3eb10ea2db7c9f
|
[] |
no_license
|
maidh126/Facial-Expression-Recognition
|
824ccb3ceb3f4859cb3476e5e944e1cb7fe256b3
|
cc0d45dd90dd926caf7c0eb0f5af990969d65794
|
refs/heads/main
| 2023-05-19T23:06:12.432889
| 2021-06-08T17:33:43
| 2021-06-08T17:33:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,803
|
r
|
a.R
|
library(caTools)
library(tidyverse)
library(kernlab)
library(e1071)
library(RColorBrewer)
library(ISLR)
library(caret)
library(ROCR)
library(pROC)
# Set the working directory
# NOTE(review): absolute, machine-specific path -- other users must edit this
# line (or switch to a relative path) before the script will run.
setwd("/Users/maido/OneDrive - Oxford Brookes University/Advanced Machine Learning DALT7012/Assessments/grammatical_facial_expression")
# Load the User A "topics" expression data points and their labels.
data1A <- read.delim("a_topics_datapoints.txt", header=TRUE, sep=" ")
target1A <- read.delim("a_topics_targets.txt", header=FALSE)
# Check the real number of positives and negatives in the topics dataset person A
dim(data1A)
dim(target1A)
table(target1A)
# Add the labels (target dataset) onto the data frames
data1A <- cbind(data1A, target1A)
# Remove the first column
data1A <- select(data1A, -X0.0)
# Split the data into a training & testing dataset (75% train / 25% test).
set.seed(123)
split1A <- sample.split(data1A$V1, SplitRatio = 0.75)
train1A <- subset(data1A, split1A == TRUE)
test1A <- subset(data1A, split1A == FALSE)
# Feature scaling (column 301 holds the class label V1, so it is excluded).
train1A[,-301] <- scale(train1A[,-301])
test1A[,-301] <- scale(test1A[,-301])
# Applying Grid Search to find the best parameters (caret::train, linear SVM).
svm1A = train(form = V1 ~ . ,
              data = train1A,
              method = "svmLinear")
# Show the best parameters
svm1A$bestTune
# Fitting SVM to the train set 1A
# NOTE(review): this e1071::svm fit reuses the name svm1A, overwriting the
# caret model above; rename one of them if both results are needed later.
svm1A <- svm(formula = V1~.,
             data = train1A,
             type = "C-classification",
             kernel = "linear",
             cost = 1,
             cross = 10)
# Predicting the test set results (drop the label column before predicting).
predict1A <- predict(svm1A, newdata = test1A[-301])
# Making the Confusion Matrix
confusionMatrix(factor(predict1A), factor(test1A[,301]))
# comment: accuracy: 95.99%
##### a(ii) 4% ##### Repeat the training for a different facial expression
# Load the User A "conditional" expression data points and their labels.
data2A <- read.delim("a_conditional_datapoints.txt", header=TRUE, sep=" ")
target2A <- read.delim("a_conditional_targets.txt", header=FALSE)
# Check the real number of positives and negatives in the dataset
dim(data2A)
table(target2A)
# Add the labels (target dataset) onto the data frames
data2A <- cbind(data2A, target2A)
# Remove the first column
data2A <- select(data2A, -X0.0)
# Split the data into a training & testing dataset (same 123 seed as part (i)
# so the experiment stays reproducible).
set.seed(123)
split2A <- sample.split(data2A$V1, SplitRatio = 0.75)
train2A <- subset(data2A, split2A == TRUE)
test2A <- subset(data2A, split2A == FALSE)
# Feature scaling (column 301 holds the class label V1, so it is excluded).
# BUG FIX: the original scaled train1A/test1A again here (a copy-paste slip
# from part (i)), which left the conditional-expression data unscaled and
# double-scaled the topics data. Scale the part (ii) frames instead.
train2A[,-301] <- scale(train2A[,-301])
test2A[,-301] <- scale(test2A[,-301])
# Applying Grid Search to find the best parameters (caret::train, linear SVM).
svm2A = train(form = V1 ~ . ,
              data = train2A,
              method = "svmLinear")
# Show the best parameters
svm2A$bestTune
# Fitting SVM to the train set 2A
# NOTE(review): this e1071::svm fit reuses the name svm2A, overwriting the
# caret model above; rename one of them if both results are needed later.
svm2A <- svm(formula = V1~.,
             data = train2A,
             type = "C-classification",
             kernel = "linear",
             cost = 1,
             cross = 10)
# Predicting the test set results (drop the label column before predicting).
predict2A <- predict(svm2A, newdata = test2A[-301])
# Making the Confusion Matrix
confusionMatrix(factor(predict2A), factor(test2A[,301]))
# comment: accuracy: 97.06%
##### a(iii) 8% ##### Extra marks for coding your own implementation of the chosen classifier
# SVM implementation
#
# Trains a soft-margin linear SVM by solving the dual quadratic program with
# solve.QP and returns the primal weight vector recovered from the dual
# coefficients.
#
# Args:
#   X: numeric matrix of predictors, one row per sample.
#   y: label vector coded as +1 / -1, length nrow(X).
#   C: soft-margin penalty; bounds each dual coefficient in [0, C].
# Returns:
#   Numeric weight vector of length ncol(X).
#
# NOTE(review): nearPD() comes from the Matrix package and solve.QP() from
# quadprog; neither is loaded at the top of this script -- confirm they are
# attached before calling svm.fit.
svm.fit = function(X, y, C=NULL) {
  n.samples = nrow(X)
  # Linear kernel Gram matrix: K[i, j] = <x_i, x_j>. tcrossprod(X) computes
  # X %*% t(X) in one vectorized call instead of the original O(n^2) double
  # loop of per-pair inner products (identical result).
  K = tcrossprod(X)
  Dmat = outer(y,y) * K
  # Project onto the nearest positive-definite matrix so solve.QP accepts it.
  Dmat = as.matrix(nearPD(Dmat)$mat)
  dvec = rep(1, n.samples)
  # Constraints: sum(a * y) == 0 (equality, meq = 1) and 0 <= a_i <= C.
  Amat = rbind(y, diag(n.samples), -1*diag(n.samples))
  bvec = c(0, rep(0, n.samples), rep(-C, n.samples))
  res = solve.QP(Dmat,dvec,t(Amat),bvec=bvec, meq=1)
  a = res$solution
  # Primal weights: w = sum_i a_i * y_i * x_i.
  bomega = apply(a*y*X,2,sum)
  return(bomega)
}
|
ba1e8ebd2fd1a7c74a3b8c36c34c19433136ab7f
|
1f579a6cd3a03ff52b2d54f96baaad5714aa3f1a
|
/server.R
|
3e03309d0ce1a2811d7648d5cdd1def509aecc29
|
[] |
no_license
|
chuagh74/COVID19SG
|
31edb9691b7f571e81f1ef0e45df020325b0e349
|
c127dafbd98c3d209a7777381d1755bf1086fc3c
|
refs/heads/master
| 2021-04-23T18:07:02.702151
| 2020-03-25T11:45:40
| 2020-03-25T11:45:40
| 249,960,898
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70,587
|
r
|
server.R
|
shinyServer(function(input, output, session) {
source("global.R")
######################################################################
# Main View
######################################################################
output$colorMsg=renderUI({
msg=sprintf("Color codes used for icons. <br />Color is used to differentiate places with recent visits by infected cases from those that already has a longer period not being visited.")
HTML(msg)
})
  # Line chart of the cumulative count of unlinked cases over time.
  # NOTE(review): the column name "CumuulativeUnlinked" (double u) matches
  # the source data frame -- do not "fix" the spelling here without renaming
  # the column in unlinkedData as well.
  output$unlinkedPlot=renderPlotly({
    # Parse day-month-year strings into Date so ordering/axis are chronological.
    unlinkedData$Date=as.Date(unlinkedData$Date,'%d-%b-%Y')
    unlinkedData=unlinkedData[order(unlinkedData$Date),]
    plot_ly(unlinkedData,x=~Date,y=~CumuulativeUnlinked,type='scatter',mode='lines+markers') %>% layout(title='Cumulative unlinked cases')
  })
output$localImportPlot=renderPlotly({
importData=dataList[['CLUSTER']][which(dataList[['CLUSTER']]$Cluster %in% importClusters & dataList[['CLUSTER']]$LinkType=='Direct'),]
importedAgg=aggregate(PersonID~DateConfirmed,dataList[['PERSON']][which(dataList[['PERSON']]$PersonID %in% importData$PersonID),],length)
names(importedAgg)[which(names(importedAgg)=='PersonID')]='Imported'
localAgg=aggregate(PersonID~DateConfirmed,dataList[['PERSON']][which(!(dataList[['PERSON']]$PersonID %in% importData$PersonID)),],length)
names(localAgg)[which(names(localAgg)=='PersonID')]='Local'
importLocalData=merge(importedAgg,localAgg,by='DateConfirmed',all.x=TRUE,all.y=TRUE)
importLocalData[is.na(importLocalData)]=0
importLocalData$DateConfirmed=as.Date(as.character(importLocalData$DateConfirmed),'%d-%b-%Y')
importLocalDataMelt=melt(importLocalData,id.vars='DateConfirmed')
names(importLocalDataMelt)[which(names(importLocalDataMelt)=='variable')]='CaseType'
importLocalDataMelt=importLocalDataMelt[order(importLocalDataMelt$DateConfirmed),]
plot_ly(importLocalDataMelt,x=~DateConfirmed,y=~value,color=~CaseType,type='scatter',mode='lines+markers') %>% layout(title='Imported cases vs Local cases')
})
output$importImpact=renderDT({
indirectImports=dataList[['CLUSTER']][which(dataList[['CLUSTER']]$Cluster=='ImportNew'),]
indirectImports=indirectImports[which(indirectImports$LinkType=='Indirect'),]
indirectImportsRel=dataList[['PERSONPERSON']][which(dataList[['PERSONPERSON']]$PersonID1 %in% indirectImports$PersonID | dataList[['PERSONPERSON']]$PersonID2 %in% indirectImports$PersonID),]
importLinkDf=NULL
for (i in 1:nrow(indirectImportsRel))
{
importLinkDf=rbind(importLinkDf,data.frame('Import'=sprintf("Case %s: %s",indirectImportsRel$PersonID1[i],dataList[['PERSON']]$Notes[which(dataList[['PERSON']]$PersonID==indirectImportsRel$PersonID1[i])]),'Infected by Import'=sprintf("Case %s: %s",indirectImportsRel$PersonID2[i],dataList[['PERSON']]$Notes[which(dataList[['PERSON']]$PersonID==indirectImportsRel$PersonID2[i])]),Relation=sprintf("%s - %s",indirectImportsRel$Relation1[i],indirectImportsRel$Relation2[i]),stringsAsFactors=FALSE))
}
formatDTDisplay(importLinkDf)
})
output$importForm=renderUI({
dataList[['PERSON']]$DateConfirmed=as.Date(dataList[['PERSON']]$DateConfirmed,'%d-%b-%Y')
inputPanel(
sliderInput(inputId = "importDateSelect", label = "Import Case Analysis Date",min=min(as.Date(dataList[['PERSON']]$DateConfirmed)),max=max(as.Date(dataList[['PERSON']]$DateConfirmed)),value=c(min(as.Date(dataList[['PERSON']]$DateConfirmed)),max(as.Date(dataList[['PERSON']]$DateConfirmed))))
)
})
  # Distinct residency categories present in the PERSON table; used to give
  # the residency charts a consistent factor-level ordering.
  residencyType=reactive({
    residencyType=unique(dataList[['PERSON']]$ResidencyType)
    return(residencyType)
  })
  # One colour per residency type, drawn from RColorBrewer palettes so the
  # charts share a stable colour mapping.
  # NOTE(review): assumes `qual_col_pals` (presumably defined in global.R)
  # is a table of palette names/sizes accepted by brewer.pal -- confirm.
  resiColors=reactive({
    n=length(unique(dataList[['PERSON']]$ResidencyType))
    resiColors = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))[1:n]
    return(resiColors)
  })
importCaseData=reactive({
importDateSelect=as.Date(input$importDateSelect)
residencyType=residencyType()
dataList[['PERSON']]$ResidencyType[is.na(dataList[['PERSON']]$ResidencyType)]=dataList[['PERSON']]$Nationality[is.na(dataList[['PERSON']]$ResidencyType)]
importCaseData=dataList[['PERSON']][which(dataList[['PERSON']]$PersonID %in% dataList[['CLUSTER']]$PersonID[which(dataList[['CLUSTER']]$LinkType=='Direct' & dataList[['CLUSTER']]$Cluster %in% importClusters)]),]
importCaseData$DateConfirmed=as.Date(importCaseData$DateConfirmed,'%d-%b-%Y')
importCaseData=importCaseData[which(importCaseData$DateConfirmed>=importDateSelect[1] & importCaseData$DateConfirmed<=importDateSelect[2]),]
importCaseData$DateConfirmed=as.Date(importCaseData$DateConfirmed,'%d-%b-%Y')
importCaseData$ResidencyType=factor(importCaseData$ResidencyType,levels=residencyType)
return(importCaseData)
})
caseResidImpData=reactive({
importCaseData=importCaseData()
caseResidImpData=aggregate(PersonID~ResidencyType+DateConfirmed,importCaseData,length)
caseResidImpData$PersonID=as.numeric(caseResidImpData$PersonID)
return(caseResidImpData)
})
output$caseResImportPlot=renderPlotly({
caseResidImpData=caseResidImpData()
resiColors=resiColors()
plot_ly(caseResidImpData,x=~DateConfirmed,y=~PersonID,color=~ResidencyType,type='bar',colors=resiColors) %>% layout(barmode='stack', title='Import Cases residency type against date confirmed')
})
output$caseResImportPie=renderPlotly({
caseResidImpData=caseResidImpData()
resiColors=resiColors()
caseResidImpAgg=aggregate(PersonID~ResidencyType,caseResidImpData,sum)
totalCnt=sum(caseResidImpAgg$PersonID)
plot_ly(caseResidImpAgg,values=~PersonID,labels=~ResidencyType,type='pie',hole = 0.5,marker=list(colors=resiColors,line = list(color = '#FFFFFF', width = 1)),textposition='inside',textinfo='label+percent+value') %>% layout(title='Import Cases Residency type',xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE), yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),annotations=list(text=as.character(totalCnt),'showarrow'=F, font=list(size = 40)),showlegend = FALSE)
})
caseResidData=reactive({
residencyType=residencyType()
inputData=dataList[['PERSON']]
inputData$DateConfirmed=as.Date(inputData$DateConfirmed,'%d-%b-%Y')
caseResidData=aggregate(PersonID~ResidencyType+DateConfirmed,inputData,length)
caseResidData$ResidencyType=factor(as.character(caseResidData$ResidencyType),levels=residencyType)
return(caseResidData)
})
output$caseResidencyPlot=renderPlotly({
caseResidData=caseResidData()
resiColors=resiColors()
plot_ly(caseResidData,x=~DateConfirmed,y=~PersonID,color=~ResidencyType,type='bar',marker=list(colors=resiColors)) %>% layout(barmode='stack', title='Cases residency type against date confirmed')
})
output$caseResidencyPie=renderPlotly({
caseResidData=caseResidData()
resiColors=resiColors()
caseResidAgg=aggregate(PersonID~ResidencyType,caseResidData,sum)
plot_ly(caseResidAgg,values=~PersonID,labels=~ResidencyType,type='pie',textposition='inside',textinfo='label+percent+value',marker=list(colors=resiColors)) %>% layout(title='Cases Residency type',xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE), yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),showlegend = FALSE)
})
caseNatData=reactive({
inputData=dataList[['PERSON']]
inputData$DateConfirmed=as.Date(inputData$DateConfirmed,'%d-%b-%Y')
caseNatData=aggregate(PersonID~Nationality+DateConfirmed,inputData,length)
return(caseNatData)
})
output$caseNationalePlot=renderPlotly({
caseNatData=caseNatData()
plot_ly(caseNatData,x=~DateConfirmed,y=~PersonID,color=~Nationality,type='bar') %>% layout(barmode='stack', title='Cases nationality type against date confirmed')
})
output$caseNationalePie=renderPlotly({
caseNatData=caseNatData()
caseNatAgg=aggregate(PersonID~Nationality,caseNatData,sum)
plot_ly(caseNatAgg,values=~PersonID,labels=~Nationality,type='pie',textposition='inside',textinfo='label+percent+value') %>% layout(title='Cases Nationality type',xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE), yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),showlegend = FALSE)
})
output$colorMsgTable=renderUI({
colorCodeAgg=aggregate(PlaceID~colorCode,PERSONPLACEAgg,length)
colocCodeCaseCnt=aggregate(CaseCnt~colorCode,PERSONPLACEAgg,sum)
colorCodeCntMap=setNames(colorCodeAgg$PlaceID,colorCodeAgg$colorCode)
colocCodeCaseCntMap=setNames(colocCodeCaseCnt$CaseCnt,colocCodeCaseCnt$colorCode)
cntMsg=''
caseMsg=''
for (i in c('red','orange','yellow','grey','green'))
{
cntMsg=sprintf("%s<td>%s</td>",cntMsg,ifelse(is.na(colorCodeCntMap[i]),0,colorCodeCntMap[i]))
caseMsg=sprintf("%s<td>%s</td>",caseMsg,ifelse(is.na(colocCodeCaseCntMap[i]),0,colocCodeCaseCntMap[i]))
}
msg=sprintf("<table border=1 width='50%%'><tr align='center'><td><b>Color</b></td><td>Red</td><td>Orange</td><td>Yellow</td><td>Grey</td><td>Green</td></tr><tr align='center'><td><b>Days free fr Visit</b></td><td>>=0-3<</td><td>>=3-7<</td><td>>=7-14<</td><td>>=14-28<</td><td>>=28</td></tr><tr align='center'><td><b>Place Count</b></td>%s</tr><tr align='center'><td><b>Case x Visit Count</b></td>%s</tr></table>",cntMsg,caseMsg)
HTML(msg)
})
PERSONPLACEAggSub=reactive({
mapColorSelect=input$mapColorSelect
PERSONPLACEAggSub=PERSONPLACEAgg[which(PERSONPLACEAgg$colorCode %in% mapColorSelect),]
return(PERSONPLACEAggSub)
})
output$PPAggSub=renderDT({
PERSONPLACEAggSub=PERSONPLACEAggSub()
formatDTDisplay(PERSONPLACEAggSub[,c('Name','Remarks','colorCode','CaseCnt','DaysFrDateEnd','DateStart','DateEnd')],selectChoice='single')
})
output$PERSONPLACEAggMsg=renderUI({
PERSONPLACEAggSub=PERSONPLACEAggSub()
PPAggSub_rows_selected=input$PPAggSub_rows_selected
if (length(PPAggSub_rows_selected)>0)
{
HTML(PERSONPLACEAggSub$Display[PPAggSub_rows_selected])
}
})
output$trackMap=renderLeaflet({
mapColorSelect=input$mapColorSelect
PERSONPLACEAggSub=PERSONPLACEAggSub()
PPAggSub_rows_selected=input$PPAggSub_rows_selected
if (length(PPAggSub_rows_selected)>0)
{
PERSONPLACEAggSub$icon[PPAggSub_rows_selected]='Star'
}
basemap=leaflet() %>% addTiles(group = "OSM") #%>% addProviderTiles("Stamen.TonerLite")
basemap %>% addMarkers(data = PERSONPLACEAggSub, ~longitude,~latitude,popup = ~Display,icon=placeIcons[PERSONPLACEAggSub$icon],clusterOptions = markerClusterOptions(),layerId=PERSONPLACEAggSub$PlaceID)
})
output$placeTimeVisTbl=renderTimevis({
PPAggSub_rows_selected=input$PPAggSub_rows_selected
PERSONPLACEAggSub=PERSONPLACEAggSub()
if (length(PPAggSub_rows_selected)>0)
{
PERSONPLACESub=PERSONPLACE[which(PERSONPLACE$PlaceID %in% PERSONPLACEAggSub$PlaceID[PPAggSub_rows_selected]),]
data1=data.frame(id=PERSONPLACESub$PersonID,start=PERSONPLACESub$DateStart,end=PERSONPLACESub$DateEnd,content=sprintf("Case%d (%s)",PERSONPLACESub$PersonID,PERSONPLACESub$Remarks),stringsAsFactors=FALSE)
data1=data.frame(id=PERSONPLACESub$PersonID,start=PERSONPLACESub$DateStart,end=PERSONPLACESub$DateEnd,content=sprintf("Case%d (%s)",PERSONPLACESub$PersonID,PERSONPLACESub$Remarks),stringsAsFactors=FALSE)
data1$type='range'
data1=rbind(data1,data.frame(id=max(data1$id)+1,start=format(timeVisStartDate,"%d-%b-%Y"),end=format(todayDate,"%d-%b-%Y"),content='',type='background'))
timevis(data1,options = list(editable = TRUE, multiselect = TRUE, align = "center"))
}
})
output$clusterMap=renderLeaflet({
mapColorSelect=input$mapColorSelect
PersonPlaceSub=data.table(dataList[['PERSONPLACE']][which(dataList[['PERSONPLACE']]$Remarks!='Warded' & dataList[['PERSONPLACE']]$colorCode %in% mapColorSelect),])
basemap=leaflet() %>% addTiles(group = "OSM") #%>% addProviderTiles("Stamen.TonerLite")
if (nrow(PersonPlaceSub)>0)
{
ClusterSummary=PersonPlaceSub[,.(DateEnd=max(DateEnd),DateStart=min(DateStart),CaseVisitDayCnt=.N,DaysFrDateEnd=min(DaysFrDateEnd),Places=paste(unique(Name),collapse=", ")),by=list(PlaceCluster,PlaceClusterLongitude,PlaceClusterLatitude)]
basemap %>% addMinicharts(ClusterSummary$PlaceClusterLongitude,ClusterSummary$PlaceClusterLatitude,chartdata=ClusterSummary$CaseVisitDayCnt,showLabels=TRUE,width=45)
} else
{
basemap
}
})
output$clusterDT=renderDT({
mapColorSelect=input$mapColorSelect
PersonPlaceSub=data.table(dataList[['PERSONPLACE']][which(dataList[['PERSONPLACE']]$Remarks!='Warded' & dataList[['PERSONPLACE']]$colorCode %in% mapColorSelect),])
basemap=leaflet() %>% addTiles(group = "OSM") #%>% addProviderTiles("Stamen.TonerLite")
PersonPlaceSub$Case=sprintf("Case%d",PersonPlaceSub$PersonID)
formatDTDisplay(PersonPlaceSub[,c('PlaceCluster','Name','Case','colorCode','DateStart','DateEnd','DaysFrDateEnd')])
})
# observeEvent(input$placeTimeVisTbl_selected, {
# showModal(modalDialog(
# title = "Somewhat important message",
# paste(input$placeTimeVisTbl_selected, "has been selected"),
# easyClose = TRUE,
# footer = NULL
# ))
# })
data <- reactiveValues(clickedMarker=NULL)
observeEvent(input$trackMap_marker_click,{
data$clickedMarker = input$trackMap_marker_click
selectedPERSONPLACE=PERSONPLACEAgg[which(PERSONPLACEAgg$PlaceID==data$clickedMarker$id),]
PERSONPLACESub=PERSONPLACE[which(PERSONPLACE$PlaceID %in% selectedPERSONPLACE$PlaceID),]
output$mapSelectMsg=renderUI({
#DateEnd=as.Date(max(selectedPERSONPLACE$DateEnd),format='%d-%b-%Y')
#daysPast=as.numeric(difftime(todayDate,DateEnd,unit='days'))
# msg=sprintf("<table><tr bgcolor='%s'><td><b>%s</b><br />Last visit: %s (Estimated last appearance %d days ago)<br />%d case(s) visited</td></tr></table>",dayColorCode(daysPast),unique(selectedPERSONPLACE$Name),DateEnd,daysPast,nrow(selectedPERSONPLACE))
# msg=sprintf("%s<br><table>",msg)
# for (i in 1:nrow(selectedPERSONPLACE))
# {
# msg=sprintf("%s<tr bgcolor='lightgray'><td>Case%s</td></tr>",msg,selectedPERSONPLACE$PersonID[i])
# msg=sprintf("%s<tr><td>%s</td></tr>",msg,selectedPERSONPLACE$VisitInfo[i])
# msg=sprintf("%s<tr><td>%s</td></tr>",msg,selectedPERSONPLACE$Report[i])
# }
# msg=sprintf("%s</table>",msg)
HTML(selectedPERSONPLACE$Display)
})
output$placeTimeVis=renderTimevis({
data1=data.frame(id=PERSONPLACESub$PersonID,start=PERSONPLACESub$DateStart,end=PERSONPLACESub$DateEnd,content=sprintf("Case%d (%s)",PERSONPLACESub$PersonID,PERSONPLACESub$Remarks),type='range',stringsAsFactors=FALSE)
data1=rbind(data1,data.frame(id='period',start=format(timeVisStartDate,"%d-%b-%Y"),end=format(todayDate,"%d-%b-%Y"),content='',type='background'))
timevis(data1)
})
})
output$clusterForm=renderUI({
CLUSTER=dataList[['CLUSTER']]
selectInput('clusterSelect','Highght Cluster',c('All',unique(CLUSTER$Cluster)),'All')
})
networkList=reactive({
shiny::isolate({
shiny::withProgress({
shiny::setProgress(message = "Preparing network")
reportMap=setNames(dataList[['PERSON']]$Notes,as.character(dataList[['PERSON']]$PersonID))
nodesPerson=data.frame(id=sprintf("Case%d",dataList[['PERSON']]$PersonID),label=sprintf("Case%s %s(%s, %s)",dataList[['PERSON']]$PersonID,dataList[['PERSON']]$Gender,dataList[['PERSON']]$Age,dataList[['PERSON']]$Source),Source=dataList[['PERSON']]$Source,stringsAsFactors=FALSE)
nodesPerson$Type='Warded'
nodesPerson$Source=NULL
nodesPerson$title=sprintf("%s<br />%s",nodesPerson$id,reportMap[gsub("Case","",nodesPerson$id)])
#nodesPlace=data.frame(id=sprintf("Place%d",dataList[['PLACE']]$PlaceID),label=sprintf("%s",dataList[['PLACE']]$Name),stringsAsFactors=FALSE)
# nodesPlace$Type='PLACE'
nodesCluster=data.frame(id=unique(dataList[['CLUSTER']]$Cluster),label=sprintf("(Cluster)%s",unique(unique(dataList[['CLUSTER']]$Cluster))),stringsAsFactors=FALSE)
nodesCluster$Type='Cluster'
CLUSTER=dataList[['CLUSTER']]
CLUSTER$Cluster[which(CLUSTER$Cluster %in% c('UNLINKED','OTH_Linked'))]='Local unknown source'
clusterAgg=aggregate(PersonID~Cluster+LinkType,CLUSTER,length)
clusterAggSrd=spread(clusterAgg,key='LinkType',value='PersonID')
clusterAggSrd[is.na(clusterAggSrd)]=0
clusterMap=setNames(sprintf("%d cases directly linked. %d cases indirectly linked.",clusterAggSrd$Direct,clusterAggSrd$Indirect),clusterAggSrd$Cluster)
nodesCluster$title=sprintf("%s<br />%s",nodesCluster$id,clusterMap[nodesCluster$id])
#nodes=rbind(nodesPerson,nodesPlace)
nodes=rbind(nodesPerson,nodesCluster)
colors = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))[1:length(unique(nodes$Type))]
#nodes$color=colors[as.numeric(as.factor(nodes$Type))]
#shapeMap=setNames(c('dot','triangle','star'),c('PERSON','PLACE','Cluster'))
#nodes$shape=shapeMap[nodes$Type]
#nodes$shape=shapeMap[nodes$Type]
edgePerson=data.frame(from=sprintf("Case%d",dataList[['PERSONPERSON']]$`PersonID1`),to=sprintf("Case%d",dataList[['PERSONPERSON']]$`PersonID2`),label=sprintf("(Case%d)%s-(Case%d)%s",dataList[['PERSONPERSON']]$`PersonID1`,dataList[['PERSONPERSON']]$Relation1,dataList[['PERSONPERSON']]$`PersonID2`,dataList[['PERSONPERSON']]$Relation2),stringsAsFactors=FALSE)
edgePerson$Type='PERSON'
#placeMap=setNames(dataList[['PLACE']]$Name,dataList[['PLACE']]$PlaceID)
#edgePlace=data.frame(from=sprintf("Case%d",dataList[['PERSONPLACE']]$`PersonID`),to=sprintf("Place%d",dataList[['PERSONPLACE']]$`PlaceID`),label=sprintf("%s-%s-%s (%s-%s)",dataList[['PERSONPLACE']]$PersonID,dataList[['PERSONPLACE']]$Remarks,placeMap[dataList[['PERSONPLACE']]$PlaceID],dataList[['PERSONPLACE']]$DateStart,dataList[['PERSONPLACE']]$DateEnd),Remarks=dataList[['PERSONPLACE']]$Remarks,stringsAsFactors=FALSE)
#edgePlace$Type='PLACE'
#edgePlace$Type[which(edgePlace$Remarks=='Warded')]='HOSP'
#edgePlace$Remarks=NULL
edgeCluster=data.frame(from=dataList[['CLUSTER']]$Cluster,to=sprintf("Case%d",dataList[['CLUSTER']]$PersonID),label=sprintf("Case%s linked to Cluster %s",dataList[['CLUSTER']]$PersonID,dataList[['CLUSTER']]$Cluster),stringsAsFactors=FALSE)
edgeCluster$Type='Cluster'
#edges=rbind(edgePerson,edgePlace)
edges=rbind(edgePerson,edgeCluster)
nodeValue=table(c(edges$to[which(edges$Type %in% c('PERSON','Cluster'))],edges$from[which(edges$Type %in% c('PERSON','Cluster'))]))
nodes$value=nodeValue[nodes$id]
nodes$value[which(is.na(nodes$value))]=0
nodes$title=nodes$label
edges$title=edges$label
edges$label=''
#edges$label[which(edges$value<=5)]=''
nodes$font.size=unlist(lapply(nodes$value,function(x) min(x*10,100)))
nodes$font.size[which(nodes$Type!='Cluster')]=10
#nodes$color[which(nodes$Type=='Cluster')]='orange'
# for (i in 1:nrow(nodes))
# {
# if (length(grep("PERSON",nodes$Type[i]))>0)
# {
# filename=sprintf("%s/%s",reportDir,nodes$id[i])
# if (file.exists(filename))
# {
# reportOrg=readChar(filename, file.info(filename)$size)
# report=gsub("\r\n","<br />",reportOrg)
# #print(report)
# nodes$title[i]=sprintf("%s<br />%s",nodes$title[i],report)
# }
# }
# }
# nodes$title[i]=sprintf("%s<br />%s",nodes$title[i],reportMap[gsub("Case","",nodes$id)])
deathList=dataList[['PERSON']]$PersonID[which(!is.na(dataList[['PERSON']]$Death))]
dischargeList=setdiff(dataList[['PERSON']]$PersonID[which(!is.na(dataList[['PERSON']]$Discharge))],deathList)
nodes$group=nodes$Type
nodes$group[which(nodes$id %in% sprintf("Case%d",deathList))]='Deceased'
nodes$group[which(nodes$id %in% sprintf("Case%d",dischargeList))]='Discharged'
networkList=list()
networkList[['nodes']]=nodes
networkList[['edges']]=edges
return(networkList)
})
})
})
output$personDT=renderDT({
PERSONTMP=dataList[['PERSON']]
PERSONTMP$Report=NULL
PERSONTMP$Info=NULL
PERSONTMP$Info1=NULL
formatDTDisplay(PERSONTMP)
})
output$lobNetwork=renderVisNetwork({
#clusterSelect=input$clusterSelect
#personDT_rows_selected=input$personDT_rows_selected
networkList=networkList()
#if (!is.null(clusterSelect))
#{
#CLUSTER=dataList[['CLUSTER']]
nodes=networkList[['nodes']]
edges=networkList[['edges']]
#edges=edges[which(edges$Type!='HOSP'),]
#nodes$color[which(nodes$id %in% sprintf("Case%d",dataList[['PERSON']]$PersonID[which(dataList[['PERSON']]$Transport=='Evacuation')]))]='lightblue'
# if (length(personDT_rows_selected)>0)
# {
# nodes$shape[which(nodes$id %in% sprintf("Case%d",dataList[['PERSON']]$PersonID[personDT_rows_selected]))]='diamond'
# nodes$color[which(nodes$id %in% sprintf("Case%d",dataList[['PERSON']]$PersonID[personDT_rows_selected]))]='red'
# nodes$value[which(nodes$id %in% sprintf("Case%d",dataList[['PERSON']]$PersonID[personDT_rows_selected]))]=10
# }
# if (clusterSelect!='All')
# {
# clusterList=CLUSTER$PersonID[which(CLUSTER$Cluster %in% clusterSelect)]
# nodes$groupback=nodes$group
# nodes$group=sprintf("%s-grey",nodes$groupback)
# nodes$group[which(nodes$id %in% sprintf("Case%d",clusterList))]=nodes$groupback[which(nodes$id %in% sprintf("Case%d",clusterList))]
# nodes$group[which(nodes$id %in% sprintf("Place%d",dataList[['PERSONPLACE']]$PlaceID[which(dataList[['PERSONPLACE']]$PersonID %in% clusterList)]))]=nodes$groupback[which(nodes$id %in% sprintf("Place%d",dataList[['PERSONPLACE']]$PlaceID[which(dataList[['PERSONPLACE']]$PersonID %in% clusterList)]))]
# nodes$group[which(nodes$id==clusterSelect)]=nodes$groupback[which(nodes$id==clusterSelect)]
# }
#graph <- graph.data.frame(edges)
#degree_value <- degree(graph)
nodes$value=2*nodes$value
if (nrow(nodes)>0 & nrow(edges)>0)
{
visNetworkGraph=visNetwork(nodes, edges, width = "100%") %>% visEvents(click = "function(nodes){ Shiny.onInputChange('click', nodes.nodes[0]);;}") %>% visGroups(groupname="Discharged",color='lightgreen',shape='dot') %>% visGroups(groupname="Deceased",color='red',shape='dot') %>% visGroups(groupname="Warded",color='lightblue',shape='dot') %>% visGroups(groupname="Cluster",color='orange',shape='star') %>% visLegend()
# if (clusterSelect!='All')
# {
# visNetworkGraph = visNetworkGraph %>% visGroups(groupname="Imported-grey",color='grey',shape='dot') %>% visGroups(groupname="Local Tx-grey",color='grey',shape='dot') %>% visGroups(groupname="PLACE-grey",color='grey',shape='triangle') %>% visGroups(groupname="HOSP-grey",color='grey',shape='diamond') %>% visGroups(groupname="Cluster-grey",color='grey',shape='star')
# }
return(visNetworkGraph)
} else
{
return(NULL)
}
#}
})
output$personVisitMapMsg=renderUI({
nodeid=input$click
networkList=networkList()
nodes=networkList[['nodes']]
selectedNode=nodes[which(nodes$id==nodeid),]
if (length(grep('^Case',selectedNode$id))>0)
{
msg=sprintf("Places visited by %s",selectedNode$id)
} else
if (length(grep('^Place',selectedNode$id))>0)
{
placename=dataList[['PLACE']]$Name[which(sprintf("Place%d",dataList[['PLACE']]$PlaceID)==nodeid)]
msg=sprintf("Location: %s (%s)<br />Exposure period of cases at this location",selectedNode$id,placename)
} else
{
msg=sprintf("Cluster: %s<br />Cases in cluster public exposure period",selectedNode$id)
}
HTML(msg)
})
output$personVisitMap=renderLeaflet({
nodeid=input$click
if (length(grep("^Case",nodeid))>0)
{
networkList=networkList()
nodes=networkList[['nodes']]
selectedNode=nodes[which(nodes$id==nodeid),]
PERSONPLACE=dataList[['PERSONPLACE']]
if (length(grep("PERSON",selectedNode$Type))>0)
{
PersonID=gsub("Case","",selectedNode$id)
PERSONPLACESub=PERSONPLACE[which(PERSONPLACE$PersonID==PersonID),]
basemap=leaflet() %>% addTiles(group = "OSM") #%>% addProviderTiles("Stamen.TonerLite")
basemap %>% addMarkers(data = PERSONPLACESub, ~longitude,~latitude,icon=placeIcons[PERSONPLACESub$icon],popup = ~Display,clusterOptions = markerClusterOptions())
}
}
})
output$mapMsg=renderUI({
nodeid=input$click
if ('click' %in% names(input))
{
if (!is.null(nodeid))
{
if (length(grep("^Case",nodeid))>0)
{
htmltools::tagList(list(leafletOutput('personVisitMap'),renderUI(HTML(dataList[['PERSON']]$Report[which(dataList[['PERSON']]$PersonID==gsub('Case','',nodeid))]))))
} else
if (length(grep("^Place",nodeid))>0)
{
timevisOutput('mapMsgTimevisPlace')
} else
if (nodeid %in% dataList[['CLUSTER']]$Cluster)
{
timevisOutput('mapMsgTimevisCluster')
#dataList[['CLUSTER']][which(dataList[['CLUSTER']]$Cluster==nodeid)]
}
}
}
})
output$mapMsgTimevisPlace=renderTimevis({
nodeid=input$click
PERSONPLACESub=dataList[['PERSONPLACE']][which(dataList[['PERSONPLACE']]$PlaceID %in% gsub("Place","",nodeid)),]
tmpData=data.frame(start=PERSONPLACESub$DateStart,end=PERSONPLACESub$DateEnd,content=sprintf("Case%d",PERSONPLACESub$PersonID),type='range',stringsAsFactors=FALSE)
tmpDataA=rbind(tmpData,data.frame(start=format(min(as.Date(tmpData$start,'%d-%b-%Y')),'%d-%b-%Y'),end=format(todayDate,'%d-%b-%Y'),content='',type='background'))
timevis(tmpDataA)
})
output$mapMsgTimevisCluster=renderTimevis({
data1=data1()
nodeid=input$click
data1Sub=data1[which(data1$groupName==nodeid),]
timevis(data1Sub)
})
# Builds the per-case exposure-timeline data frame used by the timevis and
# exposure-count views. Each row is one case's exposure window
# (ExpStartDate -> IsolationDate), colour-coded by age, plus one trailing
# 'background' row spanning timeVisStartDate -> today.
data1=reactive({
PERSON=dataList[['PERSON']]
PERSON$Notes[which(is.na(PERSON$Notes))]=''
data1=data.frame(Age=PERSON$Age,id=PERSON$PersonID,start=PERSON$ExpStartDate,end=PERSON$IsolationDate,content=sprintf("Case%d (%s,%s yrs old), %s, Confirmed: %s",PERSON$PersonID,PERSON$Gender,PERSON$Age,PERSON$Notes,format(PERSON$AnnouncementDate,'%d-%b-%Y')),groupName=PERSON$groupName,stringsAsFactors=FALSE)
data1=data1[order(data1$id,decreasing=TRUE),]
# Map each cluster/group name to a sequential timevis group id
groupMap=setNames(1:length(unique(data1$groupName)),unique(data1$groupName))
data1$group=groupMap[data1$groupName]
#data1$end=format(data1$end,"%d-%b-%Y")
# Open exposure windows (not yet isolated) extend to today
data1$end[which(is.na(data1$end))]=format(todayDate,"%d-%b-%Y")
# Colour by age band: blue for >60, green otherwise
data1$className="greenBg"
data1$className[which(data1$Age>60)]="blueBg"
data1$type='range'
data1$Age=NULL
# Background band row must be appended AFTER the NA-end fill above
data1=rbind(data1,data.frame(id=max(data1$id)+1,start=format(timeVisStartDate,"%d-%b-%Y"),end=format(todayDate,"%d-%b-%Y"),content='',type='background',groupName=NA,group=NA,className='whiteBg'))
data1$start=as.Date(data1$start,'%d-%b-%Y')
data1$end=as.Date(data1$end,'%d-%b-%Y')
return(data1)
})
# Full exposure timeline of all cases, grouped by cluster, with cluster
# summary notes as the group labels.
output$personExpTimeline=renderTimevis({
  timelineData=data1()
  activeGroups=unique(timelineData$groupName[which(!is.na(timelineData$groupName))])
  groupDf=data.frame(id=1:length(activeGroups),content=ClusterSummaryNoteMap[activeGroups])
  timevis(timelineData,groupDf)
})
# Line chart of how many cases were in their exposure window on each day.
# Fix: the original grew expDateDf with rbind() inside a per-day loop
# (quadratic copies); the count for each day is now computed directly with
# a vectorized comparison. na.rm=TRUE reproduces the original which()-based
# behavior of silently dropping rows with NA dates.
output$expCntChart=renderPlotly({
  data1=data1()
  dateSeq=seq(min(data1$start),max(data1$end),by='days')
  counts=vapply(dateSeq,function(d) sum(data1$start<=d & data1$end>=d,na.rm=TRUE),numeric(1))
  expDateDf=data.frame(Date=dateSeq,Count=counts)
  plot_ly(expDateDf,x=~Date,y=~Count,mode='lines+markers')
})
# Raw exposure-timeline table, for inspection in a DataTable.
output$data1=renderDT({
  formatDTDisplay(data1())
})
##################################################################
# By Date
##################################################################
# output$DateInfectPlot=renderPlotly({
#
# })
# Per-day chart of exposure-window counts vs newly confirmed counts
# (the latter taken from the TimeTrack daily table).
output$expCntChartDate=renderPlotly({
data1=data1()
minDate=min(data1$start)
maxDate=max(data1$end)
# NOTE(review): PERSON is not defined in this scope — presumably the global
# loaded elsewhere; this mutation only affects a local copy. Verify intent.
PERSON$DateConfirmed=as.Date(PERSON$DateConfirmed,'%d-%b-%Y')
expDateDf=NULL
for (i in as.character(seq(minDate,maxDate,by='days')))
{
iDate=as.Date(i,'%Y-%m-%d')
# Cases whose exposure window spans day i
tmpData=data1[which(data1$start<=iDate & data1$end>=iDate),]
# Positive count recorded in TimeTrack for day i (0 when day is absent)
cntConfirmed=ifelse(length(which(as.character(dataList[['TimeTrack']]$Date,'%Y-%m-%d')==i))>0,dataList[['TimeTrack']]$Positive[which(as.character(dataList[['TimeTrack']]$Date,'%Y-%m-%d')==i)],0)
expDateDf=rbind(expDateDf,data.frame(Date=i,ExposureCount=nrow(tmpData),InfectedCount=cntConfirmed,stringsAsFactors=FALSE))
}
expDateDf$Date=as.Date(expDateDf$Date,'%Y-%m-%d')
expDateDfMelt=melt(expDateDf,id.vars='Date')
plot_ly(expDateDfMelt,x=~Date,y=~value,color=~variable,mode='lines+markers')
})
# Cleaned daily tracking series: selects the key TimeTrack columns, fills
# isolated NA gaps with floor((previous + next)/2), and derives the
# close-contacts-per-positive ratio.
TimeTrackSub=reactive({
  TimeTrack=dataList[['TimeTrack']]
  TimeTrack=TimeTrack[order(TimeTrack$Date),]
  TimeTrackSub=data.frame(TimeTrack[,c('Date','Pending','Positive','Negative','Isolated','CloseContactInSG','Critical','Discharge')])
  for (i in colnames(TimeTrackSub)[2:ncol(TimeTrackSub)])
  {
    timeSeries=as.numeric(TimeTrackSub[,i])
    index=which(is.na(timeSeries))
    # Fix: the original looped over 1:length(index), which errors with an
    # NA subscript whenever a column has no NAs (index[1] is NA).
    # seq_along() skips empty columns; the bounds guard also leaves a
    # boundary NA (no neighbour on one side) in place instead of crashing.
    for (j in seq_along(index))
    {
      lo=index[j]-1
      hi=index[j]+1
      if (lo>=1 && hi<=length(timeSeries))
      {
        timeSeries[index[j]]=floor((timeSeries[lo]+timeSeries[hi])/2)
      }
    }
    TimeTrackSub[,i]=timeSeries
  }
  # Average number of close contacts traced per positive case
  TimeTrackSub$CloseContactFactor=TimeTrackSub$CloseContactInSG/TimeTrackSub$Positive
  return(TimeTrackSub)
})
# Long-format version of TimeTrackSub for plotting, with Date coerced.
TimeTrackMelt=reactive({
  longData=melt(TimeTrackSub(),id.vars='Date')
  longData$Date=as.Date(longData$Date)
  longData
})
# Day-over-day increments of the main tracking series (first day dropped;
# columns that are levels rather than cumulative counts are excluded).
TimeTrackDiff=reactive({
  daily=TimeTrackSub()
  diffDf=data.frame(Date=daily$Date[2:nrow(daily)])
  diffCols=setdiff(colnames(daily),c('Date','Pending','Critical','Discharge','CloseContactFactor'))
  for (col in diffCols)
  {
    diffDf[,col]=diff(as.numeric(daily[,col]),1)
  }
  diffDf
})
# tsDf=data.frame(x=1:nrow(TimeTrackMelt),y=as.numeric(TimeTrackMelt$value[which(TimeTrackMelt$variable=='CloseContactFactor')]),stringsAsFactors=FALSE)
# coef(lm(y~x,tsDf))
# tsDf=data.frame(x=1:nrow(TimeTrackDiffMelt),y=as.numeric(TimeTrackDiffMelt$value[which(TimeTrackDiffMelt$variable=='CloseContactIncFactor')]),stringsAsFactors=FALSE)
# tsDf$y[which(tsDf$y==Inf)]=0
# coef(lm(y~x,tsDf))
# tsDf=data.frame(x=1:nrow(TimeTrackDiffMelt),y=as.numeric(TimeTrackDiffMelt$value[which(TimeTrackDiffMelt$variable=='PositiveInc')]),stringsAsFactors=FALSE)
# coef(lm(y~x,tsDf))
# Cumulative positives vs the close-contacts-per-case factor over time.
output$contactTraceChartDate=renderPlotly({
  longData=TimeTrackMelt()
  traceData=longData[which(longData$variable %in% c('Positive','CloseContactFactor')),]
  plot_ly(traceData,x=~Date,y=~value,color=~variable,mode='lines+markers')
})
# Daily positive increments and the incremental close-contacts-per-new-case
# factor, in long format for plotting.
TimeTrackDiffMelt=reactive({
  TimeTrackDiff=TimeTrackDiff()
  TimeTrackDiff$CloseContactIncFactor=TimeTrackDiff$CloseContactInSG/TimeTrackDiff$Positive
  # Fix: the original checked is.na(TimeTrackDiff$CloseContactFactor) — a
  # column that is never present in the diff frame — so the cleanup was a
  # no-op and NA/Inf ratios (days with 0 new positives) leaked into the
  # plot. Zero out all non-finite values of the column actually computed.
  TimeTrackDiff$CloseContactIncFactor[which(!is.finite(TimeTrackDiff$CloseContactIncFactor))]=0
  TimeTrackDiff$PositiveInc=TimeTrackDiff$Positive
  TimeTrackDiffMelt=melt(TimeTrackDiff[,c('Date','PositiveInc','CloseContactIncFactor')],id.vars='Date')
  return(TimeTrackDiffMelt)
})
# Plot of the daily-increment series produced by TimeTrackDiffMelt.
output$traceFactorIncPlot=renderPlotly({
  incData=TimeTrackDiffMelt()
  plot_ly(incData,x=~Date,y=~value,color=~variable,mode='lines+markers')
})
# Daily positive/negative test counts with the positive-outcome ratio,
# melted for a stacked-bar + line overlay chart, ordered by date.
TimeTrackDiffSprd=reactive({
  dailyDiff=TimeTrackDiff()
  dailyDiff$PosNegRatio=dailyDiff$Positive/(dailyDiff$Negative+dailyDiff$Positive)
  sprd=melt(dailyDiff[,c('Date','Positive','Negative','PosNegRatio')],id.vars=c('Date','PosNegRatio'))
  sprd[order(sprd$Date),]
})
# Stacked bars of daily positive/negative test outcomes with the positive
# ratio overlaid on a secondary axis.
# Fix: chart title 'Srv Sales' and x-axis label 'FY_QR' were copy-paste
# leftovers from an unrelated chart; corrected to describe this plot.
output$posNegRatioPlot=renderPlotly({
  TimeTrackDiffSprd=TimeTrackDiffSprd()
  plot_ly(TimeTrackDiffSprd) %>% add_trace(x=~Date,y=~value,color=~variable,type='bar') %>% add_trace(x=~Date,y=~PosNegRatio,yaxis='y2',type = 'scatter', mode = 'lines',name='PosOutcomeRatio') %>% layout(barmode = 'stack',title = 'Daily test outcomes and positive ratio', xaxis = list(title = "Date"), yaxis = list(side = 'left', title = 'TestCount', showgrid = FALSE, zeroline = FALSE), yaxis2 = list(side = 'right', overlaying = "y", title = 'Ratio',range=c(0,1), showgrid = FALSE, zeroline = FALSE))
})
# Stacked daily breakdown of cumulative positives into normal ward,
# critical, and discharged.
output$CaseTrend=renderPlotly({
  daily=TimeTrackSub()
  breakdown=daily[,c('Date','Positive','Discharge','Critical')]
  breakdown$NormalWard=breakdown$Positive-breakdown$Discharge-breakdown$Critical
  stacked=melt(breakdown[,c('Date','NormalWard','Critical','Discharge')],id.vars='Date')
  plot_ly(stacked,x=~Date,y=~value,color=~variable,type='bar') %>% layout(barmode='stack')
})
######################################################################
# Indicators
######################################################################
### Admission rate vs discharge rate
# Header showing which as-of date is currently selected.
output$dateMsg=renderUI({
  HTML(sprintf("<h3>Information as of %s</h3>",input$DateSelect))
})
# One box per predictive indicator: left panel shows the 5-day rolling
# indicator score as coloured markers, right panel (when the indicator has
# a source series) shows the raw source with its median as a reference line.
output$indicatorPlot <- renderUI({
indCols=c('Date','WardIncInd','CloseContactIncInd','QuarantineCurrentIncInd','UnlinkCnt','ExpCaseInd')
# indicatorSource maps each indicator to its raw source column (NA if none)
selectedCol=c(indCols,indicatorSource[indCols])
selectedCol=selectedCol[which(!is.na(selectedCol))]
plotData=dataList[['TimeTrack']][,selectedCol]
displayMap=setNames(c('Inpatient','Close Contact','Quarantined','Unlinked Cases','Case Exposure'),c('WardIncInd','CloseContactIncInd','QuarantineCurrentIncInd','UnlinkCnt','ExpCaseInd'))
tagList(
lapply(c('WardIncInd','CloseContactIncInd','QuarantineCurrentIncInd','UnlinkCnt','ExpCaseInd'), function(i){
box(width=6,title=sprintf("Indicator %s and its source historical trend",i)
,renderPlotly({
subplotList=list()
dataTmp=plotData[,c('Date',i)]
names(dataTmp)[2]='Indicator'
# Binarise the daily flag, then take a trailing 5-day sum (first 4 days
# use the raw flag since rollsum needs a full window)
dataTmp$Indicator[which(dataTmp$Indicator>0)]=1
dataTmp$Sum5DayInd=c(dataTmp$Indicator[1:4],rollsum(dataTmp$Indicator,5))
colorCode=factor(indicatorColorMap[as.character(dataTmp$Sum5DayInd)],levels=unique(indicatorColorMap[as.character(dataTmp$Sum5DayInd)]))
subplotList[[1]]=plot_ly(dataTmp) %>% add_trace(x=~Date,y=~Sum5DayInd,mode='markers',color=colorCode,colors=unique(indicatorColorMap[as.character(dataTmp$Sum5DayInd)]),marker=list(size=15)) %>% layout(yaxis=list(title=displayMap[i],range=c(-1,6)))
if (!is.na(indicatorSource[i]))
{
# Source trend panel with a median reference segment
dataTmp=plotData[,c('Date',indicatorSource[i])]
names(dataTmp)[2]='IndicatorSrc'
subplotList[[2]]=plot_ly(dataTmp) %>% add_trace(x=~Date,y=~IndicatorSrc,mode='lines') %>% layout(yaxis=list(title=sprintf("Source of %s",displayMap[i]))) %>% add_segments(x=min(dataTmp$Date),xend=max(dataTmp$Date),y=~median(dataTmp$IndicatorSrc),yend=~median(dataTmp$IndicatorSrc),mode='lines')
}
if (i=='UnlinkCnt')
{
# UnlinkCnt has no entry in indicatorSource; it is its own source series
dataTmp=plotData[,c('Date',i)]
names(dataTmp)[2]='IndicatorSrc'
subplotList[[2]]=plot_ly(dataTmp) %>% add_trace(x=~Date,y=~IndicatorSrc,mode='lines') %>% layout(yaxis=list(title=sprintf("Source of %s",displayMap[i]))) %>% add_segments(x=min(dataTmp$Date),xend=max(dataTmp$Date),y=~median(dataTmp$IndicatorSrc),yend=~median(dataTmp$IndicatorSrc),mode='lines')
}
subplot(subplotList,nrows=1)
})
)
})
)
})
# output$indicatorPlot=renderUI({
# plotData=dataList[['TimeTrack']][,c('Date','WardIncInd','CloseContactIncInd','QuarantineCurrentIncInd','UnlinkCnt','ExpCaseInd')]
# displayMap=setNames(c('Inpatient','Close Contact','Quarantined','Unlinked Cases','Case Exposure'),c('WardIncInd','CloseContactIncInd','QuarantineCurrentIncInd','UnlinkCnt','ExpCaseInd'))
# for (i in c('WardIncInd','CloseContactIncInd','QuarantineCurrentIncInd','UnlinkCnt','ExpCaseInd'))
# tagList(lapply(1:length(c('WardIncInd','CloseContactIncInd','QuarantineCurrentIncInd','UnlinkCnt','ExpCaseInd')),function(i)
# {
# box(width=12
# ,renderPlotly({
# dataTmp=plotData[,c('Date',i)]
# names(dataTmp)[2]='Indicator'
# dataTmp$Indicator[which(dataTmp$Indicator>0)]=1
# dataTmp$Sum5DayInd=c(dataTmp$Indicator[1:4],rollsum(dataTmp$Indicator,5))
# colorCode=factor(indicatorColorMap[as.character(dataTmp$Sum5DayInd)],levels=unique(indicatorColorMap[as.character(dataTmp$Sum5DayInd)]))
# plot_ly(dataTmp) %>% add_trace(x=~Date,y=~Sum5DayInd,mode='markers',color=colorCode,colors=unique(indicatorColorMap[as.character(dataTmp$Sum5DayInd)]),marker=list(size=15)) %>% layout(yaxis=list(title=displayMap[i],range=c(-1,6)))
# })
# )
# }))
# })
# Static explanatory text for the indicator panels, one line per entry.
output$indicatorPlotUI=renderUI({
  HTML(paste(indicatorDisplay,collapse="<br />"))
})
# DORSCON banner plus a row of traffic-light predictive indicators.
# Each indicator is the sum of daily "bad" flags over the trailing 5 days,
# coloured via indicatorColorMap; a Better/Worse marker compares today's
# 5-day sum against yesterday's.
# Refactor: the original computed the previous-day and current indicator
# lists with two near-identical 20-line blocks; both now go through one
# local helper.
output$trafficLight=renderUI({
  DateSelect=input$DateSelect
  TimeTrackSub=dataList[['TimeTrack']][which(dataList[['TimeTrack']]$Date<=DateSelect),]
  TimeTrackSub=TimeTrackSub[order(TimeTrackSub$Date),]
  # Same series with the latest day dropped, for the Better/Worse comparison
  TimeTrackSubPrev=TimeTrackSub[1:(nrow(TimeTrackSub)-1),]
  # Build the named indicator list (5-day trailing sum + colour) for one
  # snapshot of the tracking table.
  buildIndicators=function(tt)
  {
    # UnlinkCnt is a raw count; binarise so each day contributes at most 1
    tt$UnlinkCnt[which(tt$UnlinkCnt>0)]=1
    indCols=c('Inpatient'='WardIncInd','Close Contact'='CloseContactIncInd','Quaran-tined'='QuarantineCurrentIncInd','Unlinked Cases'='UnlinkCnt')
    indList=list()
    for (nm in names(indCols))
    {
      ind=sum(tail(tt[[indCols[nm]]],5))
      indList[[nm]]=list(bgcolor=indicatorColorMap[as.character(ind)],ind=ind)
    }
    return(indList)
  }
  indicatorListPrev=buildIndicators(TimeTrackSubPrev)
  indicatorList=buildIndicators(TimeTrackSub)
  # Better/Worse/unchanged marker per indicator vs the previous day
  indicatorChangeList=list()
  for (i in names(indicatorList))
  {
    indicatorChangeList[[i]]=ifelse(indicatorListPrev[[i]][['ind']]==indicatorList[[i]][['ind']],"--",ifelse(indicatorListPrev[[i]][['ind']]>indicatorList[[i]][['ind']],"<font color='green'><b>Better</b></font>","<font color='red'><b>Worse</b></font>"))
  }
  width=60
  widthTable=width*length(indicatorList)
  # DORSCON banner coloured by the latest recorded DORSCON level
  dorconMsg=sprintf("<table border=1 width='%dpx' height='%dpx><tr valign='center' align='center'><td bgcolor='%s' align='center'><big><b>DORSCON</b></big></td><tr /></table>",widthTable,width,tail(TimeTrackSub$DORSCON,1))
  indicatorRowMsg=sprintf("%s<b>Predict Indicators</b><table border=1 width='%dpx' height='%dpx><tr valign='center' align='center'>",dorconMsg,widthTable,width)
  for (i in names(indicatorList))
  {
    indicatorRowMsg=sprintf("%s<td bgcolor='%s' width='%dpx' align='center'>%s<br />%s</td>",indicatorRowMsg,indicatorList[[i]][['bgcolor']],width,i,indicatorChangeList[[i]])
  }
  indicatorRowMsg=sprintf("%s</tr>",indicatorRowMsg)
  msg=sprintf("%s</table>",indicatorRowMsg)
  return(HTML(msg))
})
# Legend explaining the traffic-light indicator colour coding.
# Fix: user-facing typo "Inpatent" corrected to "Inpatient".
output$trafficLightMsg=renderUI({
  indicatorMsg=sprintf('<h5><b>Indicator Color Code (Green, Yellow, Orange, Red)</b></h5><h5>The color shows if situation is getting better based on some indicators.<br />Inpatient/CloseContact: If increase in inpatient or Close Contact count is higher than the previous period median, it is flagged as 1. The color code is determined by sum of flags for the past 5 days with Green=0, Yellow=1,2, Orange=3,4 and Red=5. LabTests represents number of samples sent to lab each day.</h5>')
  return(HTML(indicatorMsg))
})
# Summary statistics panel: median recovery days, median detection days,
# GP visits before admission, and gender ratio, for cases announced up to
# the selected date.
# Fixes: (1) removed duplicated recomputation of medianHospDays/rangeHospDay;
# (2) the detection-days range upper bound wrongly used rangeHospDay[2]
# instead of rangeDetectionTime[2].
output$interestingMsg=renderUI({
  DateSelect=input$DateSelect
  PERSONT=dataList[['PERSON']][which(dataList[['PERSON']]$AnnouncementDate<=DateSelect),]
  # Days from first symptom to discharge (only known for discharged cases)
  PERSONT$RecoveryDays=as.numeric(difftime(as.Date(PERSONT$Discharge,'%d-%b-%Y'),as.Date(PERSONT$SymptomsFirst,'%d-%b-%Y'),unit='days'))
  # Days from first symptom to confirmation
  PERSONT$DetectionTime=as.numeric(difftime(as.Date(PERSONT$DateConfirmed,'%d-%b-%Y'),as.Date(PERSONT$SymptomsFirst,'%d-%b-%Y'),unit='days'))
  medianHospDays=median(PERSONT$RecoveryDays[which(!is.na(PERSONT$RecoveryDays))])
  rangeHospDay=range(PERSONT$RecoveryDays[which(!is.na(PERSONT$RecoveryDays))])
  medianDetectionTime=median(PERSONT$DetectionTime[which(!is.na(PERSONT$DetectionTime))])
  rangeDetectionTime=range(PERSONT$DetectionTime[which(!is.na(PERSONT$DetectionTime))])
  GenderTable=table(PERSONT$Gender)
  GPVisitMedian=median(PERSONT$GPVisit,na.rm=TRUE)
  GPVisitRange=range(PERSONT$GPVisit,na.rm=TRUE)
  msg=sprintf("Date: %s<br />Median <b>recovery</b> days <font size=6>%0.1f</font><font size=4> (range %d - %d days)</font><small>(first symptom to discharge, based on dischared data only)</small>",DateSelect,medianHospDays,rangeHospDay[1],rangeHospDay[2])
  msg=sprintf("%s<br />Median <b>detection</b> days: <font size=6>%0.1f</font><font size=4> (range %d - %d days)</font><small>(first symptom to confirm date)</small>",msg,medianDetectionTime,rangeDetectionTime[1],rangeDetectionTime[2])
  msg=sprintf("%s<br />Median no. of <b>GP visits</b> before admitted: <font size=6>%0.2f</font><font size=4> (range %d - %d times)</font><small>(For those with information)</small>",msg,GPVisitMedian,GPVisitRange[1],GPVisitRange[2])
  msg=sprintf("%s<br /><b>Gender Ratio</b>: Male (%d) to female (%d) ratio is <font size=6>%0.2f</font> SG resident gender ratio is 1.0422",msg,GenderTable['M'],GenderTable['F'],GenderTable['M']/GenderTable['F'])
  HTML(msg)
})
############ hospitalization observation
# Histogram of currently-warded cases by days hospitalised, with a hover
# text listing the cases at each day count.
output$wardedPlot=renderPlotly({
  stillWarded=dataList[['PERSON']][which(is.na(dataList[['PERSON']]$Discharge)),]
  stillWarded$hospDays=as.numeric(difftime(Sys.Date()+1,as.Date(stillWarded$DateConfirmed,'%d-%b-%Y'),unit='days'))
  countsByDay=data.frame(table(stillWarded$hospDays))
  names(countsByDay)[1]='HospDay'
  # table() returns a factor column; go via character to recover the values
  countsByDay$HospDay=as.numeric(as.character(countsByDay$HospDay))
  stillWarded$PersonID=sprintf("Case%s (%s)",stillWarded$PersonID,stillWarded$Notes)
  hoverDf=aggregate(PersonID~hospDays,stillWarded,paste,collapse="<br />")
  hoverMap=setNames(hoverDf$PersonID,as.character(hoverDf$hospDays))
  countsByDay$text=hoverMap[as.character(countsByDay$HospDay)]
  medianAge=median(as.numeric(stillWarded$Age),na.rm=TRUE)
  plot_ly(countsByDay,x=~HospDay,y=~Freq,type='bar', text=~text) %>% layout(title=sprintf('Count of currently warded by days hospitalized (median age=%0.1f)',medianAge))
})
# Histogram of discharged (surviving) cases by recovery days, with hover
# text listing the cases at each day count.
# Fix: as.numeric() on the factor column produced by table() returns level
# indices, not the day values (so x positions and the hover-text lookup
# were wrong); convert via as.character() first, matching wardedPlot.
output$dischargePlot=renderPlotly({
  dischgPerson=dataList[['PERSON']][which(!is.na(dataList[['PERSON']]$Discharge) & is.na(dataList[['PERSON']]$Death)),]
  dischgAgg=data.frame(table(dischgPerson$RecoveryDays))
  names(dischgAgg)[1]='RecoveryDays'
  dischgAgg$RecoveryDays=as.numeric(as.character(dischgAgg$RecoveryDays))
  dischgPerson$PersonID=sprintf("Case%s (%s)",dischgPerson$PersonID,dischgPerson$Notes)
  recoveryText=aggregate(PersonID~RecoveryDays,dischgPerson,paste,collapse="<br />")
  recoveryTextMap=setNames(recoveryText$PersonID,recoveryText$RecoveryDays)
  dischgAgg$text=recoveryTextMap[as.character(dischgAgg$RecoveryDays)]
  medianAge=median(as.numeric(dischgPerson$Age),na.rm=TRUE)
  medianRecoveryDays=median(dischgPerson$RecoveryDays,na.rm=TRUE)
  plot_ly(dischgAgg,x=~RecoveryDays,y=~Freq,type='bar', text=~text) %>% layout(title=sprintf('Count of discharged person hosp days (median age=%0.1f, median recovery days=%0.1f)',medianAge,medianRecoveryDays))
})
######################### Update
# Cumulative positives and positives-still-in-ward (left axis) with the
# current quarantine count on a secondary axis.
output$posPlot=renderPlotly({
  tracked=dataList[['TimeTrack']][which(dataList[['TimeTrack']]$Date<=input$DateSelect),]
  fig=plot_ly(tracked)
  fig=fig %>% add_trace(x=~Date,y=~Positive,mode='lines+markers',name='Positive')
  fig=fig %>% add_trace(x=~Date,y=~PosInWard,mode='lines+markers',name='Pos Still in ward')
  fig=fig %>% add_trace(x=~Date,y=~QuarantineCurrent,yaxis='y2',type='scatter',mode='lines+markers')
  fig %>% layout(xaxis = list(title=''), yaxis = list(side = 'left', title = '', showgrid = FALSE, zeroline = FALSE), yaxis2 = list(side = 'right', overlaying = "y", title = '', showgrid = FALSE, zeroline = FALSE),showlegend = FALSE)
})
# Green "good news" banners: consecutive days with no increase in warded
# cases, daily positives, or quarantined persons.
# Fixes: (1) msg was initialised to NULL, so sprintf("%s...", msg, ...)
# returned character(0) and silently dropped every later message whenever
# the first condition failed — messages are now collected in a vector and
# joined; (2) user-facing typo "postivie" corrected.
output$posPlotMsg=renderUI({
  DateSelect=input$DateSelect
  TimeTrackSub=dataList[['TimeTrack']][which(dataList[['TimeTrack']]$Date<=DateSelect),]
  # Days since the last increase in each series
  notIncDays=nrow(TimeTrackSub)-max(which(TimeTrackSub$WardInc>0))
  posNotIncDays=nrow(TimeTrackSub)-max(which(TimeTrackSub$PosIncInc>0))
  quarantineDecDay=nrow(TimeTrackSub)-max(which(TimeTrackSub$QuarantineCurrentInc>0))
  msgs=character(0)
  if (notIncDays>1)
  {
    msgs=c(msgs,sprintf("<font color='green'><b>Cases still in hospitals have decreased or remained same for consecutive %d days.</b></font>",notIncDays))
  }
  if (posNotIncDays>1)
  {
    msgs=c(msgs,sprintf("<font color='green'><b>Per day positive decreases or remained same for consecutive %d days.</b></font>",posNotIncDays))
  }
  if (quarantineDecDay>1)
  {
    msgs=c(msgs,sprintf("<font color='green'><b>Persons under quarantine decreased for consecutive %d days.</b></font>",quarantineDecDay))
  }
  HTML(paste(msgs,collapse="<br />"))
})
# Cases whose announcement date is exactly the selected date.
PersonNew=reactive({
  selectedDate=input$DateSelect
  dataList[['PERSON']][which(as.Date(dataList[['PERSON']]$AnnouncementDate)==selectedDate),]
})
# Daily headline panel: new-case/discharge/death tables, cumulative totals,
# whether every new case is linked to earlier cases, and which clusters /
# person-to-person links the new cases belong to.
output$latestUpdate=renderUI({
DateSelect=input$DateSelect
PersonNew=PersonNew()
TimeTrackSub=dataList[['TimeTrack']][which(dataList[['TimeTrack']]$Date<=DateSelect),]
newCases=tail(TimeTrackSub$PosInc,1)
ClusterNewDf=dataList[['CLUSTER']][which(dataList[['CLUSTER']]$PersonID %in% PersonNew$PersonID),]
msg=''
# Flag when today's new-case count is the largest seen so far
if (tail(TimeTrackSub$PosInc,1)==max(TimeTrackSub$PosInc))
{
msg="<font color='red'>New high in one-day new cases</font>"
}
# Today's table gains a Death column only when there was a death today
if (tail(TimeTrackSub$DeathInc,1)>0)
{
msg=sprintf("%s<font size=5><table border=1 width='100%%'><tr align='center'><td colspan=3>Today</td></tr><tr align='center'><td>New Case</td><td>Discharge</td><td><font color='red'>Death</font></td></tr><tr align='center'><td>%d</td><td>%d</td><td><font color='red'>%d</font></td></tr></table></font>",msg,newCases,tail(TimeTrackSub$DischargeInc,1),tail(TimeTrackSub$DeathInc,1))
} else
{
msg=sprintf("%s<font size=5><table border=1 width='100%%'><tr align='center'><td colspan=2>Today</td></tr><tr align='center'><td>New Case</td><td>Discharge</td></tr><tr align='center'><td>%d</td><td>%d</td></tr></table></font>",msg,newCases,tail(TimeTrackSub$DischargeInc,1))
}
# Cumulative totals as of the selected date
msg=sprintf("%s<font size=3><table border=1 width='100%%'><tr align='Center'><td colspan=6>As of %s</td></tr><tr align='Center'><td>Positive</td><td>Warded</td><td>Discharged</td><td>Critical</td><td>Death</td><td>Quarantine</td></tr><tr align='center'><td>%d</td><td>%d</td><td>%d</td><td>%d</td><td>%d</td><td>%d</td></tr></table></font>",msg,DateSelect,tail(TimeTrackSub$Positive,1),tail(TimeTrackSub$Positive-TimeTrackSub$Discharge-TimeTrackSub$Death,1),tail(TimeTrackSub$Discharge,1),tail(TimeTrackSub$Critical,1),tail(TimeTrackSub$Death,1),tail(TimeTrackSub$QuarantineCurrent,1))
# msg=sprintf("<small>On %s</small><br /><h4>%d new cases, %d discharged, %d more close contacts</h4><small>As of %s</small><br /><h4>Warded: %d (%d critical), total %d close contacts<br />Total positive: %d; Total discharged: %d</h4>",DateSelect,newCases,tail(TimeTrackSub$DischargeInc,1),tail(TimeTrackSub$CloseCOntactInc,1),DateSelect,tail(TimeTrackSub$Positive-TimeTrackSub$Discharge,1),tail(TimeTrackSub$Critical,1),tail(TimeTrackSub$CloseContact,1),tail(TimeTrackSub$Positive,1),tail(TimeTrackSub$Discharge,1))
# Person-to-person links involving any of today's new cases
PERSONPERSONSub=dataList[['PERSONPERSON']][which(as.Date(dataList[['PERSONPERSON']]$Date) <= DateSelect),]
PERSONPERSONSub=PERSONPERSONSub[which(PERSONPERSONSub$`PersonID1` %in% PersonNew$PersonID | PERSONPERSONSub$`PersonID2` %in% PersonNew$PersonID),]
# A new case is "tracable" when it belongs to a real cluster (not
# UNLINKED/OTH_Linked) or appears as the target of a person-person link
tracable=unique(c(intersect(PersonNew$PersonID,dataList[['CLUSTER']]$PersonID[which(!(dataList[['CLUSTER']]$Cluster %in% c('UNLINKED','OTH_Linked')))]),intersect(PersonNew$PersonID,dataList[['PERSONPERSON']]$PersonID2)))
if (nrow(PersonNew)==0)
{
msg=sprintf("%s<br /><big><b><font color=green>No confirmed cases</font></b></big><br />",msg)
} else
if (length(tracable)==nrow(PersonNew))
{
msg=sprintf("%s<br /><big><b><font color=green>All %d case(s) is/are linked to previous confirmed cases</font></b></big><br />",msg,nrow(PersonNew))
} else
{
msg=sprintf("%s<br /><big><b><font color=red>%d case(s) (%s) has/have not yet establish links to previous cases</font></b></big><br />",msg,nrow(PersonNew)-length(tracable),paste(sprintf("Case%s",setdiff(PersonNew$PersonID,tracable)),collapse=", "))
}
# Per-cluster counts of today's new cases (UNLINKED excluded)
if (nrow(ClusterNewDf)>0)
{
ClusterNewAgg=aggregate(PersonID~Cluster,ClusterNewDf,length)
ClusterNewAgg=ClusterNewAgg[which(ClusterNewAgg$Cluster!='UNLINKED'),]
ClusterNewAgg$Display=sprintf("%d New cases from cluster %s",ClusterNewAgg$PersonID,ClusterNewAgg$Cluster)
msg=sprintf("%s<font color=blue>%s</font>",msg,paste(ClusterNewAgg$Display,collapse="<br />"))
}
# One line per person-to-person link involving a new case
if (nrow(PERSONPERSONSub)>0)
{
for (i in 1:nrow(PERSONPERSONSub))
{
msg=sprintf("%s<br /><font color=blue>Case%s and Case%s are linked</font>",msg,PERSONPERSONSub$`PersonID1`[i],PERSONPERSONSub$`PersonID2`[i])
}
}
HTML(msg)
})
# One-row-per-case HTML report table, newest case first, prefixed with the
# case's cluster when it has one.
output$NewCaseDT=renderDT({
  allCases=dataList[['PERSON']]
  allCases=allCases[order(allCases$PersonID,decreasing=TRUE),]
  if (nrow(allCases)>0)
  {
    rowList=lapply(seq_len(nrow(allCases)),function(i){
      msg=sprintf("<h5>Case%d</h5>",allCases$PersonID[i])
      if (allCases$PersonID[i] %in% dataList[['CLUSTER']]$PersonID)
      {
        msg=sprintf("%s%s",msg,dataList[['CLUSTER']]$Cluster[which(dataList[['CLUSTER']]$PersonID==allCases$PersonID[i])])
      }
      data.frame(CaseDetails=sprintf("%s%s",msg,allCases$Report[i]))
    })
    return(formatDTDisplay(do.call(rbind,rowList),escape=FALSE))
  }
})
# Header listing non-hospital places visited by the latest cases.
output$latestPlaceList=renderUI({
  newCases=PersonNew()
  exposedPlaceIds=dataList[['PERSONPLACE']]$PlaceID[which(dataList[['PERSONPLACE']]$PersonID %in% newCases$PersonID & dataList[['PERSONPLACE']]$Remarks!='Warded')]
  placeNames=unique(dataList[['PLACE']]$Name[which(dataList[['PLACE']]$PlaceID %in% exposedPlaceIds)])
  HTML(sprintf("<h3>Places exposed to latest cases: %s</h3>", paste(placeNames,collapse=", ")))
})
# Clustered marker map of non-hospital places visited within the 14 days
# before the selected date.
output$newPlaceMap=renderLeaflet({
  # Keep the reactive dependency on the latest-case set
  PersonNew()
  cutoffDate=as.Date(input$DateSelect)-14
  recentVisits=dataList[['PERSONPLACE']][which(as.Date(dataList[['PERSONPLACE']]$DateEnd,'%d-%b-%Y')>=cutoffDate & dataList[['PERSONPLACE']]$Remarks!='Warded'),]
  if (nrow(recentVisits)>0)
  {
    leaflet() %>%
      addTiles(group = "OSM") %>%
      addMarkers(data = recentVisits, ~longitude,~latitude,popup = ~Display,icon=placeIcons[recentVisits$icon],clusterOptions = markerClusterOptions(),layerId=recentVisits$PlaceID)
  }
})
# Builds one transmission tree (parent/node edge list) per cluster for
# collapsibleTreeNetwork. Directly-linked cluster members hang off the
# cluster root; cases linked only person-to-person are chained up to their
# earliest known infector. Node colour encodes status (cluster=lightblue,
# warded=pink, discharged=lightgreen, death=red).
treeList=reactive({
#https://adeelk93.github.io/collapsibleTree/
# dataList[['PERSONPERSON']]$PersonID1=unlist(apply(dataList[['PERSONPERSON']],1,function(x) return(ifelse(x['PersonID1']<x['PersonID2'],x['PersonID1'],x['PersonID2']))))
# dataList[['PERSONPERSON']]$PersonID1=trimws(dataList[['PERSONPERSON']]$PersonID1)
# dataList[['PERSONPERSON']]$PersonID2=unlist(apply(dataList[['PERSONPERSON']],1,function(x) return(ifelse(x['PersonID1']<x['PersonID2'],x['PersonID2'],x['PersonID1']))))
# dataList[['PERSONPERSON']]$PersonID2=trimws(dataList[['PERSONPERSON']]$PersonID2)
##############################################################################
PersonNoteMap=setNames(dataList[['PERSON']]$Notes,as.character(dataList[['PERSON']]$PersonID))
PersonNoteMap[is.na(PersonNoteMap)]=''
# Cluster -> case edges; keep direct links, and indirect links only when the
# case has no person-to-person parent (those get chained below instead)
treeData1=data.frame(parent=sprintf("%s (%d cases)",dataList[['CLUSTER']]$Cluster,clusterSizeMap[dataList[['CLUSTER']]$Cluster]),node=sprintf("Case %s %s",dataList[['CLUSTER']]$PersonID,PersonNoteMap[as.character(dataList[['CLUSTER']]$PersonID)]),LinkType=dataList[['CLUSTER']]$LinkType,rawNode=dataList[['CLUSTER']]$PersonID,stringsAsFactors=FALSE)
treeData1$Name=treeData1$node
treeData1=treeData1[which(treeData1$LinkType=='Direct' | (treeData1$LinkType=='Indirect' & !(treeData1$rawNode %in% dataList[['PERSONPERSON']]$PersonID2))),]
treeData1$LinkType=NULL
# Root node per cluster (parent=NA marks a tree root)
clusterNode=data.frame(parent=NA,node=sprintf("%s (%d cases)",unique(dataList[['CLUSTER']]$Cluster),clusterSizeMap[unique(dataList[['CLUSTER']]$Cluster)]),rawNode=unique(dataList[['CLUSTER']]$Cluster),stringsAsFactors=FALSE)
clusterNode$Name=clusterNode$node
treeData=rbind(treeData1,clusterNode)
#treeData=rbind(treeData,data.frame(parent='COVID19SG',node=unique(treeData$parent),Name=NA,stringsAsFactors=FALSE))
#treeData=rbind(treeData,data.frame(parent=NA,node='COVID19SG',Name=sprintf('COVID19SG (%d cases)',nrow(dataList[['PERSON']])),stringsAsFactors=FALSE))
# Cases not attached to any cluster edge: walk each one's person-to-person
# chain upward (PersonID2 -> PersonID1, taking the max id when several
# infectors exist) and emit one edge per link in the chain
missingNodes=dataList[['PERSON']]$PersonID[which(!(as.character(dataList[['PERSON']]$PersonID) %in% treeData1$rawNode))]
missingNodes=missingNodes[order(missingNodes,decreasing=TRUE)]
acctedId=as.numeric(treeData1$rawNode)
acctedId=acctedId[!is.na(acctedId)]
treeData5=NULL
for (nid in missingNodes)
{
if (!(nid %in% acctedId))
{
currentID=nid
linkedID=setNames(currentID,sprintf("Case%s",currentID))
while (currentID %in% dataList[['PERSONPERSON']]$PersonID2)
{
nextID=max(as.numeric(dataList[['PERSONPERSON']]$PersonID1[which(dataList[['PERSONPERSON']]$PersonID2==currentID)]))
#reln=dataList[['PERSONPERSON']]$Relation2[which(dataList[['PERSONPERSON']]$PersonID1==nextID & dataList[['PERSONPERSON']]$PersonID2==currentID)]
linkedID=c(setNames(nextID,sprintf("Case%s",nextID)),linkedID)
currentID=nextID
#print(sprintf("%s %s",currentID,nextID))
}
for (j in 1:(length(linkedID)-1))
{
tmptree=data.frame(parent=sprintf("Case %s %s",linkedID[j],PersonNoteMap[linkedID[j]]),node=sprintf("Case %s %s",linkedID[j+1],PersonNoteMap[linkedID[j+1]]),rawNode=linkedID[j+1],stringsAsFactors=FALSE)
tmptree$Name=tmptree$node
treeData5=rbind(treeData5,tmptree)
}
acctedId=c(acctedId,linkedID)
}
}
treeData5=unique(treeData5)
treeData=rbind(treeData,unique(treeData5))
# Colour nodes by case status
dischargeList=as.character(dataList[['PERSON']]$PersonID[which(!is.na(dataList[['PERSON']]$Discharge))])
deathList=as.character(dataList[['PERSON']]$PersonID[which(!is.na(dataList[['PERSON']]$Death))])
treeData$Color='lightblue'
treeData$Color[which(treeData$rawNode %in% as.character(dataList[['PERSON']]$PersonID))]='pink'
treeData$Color[which(treeData$rawNode %in% dischargeList)]='lightgreen'
treeData$Color[which(treeData$rawNode %in% deathList)]='red'
# Normalise parent references to display names
treeNameMap=setNames(treeData$Name,treeData$node)
treeData$node=treeData$Name
treeData$parent=treeNameMap[treeData$parent]
# treeData$Name[which(treeData$node %in% c(treeData1$parent))]=sprintf("%s (%d cases)",treeData$node[which(treeData$node %in% c(treeData1$parent))],clusterSizeMap[treeData$node[which(treeData$node %in% c(treeData1$parent))]])
# treeData$Color = 'lightblue'
# treeData$Color[grep("[0-9]",treeData$node)] = 'pink'
# treeData$Color[which(treeData$node %in% dataList[['PERSON']]$PersonID[which(!is.na(dataList[['PERSON']]$Discharge))])]='lightgreen'
# treeData$Color[which(treeData$node=='COVID19SG')]='lightblue'
# clusterList=unique(c(intersect(dataList[['CLUSTER']]$Cluster,treeData$parent)))
# nameMap=setNames(unique(treeData$Name),unique(treeData$node))
# treeData$parent[which(treeData$parent %in% names(nameMap))]=nameMap[treeData$parent[which(treeData$parent %in% names(nameMap))]]
# treeData$node[which(treeData$node %in% names(nameMap))]=nameMap[treeData$node[which(treeData$node %in% names(nameMap))]]
################### Split Tree #################################################
# Split the global edge list into one connected component per cluster root
# by transitively collecting descendants of each cluster node
nodesAccted=NULL
treeList=list()
for (cl in clusterNode$node)
{
treeTmp=treeData[which(treeData$parent %in% cl),]
treeNode=treeTmp$node
newNode=treeTmp$node
while (length(newNode)>0)
{
newNode=setdiff(treeData$node[which(treeData$parent %in% treeNode)],treeNode)
treeNode=c(treeNode,newNode)
}
treeList[[cl]]=treeData[which(treeData$parent %in% c(cl,treeNode) | treeData$node %in% c(cl,treeNode)),]
#treeList[[cl]]=rbind(treeList[[cl]][which(is.na()),],data.frame(parent=NA,node=cl,Name=cl,Color='lightblue'))
#flag=flag+1
#nodesAccted=unique(c(nodesAccted,c(treeList[[cl]]$parent,treeList[[cl]]$node)))
}
return(treeList)
})
# Render one collapsible transmission-tree box per cluster, ordered by the
# number of cases still warded (descending).  Node colours encode status
# (set upstream in treeList()): lightgreen = discharged, pink = warded,
# red = death, lightblue = cluster/root node.
output$networkUI=renderUI({
treeList=treeList()
# Summarise each cluster: counts of discharged vs warded cases
treeInfo=NULL
for (i in names(treeList))
{
treeInfo=rbind(treeInfo,data.frame(cluster=i,Discharged=length(unique(treeList[[i]]$node[which(treeList[[i]]$Color=='lightgreen')])),Warded=length(unique(treeList[[i]]$node[which(treeList[[i]]$Color=='pink')])),stringsAsFactors=FALSE))
}
treeInfo=treeInfo[order(treeInfo$Warded,decreasing=TRUE),]
do.call(tagList,lapply(treeInfo$cluster,function(x) {
# BUGFIX: use the exact column name 'Discharged' -- the original wrote
# treeInfo$Discharge and only worked via R's partial matching of $.
box(width=6,title=sprintf("%s [Discharged:%d, Warded=%d]",x,treeInfo$Discharged[which(treeInfo$cluster==x)],treeInfo$Warded[which(treeInfo$cluster==x)])
,renderCollapsibleTree({
collapsibleTreeNetwork(treeList[[x]], attribute = "Name", nodeSize="leafCount", fill="Color",collapsed = FALSE)
})
)
}))
})
# Scatter timeline of confirmed cases per cluster up to the user-selected
# date.  One marker per (cluster, confirmation date); marker size is the
# daily case count and colour encodes how recently the cluster announced
# its last case (via dayIconColorCode; red = active/recent).
output$ClusterTrack=renderPlotly({
DateSelect=as.Date(input$DateSelect)
# Aggregate cases per cluster and confirmation date; 'text' keeps a
# comma-separated list of the case IDs for the hover display.
ClusterSub=data.table(dataList[['PERSON']][which(as.Date(dataList[['PERSON']]$DateConfirmed,'%d-%b-%Y')<=DateSelect),])[,.(CaseCount=.N,text=paste(sprintf("Case%d",PersonID),collapse=",")),by=list(groupName,DateConfirmed)]
ClusterSub$LastCaseDate=NA
# Per-cluster recency statistics (first/last announced case, days since last)
for (i in seq_len(nrow(ClusterSub)))
{
personSub=dataList[['PERSON']][which(dataList[['PERSON']]$groupName %in% ClusterSub$groupName[i]),]
ClusterSub$LastCaseDate[i]=format(max(personSub$AnnouncementDate),'%d-%b-%Y')
ClusterSub$FirstCaseDate[i]=format(min(personSub$AnnouncementDate),'%d-%b-%Y')
ClusterSub$DaysFrLastCase[i]=as.numeric(difftime(as.Date(todayDate,'%Y-%m-%d'),max(personSub$AnnouncementDate),unit='days'))
ClusterSub$colorCode[i]=dayIconColorCode(ClusterSub$DaysFrLastCase[i])
}
ClusterSub$groupName[which(ClusterSub$groupName=='OTH_LOCAL')]='Unlinked Local Cases'
# Append total case counts to the cluster labels
groupCaseCnt=aggregate(CaseCount~groupName,ClusterSub,sum)
groupCaseCntMap=setNames(groupCaseCnt$CaseCount,groupCaseCnt$groupName)
ClusterSub$groupName=sprintf("%s (%d cases)",ClusterSub$groupName,groupCaseCntMap[ClusterSub$groupName])
# Order the y axis by recency and fix factor levels for stable plotting
ClusterSub=ClusterSub[order(ClusterSub$DaysFrLastCase,decreasing=TRUE),]
clusterLevels=unique(ClusterSub$groupName)
ClusterSub$groupName=factor(ClusterSub$groupName,levels=clusterLevels)
ClusterSub$DateConfirmed=as.Date(ClusterSub$DateConfirmed,'%d-%b-%Y')
ClusterSub=ClusterSub[order(ClusterSub$colorCode),]
colorLevels=unique(ClusterSub$colorCode)
ClusterSub$colorCode=factor(ClusterSub$colorCode,levels=colorLevels)
ClusterSub=ClusterSub[order(ClusterSub$DateConfirmed),]
ClusterSub$display=sprintf("%s<br />%d cases",ClusterSub$text,ClusterSub$CaseCount)
clusterColor=unique(ClusterSub[,c('groupName','colorCode')])
clusterSprdTxt=paste(clusterColor$groupName[which(clusterColor$colorCode=='red')],collapse=",")
# BUGFIX: the active-cluster count was nrow(clusterColor[which(clusterColor$color=='red')]),
# which is missing a comma (so it indexes COLUMNS, not rows) and relies on
# partial matching of $color -> $colorCode; count the matching rows directly.
nActiveClusters=sum(clusterColor$colorCode=='red')
plot_ly(ClusterSub,x=~DateConfirmed,y=~groupName,text=~display,mode='markers',type = 'scatter',color=~colorCode,colors=~colorLevels,size=~CaseCount) %>% layout(title=sprintf("%d active clusters: %s",nActiveClusters,clusterSprdTxt))
})
####################################################################
### World data
#####################################################################
plotList=reactive({
# Assemble the three data sets used by the world-data plots, driven by the
# rank-range slider (input$currentSlider = c(from, to) country ranks).
currentSlider=as.numeric(input$currentSlider)
# Snapshot of the most recent date across all countries
ctryCurrentData=ctryDataFClean[which(ctryDataFClean$Date==max(ctryDataFClean$Date)),]
ctryDataToday=ctryCurrentData
ctryCurrentData=ctryCurrentData[order(ctryCurrentData$CurInfected,decreasing=TRUE),]
# Slice the selected rank range three times, once per outcome measure
# (data.table j-expression selection)
ctryCurrentDataTopA=ctryCurrentData[currentSlider[1]:currentSlider[2],list(Country,CurInfected,ConfirmedInc1)]
ctryCurrentDataTopB=ctryCurrentData[currentSlider[1]:currentSlider[2],list(Country,Recovered,ConfirmedInc1)]
ctryCurrentDataTopC=ctryCurrentData[currentSlider[1]:currentSlider[2],list(Country,Deaths,ConfirmedInc1)]
# Fix country ordering (by currently-infected count) so all plots share axes
ctryFactor=ctryCurrentDataTopA$Country[order(ctryCurrentDataTopA$CurInfected,decreasing=TRUE)]
ctryCurrentDataTopA$Country=factor(ctryCurrentDataTopA$Country,levels=ctryFactor)
ctryCurrentDataTopB$Country=factor(ctryCurrentDataTopB$Country,levels=ctryFactor)
ctryCurrentDataTopC$Country=factor(ctryCurrentDataTopC$Country,levels=ctryFactor)
# Stack the three measures into long format under a common 'Count' column
names(ctryCurrentDataTopA)[which(names(ctryCurrentDataTopA)=='CurInfected')]='Count'
names(ctryCurrentDataTopB)[which(names(ctryCurrentDataTopB)=='Recovered')]='Count'
names(ctryCurrentDataTopC)[which(names(ctryCurrentDataTopC)=='Deaths')]='Count'
ctryCurrentDataTopA$Type='CurrentInfected'
ctryCurrentDataTopB$Type='Recovered'
ctryCurrentDataTopC$Type='Deaths'
ctryCurrentDataTop=rbind(ctryCurrentDataTopA,ctryCurrentDataTopB,ctryCurrentDataTopC)
ctryCurrentDataTop$Type=as.factor(ctryCurrentDataTop$Type)
##########################################################################
# Full time series for the selected countries, normalised per million popln
ctryDataFCleanSub=ctryDataFClean[which(ctryDataFClean$Country %in% ctryCurrentDataTop$Country),]
ctryDataFCleanSub$CurInfectedPerM=ctryDataFCleanSub$CurInfected/ctryDataFCleanSub$Pop2020Mil
ctryDataFCleanSub$Country=factor(ctryDataFCleanSub$Country,levels=ctryFactor)
ctryDataFCleanSub=ctryDataFCleanSub[which(ctryDataFCleanSub$DayInfect>0),]
###############################################################
ctryDataToday=ctryDataToday[order(ctryDataToday$CurInfected,decreasing=TRUE),]
##########################################################################
# Return all three as a named list consumed by the downstream reactives
plotList=list()
plotList[['ctryCurrentDataTop']]=ctryCurrentDataTop
plotList[['ctryDataFCleanSub']]=ctryDataFCleanSub
plotList[['ctryDataToday']]=ctryDataToday
return(plotList)
})
# Convenience reactives that expose the individual components of plotList()
ctryCurrentDataTop = reactive({
  plotList()[["ctryCurrentDataTop"]]
})
ctryDataFCleanSub = reactive({
  plotList()[["ctryDataFCleanSub"]]
})
ctryDataToday = reactive({
  plotList()[["ctryDataToday"]]
})
output$currentTop=renderPlotly({
# Stacked bars of currently-infected / recovered / deaths for the selected
# countries, with a secondary-axis line trace for the 1-day case increase.
ctryCurrentDataTop=ctryCurrentDataTop()
plot_ly(ctryCurrentDataTop,x=~Country,y=~Count,type='bar',color=~Type) %>% add_trace(x=~Country,y=~ConfirmedInc1,type='scatter',mode='lines',yaxis='y2',name='1-day increase') %>% layout(barmode='stack',title = 'Currently Infected/1-day case increase', xaxis = list(title = ""), yaxis = list(side = 'left', title = 'Currently Infected', showgrid = FALSE, zeroline = FALSE), yaxis2 = list(side = 'right', overlaying = "y", title = 'Increase', showgrid = FALSE, zeroline = FALSE))
})
# Line plot of currently-infected cases per million population, by country,
# against days since the country's case rate hit 1 per 1M population.
output$currentPerMPop=renderPlotly({
ctryDataFCleanSub=ctryDataFCleanSub()
ctryDataFCleanSub=ctryDataFCleanSub[order(ctryDataFCleanSub$DayInfect),]
# Keep only each country's final (most recent) point to label the traces
maxDayInfecCtry=aggregate(DayInfect~Country,ctryDataFCleanSub,max)
ctryDataFCleanSubA=merge(ctryDataFCleanSub,maxDayInfecCtry,by='Country')
ctryDataFCleanSubA=ctryDataFCleanSubA[which(ctryDataFCleanSubA$DayInfect.x==ctryDataFCleanSubA$DayInfect.y),]
a=list(x=ctryDataFCleanSubA$DayInfect.x,y=ctryDataFCleanSubA$CurInfectedPerM,text=as.character(ctryDataFCleanSubA$Country))
# FIX: corrected the title typo "Currenly" -> "Currently"
plot_ly(ctryDataFCleanSub,x=~DayInfect,y=~CurInfectedPerM,color=~Country,type='scatter',mode='lines+markers') %>% layout(title='Currently infected per mil popln vs Days since cases hit 1 in 1M popln',annotations = a)
})
output$confirmedPerMPop=renderPlotly({
# Line plot of cumulative confirmed cases per million population vs days
# since the country's case rate hit 1 per 1M population.
ctryDataFCleanSub=ctryDataFCleanSub()
ctryDataFCleanSub=ctryDataFCleanSub[order(ctryDataFCleanSub$DayInfect),]
# Keep only each country's final (most recent) point to label the traces
maxDayInfecCtry=aggregate(DayInfect~Country,ctryDataFCleanSub,max)
ctryDataFCleanSubA=merge(ctryDataFCleanSub,maxDayInfecCtry,by='Country')
ctryDataFCleanSubA=ctryDataFCleanSubA[which(ctryDataFCleanSubA$DayInfect.x==ctryDataFCleanSubA$DayInfect.y),]
a=list(x=ctryDataFCleanSubA$DayInfect.x,y=ctryDataFCleanSubA$ConfirmedPerPopM,text=as.character(ctryDataFCleanSubA$Country))
plot_ly(ctryDataFCleanSub,x=~DayInfect,y=~ConfirmedPerPopM,color=~Country,type='scatter',mode='lines+markers') %>% layout(title='Confirmed cases per mil popln vs Days since cases hit 1 in 1M popln',annotations = a)
})
# For the selected country, find the (up to) 10 countries whose
# confirmed-cases-per-million trajectories are closest (Euclidean distance
# over the day-aligned wide matrix) and return their full time series.
ctryClusterData=reactive({
wctrySelect=input$wctrySelect
ctryDaysInfect=max(ctryDataFClean$DayInfect[which(ctryDataFClean$Country==wctrySelect)])
ctryDataFCleanSub=ctryDataFClean[which(ctryDataFClean$DayInfect>=0 & ctryDataFClean$DayInfect<=ctryDaysInfect),]
ctryDataFCleanSub=ctryDataFCleanSub[order(ctryDataFCleanSub$DayInfect),]
# Wide matrix: one row per country, one column per day since outbreak start
ctryDISprd=data.frame(spread(ctryDataFCleanSub[,c('ConfirmedPerPopM','DayInfect','Country')],key='DayInfect',value='ConfirmedPerPopM'))
rownames(ctryDISprd)=as.character(ctryDISprd$Country)
ctryDISprd[is.na(ctryDISprd)]=0
ctryDISprd$Country=NULL
# Pairwise distances involving the selected country, smallest first
distMat=flattenCorrMatrix(as.matrix(dist(ctryDISprd)))
distMatSub=distMat[which(distMat$row==wctrySelect | distMat$column==wctrySelect),]
distMatSub=distMatSub[order(distMatSub$cor),]
# BUGFIX: use head() rather than [1:10,] so that fewer than 10 candidate
# countries does not produce all-NA padding rows.
distMatSub=head(distMatSub,10)
ctryClusterData=ctryDataFClean[which(ctryDataFClean$Country %in% c(distMatSub$row,distMatSub$column)),]
return(ctryClusterData)
})
output$ctryCluster=renderPlotly({
# Plot the trajectories of the 10 countries most similar to the selection
# (as computed by ctryClusterData()), labelling each trace at its last point.
wctrySelect=input$wctrySelect
ctryClusterData=ctryClusterData()
ctryClusterData=ctryClusterData[order(ctryClusterData$DayInfect),]
labelDf=ddply(ctryClusterData,.(Country), tail,1)
a=list(x=labelDf$DayInfect,y=labelDf$ConfirmedPerPopM,text=as.character(labelDf$Country))
plot_ly(ctryClusterData,x=~DayInfect,y=~ConfirmedPerPopM,color=~Country,type='scatter',mode='lines+markers') %>% layout(title=sprintf("Top 10 countries with most similar trend as %s",wctrySelect),annotations = a)
})
# Country-level summary table for the most recent date.  Taking a reactive
# dependency on input$refresh forces a re-render when the user refreshes.
output$ctryRankTable=renderDT({
input$refresh
ctryDataToday=ctryDataToday()
# NOTE(review): the original computed a 'displayOrder' column vector here but
# never applied it before calling formatDTDisplay(), so it was dead code and
# has been removed.  If column re-ordering is wanted, subset the data with
# ctryDataToday[, displayOrder] first.
formatDTDisplay(ctryDataToday)
})
# Render per-country detail plots for the rows selected in the rank table
# (defaults to the first row when nothing is selected).
output$ctrySelectplot=renderUI({
ctryDataToday=ctryDataToday()
ctryRankTable_rows_selected=input$ctryRankTable_rows_selected
if (length(ctryRankTable_rows_selected)==0)
{
ctryRankTable_rows_selected=1
}
ctrySelected=ctryDataToday$Country[ctryRankTable_rows_selected]
ctryDataFCleanSub=ctryDataFClean[which(ctryDataFClean$Country %in% ctrySelected),]
ctryDataFCleanSub=ctryDataFCleanSub[order(ctryDataFCleanSub$Date),]
# Place a country label at the last point of each trace
labelDf=ddply(ctryDataFCleanSub,.(Country), tail,1)
labelList=list(x=labelDf$DayInfect,y=labelDf$CurInfectedPerM,text=as.character(labelDf$Country))
labelList1=list(x=labelDf$DayInfect,y=labelDf$ConfirmedPerPopM,text=as.character(labelDf$Country))
list(
box(width=12
# FIX: corrected the title typo "Currenly" -> "Currently"
,renderPlotly(plot_ly(ctryDataFCleanSub,x=~DayInfect,y=~CurInfectedPerM,color=~Country,type='scatter',mode='lines+markers') %>% layout(title='Currently infected per mil popln vs Days since cases hit 1 in 1M popln',annotations = labelList))
)
,box(width=12
,renderPlotly(plot_ly(ctryDataFCleanSub,x=~DayInfect,y=~ConfirmedPerPopM,color=~Country,type='scatter',mode='lines+markers') %>% layout(title='Confirmed cases per mil popln vs Days since cases hit 1 in 1M popln',annotations = labelList1))
)
)
})
#####################################################################
#####################################################################
#SurvivalData$DischargeCensored=as.factor(SurvivalData$DischargeCensored)
#plot_ly(SurvivalData,x=~Age,y=~HospDays,color=~DischargeCensored,colors=c('red','blue'),type='scatter')
######################################################################
# Data View
######################################################################
})
|
85e6bbf3fb6675c8fa064760ac2c4d8a2fcfa128
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-r/tests/testdir_algos/isofor/runit_isofor_creditfraud_large.R
|
d5c850819e60fe79eb773ead6a1961c130c85fdb
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 2,785
|
r
|
runit_isofor_creditfraud_large.R
|
# H2O test-harness boilerplate: run relative to this script's directory.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
# Note: this test uses a Credit Card Fraud dataset licensed under ODbL v1.0
# full ODvL: https://opendatacommons.org/licenses/odbl/1.0/
# additional dataset details: https://www.kaggle.com/mlg-ulb/creditcardfraud/home
test.IsolationForest.creditcardfraud <- function() {
# Anomaly threshold: flag scores above the 95th percentile
p <- 0.95
ccf_path <- locate("bigdata/laptop/creditcardfraud/creditcardfraud.csv")
## In H2O
creditcardfraud <- h2o.importFile(ccf_path)
h2o_isofor <- h2o.isolationForest(creditcardfraud, x = colnames(creditcardfraud)[1:30], ntrees = 100, seed = 1234)
h2o_anomaly_score <- h2o.predict(h2o_isofor, creditcardfraud)
h2o_anomaly_score$Class <- as.factor(creditcardfraud$Class)
h2o_anomaly_score_local <- as.data.frame(h2o_anomaly_score)
# Confusion matrix: flagged-vs-actual fraud at the p-quantile cutoff
h2o_cm <- table(
h2oForest = h2o_anomaly_score_local$predict > quantile(h2o_anomaly_score_local$predict, p),
Actual = h2o_anomaly_score_local$Class == 1
)
print(h2o_cm)
## In H2O with Early Stopping
h2o_early_isofor <- h2o.isolationForest(creditcardfraud, x = colnames(creditcardfraud)[1:30],
ntrees = 1000, stopping_rounds = 3, score_tree_interval = 5,
seed = 1234)
h2o_early_anomaly_score <- h2o.predict(h2o_early_isofor, creditcardfraud)
h2o_early_anomaly_score$Class <- as.factor(creditcardfraud$Class)
h2o_early_anomaly_score_local <- as.data.frame(h2o_early_anomaly_score)
h2o_early_cm <- table(
h2oEarlyForest = h2o_early_anomaly_score_local$predict > quantile(h2o_early_anomaly_score_local$predict, p),
Actual = h2o_early_anomaly_score_local$Class == 1
)
print(h2o_early_cm)
## With isofor
# Reference implementation from the CRAN/GitHub 'isofor' package, run on the
# same 30 predictor columns
creditcardfraud_local <- read.csv(ccf_path)
isofor_model <- isofor::iForest(creditcardfraud_local[1:30], seed = 1234)
isofor_anomaly_score <- predict(isofor_model, creditcardfraud_local[1:30])
isofor_cm <- table(
iForest = isofor_anomaly_score > quantile(isofor_anomaly_score, p),
Actual = creditcardfraud_local$Class == 1
)
print(isofor_cm)
## Compare results
# The compared quantity is recall on the actual-fraud column
# (cm[2,2] / (cm[1,2] + cm[2,2])), within an absolute tolerance of 0.05.
# H2O vs isofor
expect_equal(
h2o_cm[2,2] / (h2o_cm[1,2] + h2o_cm[2,2]),
isofor_cm[2,2] / (isofor_cm[1,2] + isofor_cm[2,2]),
tolerance = 0.05, scale = 1
)
# H2O (early stop) vs isofor
expect_equal(
h2o_early_cm[2,2] / (h2o_early_cm[1,2] + h2o_early_cm[2,2]),
isofor_cm[2,2] / (isofor_cm[1,2] + isofor_cm[2,2]),
tolerance = 0.05, scale = 1
)
}
doTest("IsolationForest: Compares Isolation Forest to isofor package in R", test.IsolationForest.creditcardfraud)
|
d7c5ba80ad187b3e646558a5f10db6202106a732
|
d64c2f2ed23d9929951eb3cecc0fc0f456985005
|
/data/os3_data/TabTextToCsv.R
|
51ebd39efa7ecf80fd493349c5a0d748eaf735ca
|
[] |
no_license
|
jbryer/DATA606Fall2018
|
77609a8b9da6403638f2cb00dd850adfd18e7787
|
33abdb03bf34db8730a94d9ecb59813cc0cf1e9b
|
refs/heads/master
| 2020-03-25T02:41:26.441585
| 2019-01-15T22:21:49
| 2019-01-15T22:21:49
| 143,302,830
| 8
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 483
|
r
|
TabTextToCsv.R
|
# Copyright 2014, OpenIntro
# This code is released under a Creative Commons Attribution 3.0 license
# The TabTextToCsv() function can be used to convert
# tab-delimited text file to a CSV file
# Convert a tab-delimited text file (with a header row) into a CSV file.
#
# file.in:  path to the tab-delimited input file
# file.out: path the comma-separated output is written to
TabTextToCsv <-
  function (file.in, file.out) {
    tab_data <- read.delim(file.in)
    write.table(tab_data,
                file = file.out,
                quote = FALSE,
                sep = ",",
                row.names = FALSE)
  }
# Example usage: the paths below are relative to the working directory, so
# check (and if needed reset) it first.
getwd()
# setwd("reset the current working directory")
TabTextToCsv("smallpox.txt", "smallpox.csv")
|
8755af5e92733f407aaab3041499a911b4d25793
|
4ce0a8e66ad3694a60840ab3cd3c34ea82de94ea
|
/rdev/R/knobs.R
|
59c89977cdce05f35e662323e37a1668be9415ee
|
[] |
no_license
|
curtisKJ/mrgsolve
|
5549fe81796e5dcd7172462624d016abe58bedc5
|
05818cc861e467db08a04b65af59d783bdd16a53
|
refs/heads/master
| 2021-01-17T11:33:21.588950
| 2016-05-17T06:12:23
| 2016-05-17T06:12:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,365
|
r
|
knobs.R
|
## This work is licensed under the Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License.
## To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-nd/4.0/ or send a letter to
## Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
##' @include mrgsims.R
##' @include classes.R
# PK dosing-record fields that can act as "knobs" (lower case, as they appear
# in event data sets) and their capitalised aliases used in simulated output.
tran.use <- c("time", "amt", "ii", "rate","ss", "addl","cmt")
tran.alt <- c("Time", "Amt", "Ii", "Rate", "Ss", "Addl", "Cmt")
# Dotted variants, e.g. ".time", ".amt"
dot.tran.use <- paste0(".", tran.use)
# All names recognised as knobs; currently just the dosing fields (model
# parameter names are checked separately against param(x)).  The original
# built this as knobable <- c(); knobable <- c(knobable, tran.use).
knobable <- tran.use
# Wrap a column name so it survives mrgsim output untouched: "amt" -> "KNOB<amt>"
protect <- function(x) {
  sprintf("KNOB<%s>", x)
}
# Identify KNOB<...>-wrapped names in x.  With logical = FALSE (default)
# return the matching elements; with logical = TRUE return a logical mask.
protected <- function(x, logical = FALSE) {
  pattern <- "^KNOB<.*>$"
  if (logical) {
    grepl(pattern, x)
  } else {
    grep(pattern, x, value = TRUE)
  }
}
unprotect <- function(x) {
  # Strip the KNOB<...> wrapper added by protect(); strings that do not
  # match are returned unchanged.
  # BUGFIX: the original pattern was "^\\KNOB<(.+)>$" -- the stray "\\K"
  # escape is regex-engine dependent (invalid/keep-reset under PCRE) and
  # only worked by accident; anchor on the literal "KNOB" instead.
  gsub("^KNOB<(.+)>$", "\\1", x)
}
##' @title Run sensitivity analysis on model settings
##'
##' @description Knobs can be parameter values or PK dosing items (e.g. amt). By design, all combinations of specified knob/values are simulated.
##'
##' @param x the model object
##' @param carry.out passed to \code{\link{mrgsim}}
##' @param drop defines which knobs to drop in the matrix of simulated data; with \code{drop} = "none", the values of all knobs appear in the simulated data matrix; with \code{drop} = "all", no knob names appear in the simulated data matrix; when \code{drop} is "default", selected non-moving columns related to PK dosing are dropped: \code{cmt}, \code{time}, \code{addl}, \code{ii}, \code{ss}, \code{evid}. In every case, the simulation run settings can be retreived with the \code{batch} method for the \code{batch_mrgsims} output object.
##' @param update a list of arguments that are passed to update prior to running the knobs
##' @param ... knobs: named numeric vectors that identify knob names and knob values for a
##' batch run. See details.
##' @name knobs
##' @return An object of class \code{batch_mrgsims}. Most methods for \code{mrgsims} objects also work on \code{batch_mrgsims} object.
##' @details
##' Valid knob names include: any parameter name (in \code{param(mod)}), time variables (\code{start}, \code{end}, \code{delta}), PK dosing items (\code{amt}, \code{ii}, \code{rate}, and others ...), and solver settings (\code{atol}, \code{hmax}, etc...).
##' @export
##' @examples
##' ## example("knobs")
##'
##' mod <- mrgsolve:::house(end=72)
##'
##' events <- ev(amt=1000, cmt=1, addl=3, ii=12)
##'
##' out <- mod %>% ev(events) %>% knobs(CL=c(1,2,3))
##' plot(out)
##'
##' out
##' moving(out)
##' batch(out)
##'
##'
##' out <- mod %>% ev(events) %>% knobs(CL=c(1,2,3), VC=c(5,20,50))
##' plot(out)
##' plot(out,CP~.)
##' plot(out, CP~time|VC, groups=CL, lty=2)
##'
##' out <- knobs(mod, amt=c(100,300,500), cmt=1,time=0)
##' plot(out)
##'
##' out <- mod %>% knobs(amt=c(100,300), CL=c(1,3),VC=c(5,20), cmt=1, time=0)
##' plot(out)
##' plot(out, CP~.)
##' plot(out, CP~time|CL*VC, groups=Amt)
##'
##' out <- knobs(mod, CL=c(1,2,3), drop="all")
##' out
##'
##' out <- knobs(mod, CL=c(1,2,3), drop="none")
##' out
setGeneric("knobs", function(x,y,...) standardGeneric("knobs"))
##' @export
##' @rdname knobs
setMethod("knobs", c("mrgmod", "missing"), function(x,...,
carry.out=character(0),
drop=c("default", "none" ,"all"),
update=list()) {
drop <- match.arg(drop)
# Split ... into valid knobs (model parameters or dosing items) versus
# everything else, which is forwarded to update() on the model object.
input <- list(...)
keep <- is.element(names(input), c(knobable, pars(x)))
toupdate <- input[!keep]
args <- input[keep]
# Record the full call configuration so a batch can be replayed later
# (see the mrgmod/batch_mrgsims method).
input$carry.out<- carry.out
input$drop <- drop
input$update <- update
if(length(args)==0) stop("No valid knobs were found.")
toupdate <- merge(toupdate, update, strict=FALSE)
x <- do.call("update", c(x,toupdate))
pass <- list(data=NULL,idata=NULL)
# "Moving" knobs are those given more than one value; only these vary
# across the batch and drive default plot grouping/conditioning.
knob.names <- names(args)
moving <- knob.names[sapply(args, length)>1]
nomoving <- length(moving)==0
moving <- paste("", moving, sep="")
# Classify knobs: model parameters vs PK dosing (tran) items.
param.args <- is.element(names(args), names(param(x)))
tran.args <- is.element(names(args), tran.use)
param.knobs <- names(args)[param.args]
tran.knobs <- names(args)[tran.args]
other.knobs <- names(args)[!param.args & !tran.args]
carry.out <- setdiff(carry.out, param.knobs)
data <- param <- data.frame()
found.data <- sum(tran.args) > 0
found.param <- sum(param.args) > 0
# All combinations of the knob values; one simulated ID per combination.
kdata <- expand.grid(args)
kdata$ID <- 1:nrow(kdata)
if(found.data) {
# Dosing knobs become an event data set; default time 0 / evid 1.
if(!exists("time",kdata)) kdata$time <- 0
if(!exists("evid",kdata)) kdata$evid <- 1
data <- kdata[,unique(c("ID", "evid", "time", tran.knobs)), drop=FALSE]
}
idata <- kdata
if(nrow(data)==0) data <- NULL
# Decide which knob columns get carried into the output, per 'drop'.
carry <- c( param.knobs, tran.knobs)
tran.drop <- c("time", "cmt", "addl", "ss", "evid")
if(drop == "none") tran.drop <- c()
if(drop == "all") tran.drop <- carry
if(drop !="all") tran.drop <- setdiff(tran.drop,moving)
carry <- setdiff(carry,tran.drop)
idata <- idata[, c("ID", intersect(names(idata),carry)), drop=FALSE]
# Protect dosing knob names (e.g. amt -> KNOB<amt>) so mrgsim passes them
# through untouched; they are renamed to capitalised aliases afterwards.
protect.idata <- names(idata) %in% tran.knobs
protect.carry <- carry %in% tran.knobs
names(idata)[protect.idata] <- protect(names(idata)[protect.idata])
carry[protect.carry] <- protect(carry[protect.carry])
out <- mrgsim(x,data=data, idata=idata, carry.out =c(carry.out,carry))
request <- out@request
# Tiny throwaway simulation just to recover the model's output names.
blah <- mrgsim(x, end=1, delta=1, verbose=FALSE)
out <- as.matrix(out)
dimnames(out) <- list(NULL, unprotect(mapvalues(colnames(out), protect(tran.use),tran.alt,warn_missing=FALSE)))
new("batch_mrgsims", data=out, mod=x, batch=kdata, knobs=names(args),
request=request,
moving=moving,outnames=blah@outnames,input=input)
})
##' @export
##' @rdname knobs
setMethod("knobs", c("mrgmod", "batch_mrgsims"), function(x, y, ...) {
  # Replay a previous knobs() run against the model, with any arguments in
  # ... overriding the stored configuration.
  rerun_args <- merge(y@input, list(...), strict = FALSE)
  do.call("knobs", c(list(x), rerun_args))
})
##' @export
##' @rdname knobs
##' @param row.names passed to \code{\link{as.data.frame}}
##' @param optional passed to \code{\link{as.data.frame}}
setMethod("as.data.frame","batch_mrgsims", function(x,row.names=NULL, optional=FALSE,...) {
# Coerce the simulated data matrix slot to a data frame
as.data.frame(x@data, row.names,optional,...)
})
##' @export
##' @rdname knobs
##' @param y batch_mrgsims object
setMethod("as.matrix","batch_mrgsims", function(x,y,...) {
# Raw simulated data matrix
x@data
})
##' @export
##' @rdname knobs
setGeneric("batch", function(x,...) standardGeneric("batch"))
##' @export
##' @rdname knobs
setGeneric("moving", function(x,...) standardGeneric("moving"))
##' @export
##' @rdname knobs
setMethod("batch", "batch_mrgsims", function(x,...) {
# The knob/value grid that was simulated (one row per ID)
x@batch
})
##' @export
##' @rdname knobs
setMethod("knobs", "batch_mrgsims", function(x,...) {
# Names of the knobs used in the run
x@knobs
})
##' @export
##' @rdname knobs
setMethod("moving", "batch_mrgsims", function(x,...) {
# Names of knobs that took more than one value
x@moving
})
##' @rdname knobs
##' @export
##' @param object passed to show
setMethod("show", "batch_mrgsims", function(object) {
# Print a short run summary: model name, head of the knob/value batch, and
# head of the simulated data, flagging which knobs are "moving".
message("Knobs simulation run summary:")
cat("Model: ", model(mod(object)),"\n")
cat("Batch (head): \n")
print(head(object@batch,n=5))
mov <- object@moving
if(all(mov == "")) mov <- "none"
cat("[",mov, "]\n\n")
cat("Head:\n")
print(head(object@data,n=5))
# Dosing knob names appear capitalised in the output (e.g. amt -> Amt)
mov <- mapvalues(object@moving, tran.use, tran.alt, warn_missing=FALSE)
if(all(mov =="")) mov <- "none"
cat("[", mov, "]")
return(invisible(NULL))
})
##' Plot method for mrgsims objects.
##'
##' @param x mrsims object
##' @param y a formula passed to xyplot
##' @param show.grid print grid in the plot
##' @param lwd passed to xyplot
##' @param yval variables to plot
##' @param limit maximum number of yval to plot
##' @param scales passed to xyplot
##' @param auto.key passed to xyplot
##' @param type passed to xyplot
##' @param as transformation for every yval that is plotted
##' @param ... arguments passed to xyplot
##' @export
##' @rdname plot_batch_mrgsims
setMethod("plot", c("batch_mrgsims","missing"), function(x,yval=variables(x),limit=9,...) {
# Build a default lattice formula from the "moving" knobs:
#   0 moving -> plain y~time; 1 -> group by that knob;
#   2+ -> condition panels on the second knob; 3+ with a single output ->
#   condition on knobs two and three.  Extra moving knobs beyond what can be
#   shown are fixed at their smallest value (see the 'drop' handling below).
mov <- moving(x)
rename <- mov %in% tran.use
mov[rename] <- mapvalues(mov[rename], tran.use,tran.alt)
data <- as.data.frame(x)
ny <- length(yval)
# Cap the number of output variables plotted at 'limit'
if(ny>limit) {
if(missing(limit)) warning(paste0("NOTE: showing first ",
limit,
" variables. Check limit argument."
), call.=FALSE)
yval <- yval[1:limit]
}
yval <- paste(yval, collapse="+")
drop <- c()
if(all(mov=="")) {
# No moving knobs: single ungrouped panel
fmla <- as.formula(paste(yval, "~time", sep=""))
groups <- rep(1,nrow(data))
mov <- character(0)
}
if(length(mov)==1) {
# One moving knob: colour/group the traces by its values
fmla <- as.formula(paste(yval, "~time", sep=""))
groups <- factor(data[,mov[1]], labels=paste(mov[1], sort(unique(data[,mov[1]]))))
}
if(length(mov)>=2) {
# Two or more: condition panels on the second knob
labels1 <- paste(mov[2],sort(unique(data[,mov[2]])))
fmla <- as.formula(paste(yval, "~time|factor(",mov[2],",labels=labels1)", sep=""))
groups <- factor(data[,mov[1]], labels=paste(mov[1], sort(unique(data[,mov[1]]))))
if(length(mov) >=3) drop <- mov[3:length(mov)]
}
if(length(mov) >= 3 & ny==1) {
# Three or more moving knobs with a single output: two conditioning factors
labels1 <- paste(mov[2],sort(unique(data[,mov[2]])))
labels2 <- paste(mov[3],sort(unique(data[,mov[3]])))
fmla <- as.formula(paste(yval, "~time|factor(",mov[2],",labels=labels1)*factor(",mov[3],",labels=labels2)", sep=""))
groups <- factor(data[,mov[1]], labels=paste(mov[1], sort(unique(data[,mov[1]]))))
if(length(mov)<=3) drop <- c()
if(length(mov)>=4) drop <- mov[4:length(mov)]
}
if(length(drop)>=1) {
# For knobs that cannot be shown, keep only rows at their smallest values
message("showing only smallest values for ", paste(drop, collapse=','), " in the plot")
data <- as.matrix(x)
retain <- apply(data[,drop, drop=FALSE], MARGIN=2,FUN=min)
retain <-apply(data[,drop,drop=FALSE], MARGIN=1, function(x) all(x==retain))
x@data <- data[retain, , drop=FALSE]
x@moving <- setdiff(mov,drop)
}
# Delegate to the formula method with the constructed formula and groups
plot(x,fmla,..., groups=groups)
})
##' @export
##' @rdname plot_batch_mrgsims
setMethod("plot", c("batch_mrgsims","formula"), function(x,y,
show.grid=TRUE,
lwd=2,
type="l",
as="raw",
auto.key=list(columns=1),
scales=list(y=list(relation='free')),
...) {
requireNamespace("lattice", quietly=TRUE)
# A right-hand side of '.' (e.g. CP~.) means "plot these outputs with the
# default formula"; delegate back to the missing-formula method.
if(y[[3]] == '.') {
yval <- all.vars(y[[2]])
return(plot(x,yval=as.character(yval),
show.grid=show.grid,
lwd=lwd, type=type,
auto.key=auto.key,as=as,
scales=scales,...))
}
data <- as.data.frame(x)
# Optional y-axis transforms: as="log" (natural log) or as="log10"
if(as=="log") {
scales$y$log="e"
scales$y$at=10^seq(-10,10)
}
if(as=="log10") {
scales$y$log=10
scales$y$at=10^seq(-10,10,1)
}
lattice::xyplot(y,data=data,
type=type,
scales=scales,
drop.unused.levels=TRUE,
lwd=lwd,auto.key=auto.key,
panel=function(...) {
if(show.grid) lattice::panel.grid(h=-1,v=-1)
lattice::panel.xyplot(...)
}, ...)
})
|
2fe9c1f68c45ee33df7b4eb9ce8cbd149ff93eaa
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.sagemaker/man/list_tags.Rd
|
89cec9062453d2191f35d9a0646056c292f61919
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 863
|
rd
|
list_tags.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.sagemaker_operations.R
\name{list_tags}
\alias{list_tags}
\title{Returns the tags for the specified Amazon SageMaker resource}
\usage{
list_tags(ResourceArn, NextToken = NULL, MaxResults = NULL)
}
\arguments{
\item{ResourceArn}{[required] The Amazon Resource Name (ARN) of the resource whose tags you want to retrieve.}
\item{NextToken}{If the response to the previous \code{ListTags} request is truncated, Amazon SageMaker returns this token. To retrieve the next set of tags, use it in the subsequent request.}
\item{MaxResults}{Maximum number of tags to return.}
}
\description{
Returns the tags for the specified Amazon SageMaker resource.
}
\section{Accepted Parameters}{
\preformatted{list_tags(
ResourceArn = "string",
NextToken = "string",
MaxResults = 123
)
}
}
|
b77f46e3af3f54027dd11df44e068de65dec6003
|
63be42f1b75b95d6a7e42410f32b260e319b5e10
|
/code/Differential_Expression/123_combined/DE_Village_123_combined_noTreat_batchCorrectionUsingLMandBloodAsCovar_withBlocking_all_villages.R
|
1ae7d4f7e6d6f0fabbf24764d6e22eec9fe98b32
|
[] |
no_license
|
ireneg/SEA_Regulatory_Variation
|
2d2107f21d18715a208cf25123521477e2a1273f
|
499aa1af77a90451688623149147497790b5609a
|
refs/heads/master
| 2020-05-04T13:50:03.358863
| 2019-12-23T06:16:55
| 2019-12-23T06:16:55
| 179,175,671
| 0
| 0
| null | 2019-04-02T23:44:36
| 2019-04-02T23:44:35
| null |
UTF-8
|
R
| false
| false
| 17,229
|
r
|
DE_Village_123_combined_noTreat_batchCorrectionUsingLMandBloodAsCovar_withBlocking_all_villages.R
|
# script created by KSB, 08.08.18
# Perform DE analysing relationship between islands
### Last edit: IGR 2019.10.19
### Changed paths to deal with removal of MPI-296
### 0. Load dependencies and functions and set input paths --------------------------
### 1. Begin analyses and initial QC ----------------------------------------------------------------------------------
### 2. DE testing with duplicate correlation and blocking -----------------------------------------------------
### 3. And now with random subsetting for power reasons... -----------------------------
### TO DO:
### Fix everything that's commented out (just figures)
### Triple check all numbers.
### Repeat this 1000 times.
##############################################################
### 0. Load dependencies and functions and set input paths ###
##############################################################
# Load dependencies:
library(edgeR)
library(plyr)
library(RColorBrewer)
library(ggplot2)
library(ggsignif)
library(viridis)
library(circlize)
library(ComplexHeatmap)
library(VennDiagram)
library(UpSetR)
library(matrixStats)
library(reshape)
library(wesanderson)
# Set paths:
inputdir <- "/data/cephfs/punim0586/igallego/indoRNA/de_testing/no_mpi296/" # on server
covariatedir <- "/data/cephfs/punim0586/igallego/indoRNA/"
# Set output directories and create them if they do not exist.
# BUGFIX: the original only created edaoutput when outputdir was missing, so
# an existing outputdir without an eda/ subfolder was never completed; each
# directory is now checked and created independently.
outputdir <- "/data/cephfs/punim0586/igallego/indoRNA/de_testing/no_mpi296/"
edaoutput <- paste0(outputdir, "eda/")
if (!dir.exists(outputdir)) {
dir.create(outputdir, recursive=TRUE)
}
if (!dir.exists(edaoutput)) {
dir.create(edaoutput, recursive=TRUE)
}
# Load colour schemes: one colour per island/population and per pairwise
# island contrast (used in downstream DE plots).
mappi <- wes_palette("Zissou1", 20, type = "continuous")[20]
mentawai <- wes_palette("Zissou1", 20, type = "continuous")[1]
sumba <- wes_palette("Zissou1", 20, type = "continuous")[11]
smb_mtw <- wes_palette("Darjeeling1", 9, type = "continuous")[3]
smb_mpi <- wes_palette("Darjeeling1", 9, type = "continuous")[7]
mtw_mpi <- "darkorchid4"
# Load log CPM matrix and y object (created by the upstream QC pipeline):
# lcpm
load(paste0(inputdir, "indoRNA.logCPM.TMM.filtered.Rda"))
# y DGE list object
load(paste0(inputdir, "indoRNA.read_counts.TMM.filtered.Rda"))
########################################
### 1. Begin analyses and initial QC ###
########################################
# First, inspect the sample counts per sampling village
table(yFilt$samples$Sampling.Site)
# Anakalung Bilarenge Hupu Mada Madobag Mappi Padira Tana
# 20 1 5 17 20 3
# Patiala Bawa Rindi Taileleu Wunga Wura Homba
# 1 5 32 17 1
# NOTE(review): only the single-sample villages (Bilarenge, Patiala Bawa,
# Wura Homba) are removed here; Hupu Mada, Padira Tana and Rindi are KEPT in
# this "all villages" analysis (the original comment listed all six).
yVillage <- yFilt[,-grep("Bilarenge|Patiala Bawa|Wura Homba", yFilt$samples$Sampling.Site)]
# drop unused levels
yVillage$samples <- droplevels(yVillage$samples)
# Set up design matrix: no intercept, village means plus Age, batch, RIN and
# estimated blood cell-type proportions as covariates
design <- model.matrix(~0 + yVillage$samples$Sampling.Site + yVillage$samples$Age + yVillage$samples$batch + yVillage$samples$RIN + yVillage$samples$CD8T + yVillage$samples$CD4T + yVillage$samples$NK + yVillage$samples$Bcell + yVillage$samples$Mono + yVillage$samples$Gran)
# rename columns to exclude spaces and unrecognised characters
colnames(design)=gsub("yVillage\\$samples\\$", "", colnames(design))
colnames(design)=gsub("Sampling.Site", "", colnames(design))
colnames(design)=gsub(" ", "_", colnames(design))
# set up contrast matrix: all pairwise village comparisons
contr.matrix <- makeContrasts( ANKvsMDB=Anakalung-Madobag, ANKvsMPI=Anakalung-Mappi, ANKvsTLL=Anakalung-Taileleu, ANKvsWNG=Anakalung-Wunga, ANKvsRIN = Anakalung-Rindi, ANKvsHPM = Anakalung-Hupu_Mada, ANKvsPDT = Anakalung-Padira_Tana,
WNGvsMDB=Wunga-Madobag, WNGvsMPI=Wunga-Mappi, WNGvsTLL=Wunga-Taileleu, WNGvsRIN = Wunga-Rindi, WNGvsHPM = Wunga-Hupu_Mada, WNGvsPDT = Wunga-Padira_Tana,
RINvsMDB= Rindi-Madobag, RINvsTLL= Rindi-Taileleu, RINvsMPI= Rindi-Mappi, RINvsHPM = Rindi-Hupu_Mada, RINvsPDT=Rindi-Padira_Tana,
HPMvsMDB= Hupu_Mada-Madobag, HPMvsTLL= Hupu_Mada-Taileleu, HPMvsMPI= Hupu_Mada-Mappi, HPMvsPDT=Hupu_Mada-Padira_Tana,
PDTvsMDB = Padira_Tana-Madobag, PDTvsTLL = Padira_Tana-Taileleu, PDTvsMPI = Padira_Tana-Mappi,
MDBvsMPI=Madobag-Mappi, MDBvsTLL=Madobag-Taileleu,
TLLvsMPI=Taileleu-Mappi,
levels=colnames(design)) # Contrasts are ordered in the same order as the island ones, in case we want to look at directional effects
# Recompute TMM normalisation factors on the filtered object
yVillage <- calcNormFactors(yVillage, method="TMM")
#############################################################
### 2. DE testing with duplicate correlation and blocking ###
#############################################################
# create a new variable for blocking using sample IDs:
# the token before the first "_" or "." in `samples` identifies the individual,
# so replicate libraries from the same person share one blocking level.
yVillage$samples$ind <- sapply(strsplit(as.character(yVillage$samples$samples), "[_.]"), `[`, 1)
# First, we need to perform voom normalisation
# No normalisation between samples beyond tmm and voom:
voomNoNorm <- voom(yVillage, design, normalize.method="none", plot=F)
# Estimate the within-individual correlation across replicate libraries.
dupcorNone <- duplicateCorrelation(voomNoNorm, design, block=yVillage$samples$ind) # 19 non-convergences
# The value dupcor$consensus estimates the average correlation within the blocks and should be positive
dupcorNone$consensus # sanity check
# [1] 0.6847412
median(voomNoNorm$weights) # another sanity check:
# [1] 23.39444
save(voomNoNorm, file=paste0(outputdir, "voomNoNorm.tmm.filtered.indoRNA.village.all_villages.Rda"))
# Second round: re-run voom feeding in the consensus correlation, then
# re-estimate it (the standard two-pass voom + duplicateCorrelation procedure).
voomNoNormDup <- voom(yVillage, design, plot=F, block=yVillage$samples$ind, correlation=dupcorNone$consensus)
dupcorNoneDup <- duplicateCorrelation(voomNoNormDup, design, block=yVillage$samples$ind) # 19 non convergences
dupcorNoneDup$consensus # sanity check pt 2
# [1] 0.6850491
median(voomNoNormDup$weights) # another sanity check, pt 2
# [1] 22.92187
# Diagnostic expression-density plots, coloured by batch and by island.
pdf(file=paste0(edaoutput, "voomNoNorm.tmm.filtered.indoRNA.densities.village.all_villages.pdf"))
plotDensities(voomNoNormDup, group=yVillage$samples$batch)
plotDensities(voomNoNormDup, group=yVillage$samples$Island)
dev.off()
save(voomNoNormDup, file=paste0(outputdir, "voomNoNorm.tmm.filtered.duplicate_corrected.indoRNA.village.all_villages.Rda"))
# DE testing:
# the inter-subject correlation is input into the linear model fit
voomNoNormDupVfit <- lmFit(voomNoNormDup, design, block=yVillage$samples$ind, correlation=dupcorNoneDup$consensus)
voomNoNormDupVfit <- contrasts.fit(voomNoNormDupVfit, contrasts=contr.matrix)
voomNoNormDupEfit <- eBayes(voomNoNormDupVfit, robust=T)
# pdf(file=paste0(edaoutput, "voomNoNorm.tmm.filtered.indoRNA.mean-variance-trend.village.all_villages.pdf"))
# plotSA(voomNoNormDupEfit, main="Mean-variance trend elimination with duplicate correction")
# dev.off()
# get top genes using toptable: one full ranked table per pairwise contrast (28 total)
allDEresults <- list()
for(i in 1:28){
allDEresults[[i]] <- topTable(voomNoNormDupEfit, coef=i, n=Inf, sort.by="p")
}
# DE gene counts per contrast at BH-adjusted p < 0.01, with increasing logFC cutoffs.
summary(decideTests(voomNoNormDupEfit, method="separate", adjust.method = "BH", p.value = 0.01))
summary(decideTests(voomNoNormDupEfit, method="separate", adjust.method = "BH", p.value = 0.01, lfc=0.5))
summary(decideTests(voomNoNormDupEfit, method="separate", adjust.method = "BH", p.value = 0.01, lfc=1))
# for (i in 1:28){
# write.table(allDEresults[[i]], file=paste0(outputdir,"topTable.voomNoNorm.tmm.filtered.dup_corrected.village.all_villages.", colnames(contr.matrix)[i], ".txt"))
# }
##############################################################
### 3. And now with random subsetting for power reasons... ###
##############################################################
# Drop Padira Tana (n = 3) before subsampling; all other retained villages
# have at least 5 individuals, which is the per-village draw size below.
# Let's drop PadiraTana because even I agree that three samples is ridiculous.
yVillage5 <- yVillage[,-grep("Padira Tana", yVillage$samples$Sampling.Site)]
yVillage5$samples <- droplevels(yVillage5$samples) # drop unused levels
# Since individual seeds were so dubious, I'm running this using a counter - 1000 random draws without replicates in there.
# God bless the internet:
# https://stackoverflow.com/questions/20507247/r-repeat-function-until-condition-met
# Draw a balanced random subset (5 samples per village, without replacement)
# from `testingData` and run a limma-voom DE analysis on it, so that every
# pairwise village contrast is tested with equal power.
#
# Draws are repeated until the subset contains 35 unique individuals
# (7 villages x 5 samples), i.e. until no individual enters twice via
# replicate libraries.
#
# Args:
#   testingData: an edgeR DGEList whose $samples data frame carries
#     Sampling.Site, samples, ID, Age, batch, RIN and blood-cell estimates.
# Returns:
#   The decideTests() summary matrix (down / not-DE / up counts per contrast)
#   at BH-adjusted p < 0.01 and |logFC| > 0.5.
deTestingSubsets <- function(testingData) {
  # Helper: draw 5 sample IDs per village and subset the DGEList to them.
  # (Identical sample() calls to the original, so the RNG stream is unchanged.)
  drawSubset <- function() {
    toKeep <- by(testingData$samples, testingData$samples$Sampling.Site, function(x) sample(x$samples, 5, replace = FALSE))
    testingData[, grepl(paste(unlist(toKeep), collapse = '|'), testingData$samples$samples)]
  }
  # Redraw until the subset holds 35 distinct individuals (no duplicate people).
  yVillageSub <- drawSubset()
  while (length(unique(yVillageSub$samples$ID)) != 35) {
    yVillageSub <- drawSubset()
  }
  # # A sanity check for debugging - output the list of samples used:
  # print("doing DE testing with the following samples:")
  # print(yVillageSub$samples$ID)
  # Design matrix: one coefficient per village (no intercept) plus age, batch,
  # RIN and blood-cell-fraction covariates.
  design5 <- model.matrix(~0 + yVillageSub$samples$Sampling.Site + yVillageSub$samples$Age + yVillageSub$samples$batch + yVillageSub$samples$RIN + yVillageSub$samples$CD8T + yVillageSub$samples$CD4T + yVillageSub$samples$NK + yVillageSub$samples$Bcell + yVillageSub$samples$Mono + yVillageSub$samples$Gran)
  # rename columns to exclude spaces and unrecognised characters
  colnames(design5) = gsub("yVillageSub\\$samples\\$", "", colnames(design5))
  colnames(design5) = gsub("Sampling.Site", "", colnames(design5))
  colnames(design5) = gsub(" ", "_", colnames(design5))
  # All 21 pairwise contrasts among the 7 villages, ordered as in the main analysis.
  contr.matrix5 <- makeContrasts( ANKvsMDB=Anakalung-Madobag, ANKvsMPI=Anakalung-Mappi, ANKvsTLL=Anakalung-Taileleu, ANKvsWNG=Anakalung-Wunga, ANKvsRIN = Anakalung-Rindi, ANKvsHPM = Anakalung-Hupu_Mada,
  WNGvsMDB=Wunga-Madobag, WNGvsMPI=Wunga-Mappi, WNGvsTLL=Wunga-Taileleu, WNGvsRIN = Wunga-Rindi, WNGvsHPM = Wunga-Hupu_Mada,
  RINvsMDB= Rindi-Madobag, RINvsTLL= Rindi-Taileleu, RINvsMPI= Rindi-Mappi, RINvsHPM = Rindi-Hupu_Mada,
  HPMvsMDB= Hupu_Mada-Madobag, HPMvsTLL= Hupu_Mada-Taileleu, HPMvsMPI= Hupu_Mada-Mappi,
  MDBvsMPI=Madobag-Mappi, MDBvsTLL=Madobag-Taileleu,
  TLLvsMPI=Taileleu-Mappi,
  levels=colnames(design5))
  yVillageSub <- calcNormFactors(yVillageSub, method="TMM")
  # Too few within-individual replicates remain after subsetting to estimate a
  # duplicate correlation, so fit without blocking.
  voomNoNorm5 <- voom(yVillageSub, design5, normalize.method="none", plot = FALSE)
  voomNoNormVfit5 <- lmFit(voomNoNorm5, design5)
  voomNoNormVfit5 <- contrasts.fit(voomNoNormVfit5, contrasts=contr.matrix5)
  voomNoNormEfit5 <- eBayes(voomNoNormVfit5, robust = TRUE)
  # NOTE(review): the original per-contrast topTable() loop was dead code (its
  # results were never used or returned) and has been removed; it cost 21
  # topTable() calls on every one of the 1000 replicate draws.
  summary(decideTests(voomNoNormEfit5, method="separate", adjust.method = "BH", p.value = 0.01, lfc=0.5))
}
# Fix the RNG seed so the 1000 random subset draws below are reproducible.
set.seed(110584) # Let's go with this one.
iterations <- 1000
allDESummaries <- list()
# Each replicate draws a fresh 5-per-village subset and returns its decideTests summary.
allDESummaries <- replicate(iterations, deTestingSubsets(yVillage5), simplify=FALSE)
# Now some analyses...
# Every entry is three rows, so first is to sum DE genes for every test:
# (rows 1 and 3 of each summary are the down- and up-regulated counts; summing
# them gives the total number of DE genes per contrast for that draw)
deGenesTables <- ldply(allDESummaries, function(x) colSums(x[c(1,3),]))
summary(deGenesTables) # Yeah this is hard to interpret, so probably worth putting aside for now. What about n = 15 instead, and repeating with those? There's no variability in Madobag, though, so maybe 10 is a good compromise
# Reshape to long format for ggplot; draws with zero DE genes are blanked to NA
# so they do not dominate the density estimates.
deGenesPlotting <- melt(deGenesTables)
deGenesPlotting[deGenesPlotting == 0] <- NA
pdf(paste0(edaoutput, "subsampling_densities.pdf"), width=18)
ggplot(deGenesPlotting, aes(x=value, colour=variable, fill=variable, group=variable)) +
geom_density() +
xlim(0,200) +
theme_bw() +
# facet_wrap(. ~ variable, nrow = 5, ncol = 5) +
labs(title="", y="density", x="DE genes") +
theme(legend.title=element_blank(), axis.text.x = element_text(angle = 45, hjust = 1), panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
guides(colour=F, fill=F)
dev.off()
# OK and now that we know what village is weird, let's just plot the RINs out by village, because this is soooo bizarre.
pdf(paste0(edaoutput, "RIN_by_village.pdf"))
ggplot(yFilt$samples, aes(x=Sampling.Site, y=RIN, fill=Sampling.Site)) +
geom_violin(trim=T) +
geom_boxplot(width=0.05, fill="white") +
theme_bw() +
labs(title="", y="RIN", x="") +
theme(legend.title=element_blank(), axis.text.x = element_text(angle = 45, hjust = 1), panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
guides(fill=F)
dev.off()
##########################################
### 4. Random subsetting to 10 inds... ###
##########################################
# Drop the villages with fewer than 10 individuals so a 10-per-village draw is possible.
# Let's drop small villages:
yVillage10 <- yVillage[,-grep("Padira Tana|Rindi|Hupu Mada", yVillage$samples$Sampling.Site)]
yVillage10$samples <- droplevels(yVillage10$samples) # drop unused levels
table(yVillage10$samples$Sampling.Site)
# Anakalung Madobag Mappi Taileleu Wunga
# 20 17 20 32 17
# Draw a balanced random subset (10 samples per village, without replacement)
# from `testingData` and run a limma-voom DE analysis on the subset.
#
# Draws are repeated until the subset contains 50 unique individuals
# (5 villages x 10 samples), i.e. until no individual enters twice via
# replicate libraries.
#
# Args:
#   testingData: an edgeR DGEList whose $samples data frame carries
#     Sampling.Site, samples, ID, Age, batch, RIN and blood-cell estimates.
# Returns:
#   The decideTests() summary matrix (down / not-DE / up counts per contrast)
#   at BH-adjusted p < 0.01 and |logFC| > 0.5.
deTestingSubsets10 <- function(testingData) {
  # Helper: draw 10 sample IDs per village and subset the DGEList to them.
  # (Identical sample() calls to the original, so the RNG stream is unchanged.)
  drawSubset <- function() {
    toKeep <- by(testingData$samples, testingData$samples$Sampling.Site, function(x) sample(x$samples, 10, replace = FALSE))
    testingData[, grepl(paste(unlist(toKeep), collapse = '|'), testingData$samples$samples)]
  }
  # Redraw until the subset holds 50 distinct individuals (no duplicate people).
  yVillageSub <- drawSubset()
  while (length(unique(yVillageSub$samples$ID)) != 50) {
    yVillageSub <- drawSubset()
  }
  # NOTE(review): the debug printing of drawn sample IDs is disabled here to
  # match deTestingSubsets() (it flooded the console over 1000 replicates);
  # re-enable the two lines below if a draw needs to be audited.
  # print("doing DE testing with the following samples:")
  # print(yVillageSub$samples$ID)
  # Design matrix: one coefficient per village (no intercept) plus age, batch,
  # RIN and blood-cell-fraction covariates.
  design <- model.matrix(~0 + yVillageSub$samples$Sampling.Site + yVillageSub$samples$Age + yVillageSub$samples$batch + yVillageSub$samples$RIN + yVillageSub$samples$CD8T + yVillageSub$samples$CD4T + yVillageSub$samples$NK + yVillageSub$samples$Bcell + yVillageSub$samples$Mono + yVillageSub$samples$Gran)
  # rename columns to exclude spaces and unrecognised characters
  colnames(design) = gsub("yVillageSub\\$samples\\$", "", colnames(design))
  colnames(design) = gsub("Sampling.Site", "", colnames(design))
  colnames(design) = gsub(" ", "_", colnames(design))
  # All 10 pairwise contrasts among the 5 villages, ordered as in the main analysis.
  contr.matrix <- makeContrasts( ANKvsMDB=Anakalung-Madobag, ANKvsMPI=Anakalung-Mappi, ANKvsTLL=Anakalung-Taileleu, ANKvsWNG=Anakalung-Wunga,
  WNGvsMDB=Wunga-Madobag, WNGvsMPI=Wunga-Mappi, WNGvsTLL=Wunga-Taileleu,
  MDBvsMPI=Madobag-Mappi, MDBvsTLL=Madobag-Taileleu,
  TLLvsMPI=Taileleu-Mappi,
  levels=colnames(design))
  yVillageSub <- calcNormFactors(yVillageSub, method="TMM")
  # Too few within-individual replicates remain after subsetting to estimate a
  # duplicate correlation, so fit without blocking.
  voomNoNorm <- voom(yVillageSub, design, normalize.method="none", plot = FALSE)
  voomNoNormVfit <- lmFit(voomNoNorm, design)
  voomNoNormVfit <- contrasts.fit(voomNoNormVfit, contrasts=contr.matrix)
  voomNoNormEfit <- eBayes(voomNoNormVfit, robust = TRUE)
  # NOTE(review): the original per-contrast topTable() loop was dead code (its
  # results were never used or returned) and has been removed.
  summary(decideTests(voomNoNormEfit, method="separate", adjust.method = "BH", p.value = 0.01, lfc=0.5))
}
# Reuse the same seed as the 5-per-village experiment for comparability.
set.seed(110584) # Let's go with this one.
iterations <- 1000
allDESummaries10 <- list()
# Each replicate draws a fresh 10-per-village subset and returns its decideTests summary.
allDESummaries10 <- replicate(iterations, deTestingSubsets10(yVillage10), simplify=FALSE)
# Now some analyses...
# Every entry is three rows, so first is to sum DE genes for every test:
# (rows 1 and 3 are the down- and up-regulated counts per contrast)
deGenesTables10 <- ldply(allDESummaries10, function(x) colSums(x[c(1,3),]))
summary(deGenesTables10) # Yeah this is hard to interpret, so probably worth putting aside for now. What about n = 15 instead, and repeating with those? There's no variability in Madobag, though, so maybe 10 is a good compromise
write.table(deGenesTables10, file=paste0(outputdir, "DE_subsampling_10_inds.txt"), quote=F, row.names=F, col.names=T, sep="\t", eol="\n")
#And now... some t.tests: compare DE-gene counts across the 1000 draws for
# contrasts sharing the Mappi comparison group.
t.test(deGenesTables10$ANKvsMPI, deGenesTables10$WNGvsMPI)$p.value
# [1] 7.450892e-196
t.test(deGenesTables10$MDBvsMPI, deGenesTables10$TLLvsMPI)$p.value
# [1] 4.719657e-216
|
15230aabe7bc928a98185fc05824d586d042e29b
|
aba010bbf8165acc06349b629ddf4593628325de
|
/Summary_NetworkRevolution.R
|
64c3d459934c6517509e58ade67b35182cdb2468
|
[] |
no_license
|
karafede/Energy
|
f2557d0e7e6b5ca1141177c836b2f2f2dc610355
|
9b3f8a0b344dd01fb9944ac85e326d28e1fa615e
|
refs/heads/master
| 2021-01-10T09:40:27.074294
| 2016-03-22T11:32:35
| 2016-03-22T11:32:35
| 54,454,862
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 58,771
|
r
|
Summary_NetworkRevolution.R
|
library(threadr)
library(readr)
library(openair)
library(ggplot2)
library(plotly)
library(dplyr)
#### Load Residential Customer data ################################################
# TC1a = basic residential profiles: per-location totals and hourly aggregates.
Basic_Profile_ID <- read_csv("C:/NetworkRevolution/TC1a/Electricity_data_TC1a.csv")
Basic_Profile_hour <- read_csv("C:/NetworkRevolution/TC1a/Electricity_data_TC1a_hour.csv")
# Negative consumption readings are treated as missing (meter errors).
Basic_Profile_ID$Sum_Electricity_kWh[Basic_Profile_ID$Sum_Electricity_kWh < 0] <- NA
Basic_Profile_hour$Sum_Electricity_kWh[Basic_Profile_hour$Sum_Electricity_kWh < 0] <- NA
### find outliers: rank hourly readings from largest to smallest for manual inspection
Outliers_Basic <- Basic_Profile_hour %>%
arrange(-Sum_Electricity_kWh)
### filter outliers
##### all OK !!!! ##########################################
# TC2a = enhanced residential profiles (sub-metered appliances/circuits).
Enhanced_Profile_ID <- read_csv("C:/NetworkRevolution/TC2a/Electricity_data_TC2a.csv")
Enhanced_Profile_hour <- read_csv("C:/NetworkRevolution/TC2a/Electricity_data_TC2a_hour.csv")
# Negative consumption readings are treated as missing (meter errors).
Enhanced_Profile_ID$Sum_Electricity_kWh[Enhanced_Profile_ID$Sum_Electricity_kWh < 0] <- NA
Enhanced_Profile_hour$Sum_Electricity_kWh[Enhanced_Profile_hour$Sum_Electricity_kWh < 0] <- NA
### find outliers: rank hourly readings from largest to smallest for manual inspection
Outliers_Enhanced <- Enhanced_Profile_hour %>%
arrange(-Sum_Electricity_kWh)
### filter outliers: these four meters showed implausible readings in the
### inspection above. A single vectorised %in% filter replaces the original
### four repeated subsetting passes (same rows removed, same row order kept).
enhanced_outlier_ids <- c(10174, 10221, 10017, 10044)
Enhanced_Profile_ID <- Enhanced_Profile_ID[!Enhanced_Profile_ID$location_id %in% enhanced_outlier_ids, ]
Enhanced_Profile_hour <- Enhanced_Profile_hour[!Enhanced_Profile_hour$location_id %in% enhanced_outlier_ids, ]
#################################
# TC3 = residential heat pumps (note: column is kW here, not kWh).
Heat_Pumps_Profile_ID <- read_csv("C:/NetworkRevolution/TC3/Electricity_data_TC3.csv")
Heat_Pumps_Profile_hour <- read_csv("C:/NetworkRevolution/TC3/Electricity_data_TC3_hour.csv")
# Negative readings are treated as missing (meter errors).
Heat_Pumps_Profile_ID$Sum_Electricity_kW[Heat_Pumps_Profile_ID$Sum_Electricity_kW < 0] <- NA
Heat_Pumps_Profile_hour$Sum_Electricity_kW[Heat_Pumps_Profile_hour$Sum_Electricity_kW < 0] <- NA
### find outliers: rank hourly readings from largest to smallest for manual inspection
Outliers_Heat_Pumps <- Heat_Pumps_Profile_hour %>%
arrange(-Sum_Electricity_kW)
### filter outliers
##### all OK !!!! ##########################################
# Indoor temperature data accompanying the TC3 heat-pump trial.
Temperature_ID <- read_csv("C:/NetworkRevolution/TC3/Temperature_data_TC3.csv")
Temperature_hour <- read_csv("C:/NetworkRevolution/TC3/Temperature_data_TC3_hour.csv")
# Give the measurement column a readable name (positional rename -- column
# order in the CSV must not change; TODO: rename by name to be robust).
colnames(Temperature_ID)[10] <- "Temperature"
colnames(Temperature_hour)[11] <- "Temperature"
### find outliers
Outliers_Temperature <- Temperature_hour %>%
arrange(-Temperature)
### filter outliers
##### all OK !!!! ##########################################
# TC5 = solar PV customers. No negative-value clamp here -- presumably because
# PV generation/export can legitimately be negative; confirm if in doubt.
SolarPV_Profile_ID <- read_csv("C:/NetworkRevolution/TC5/Electricity_data_TC5.csv")
SolarPV_Profile_hour <- read_csv("C:/NetworkRevolution/TC5/Electricity_data_TC5_hour.csv")
### find outliers
Outliers_SolarPV <- SolarPV_Profile_hour %>%
arrange(-Sum_Electricity_kW)
### filter outliers
##### all OK !!!! ##########################################
Carbon_ID_TC5 <- read_csv("C:/NetworkRevolution/TC5/Carbon_data_TC5.csv")
############################################################
# TC6 = electric-vehicle customers.
EV_Profile_ID <- read_csv("C:/NetworkRevolution/TC6/Electricity_data_TC6.csv")
EV_Profile_hour <- read_csv("C:/NetworkRevolution/TC6/Electricity_data_TC6_hour.csv")
### find outliers
Outliers_EV_hour <- EV_Profile_hour %>%
arrange(-Sum_Electricity_kWh)
### filter outliers
##### all OK !!!! ##########################################
# TC9a = smart-meter / smart-tariff customers.
Smart_Profile_ID <- read_csv("C:/NetworkRevolution/TC9a/Electricity_data_TC9a.csv")
Smart_Profile_hour <- read_csv("C:/NetworkRevolution/TC9a/Electricity_data_TC9a_hour.csv")
### find outliers
Outliers_Smart <- Smart_Profile_hour %>%
arrange(-Sum_Electricity_kWh)
### filter outliers
##### all OK !!!! ##########################################
# TC20 (automatic control) -- note the measurement column is named Sum_Parameter here.
SolarPV_Auto_ID <- read_csv("C:/NetworkRevolution/TC20_Auto/Electricity_data_TC20_Auto.csv")
SolarPV_Auto_hour <- read_csv("C:/NetworkRevolution/TC20_Auto/Electricity_data_TC20_Auto_hour.csv")
### find outliers
Outliers_PVAuto <- SolarPV_Auto_hour %>%
arrange(-Sum_Parameter)
### filter outliers
##### all OK !!!! ##########################################
# TC20IHD (manual control via in-home display), plus its carbon data.
SolarPV_Manual_ID <- read_csv("C:/NetworkRevolution/TC20IHD/Electricity_data_TC20IHD.csv")
SolarPV_Manual_hour <- read_csv("C:/NetworkRevolution/TC20IHD/Electricity_data_TC20IHD_hour.csv")
### find outliers
Outliers_PVManual <- SolarPV_Manual_hour %>%
arrange(-Sum_Electricity_kW)
Carbon_TC20HID <- read_csv("C:/NetworkRevolution/TC20IHD/Carbon_data_T20IHD.csv")
#############################################################################
# TC1b = small and medium enterprises (SMEs).
SMEs_ID <- read_csv("C:/NetworkRevolution/TC1b/Electricity_data_TC1b_ID.csv")
SMEs_hour <- read_csv("C:/NetworkRevolution/TC1b/Electricity_data_TC1b_hour.csv")
### find outliers
Outliers_SMEs <- SMEs_hour %>%
arrange(-Sum_Electricity_kWh)
### filter outliers
##### all OK !!!! ##########################################
################################################################################
################################################################################
############################## TC1a ############################################
################################################################################
################################################################################
# Keep only customers with a known Mosaic (demographic) classification.
Basic_Profile_ID <- subset(Basic_Profile_ID, !is.na(mosaic_class))
Basic_Profile_hour <- subset(Basic_Profile_hour, !is.na(mosaic_class))
## Plot data
# Quick look at a single meter's hourly profile (location_id == 1).
Basic_Profile_hour %>%
filter(location_id == 1) %>% ggplot(aes(hour, Sum_Electricity_kWh)) + geom_line()
# Bar chart of cumulative consumption per Mosaic class, written to JPEG.
jpeg('C:/NetworkRevolution/Plots/Cumulative_Basic_ID_TC1a.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = Basic_Profile_ID,
aes(mosaic_class, Sum_Electricity_kWh, fill = mosaic_class)) +
geom_bar(stat = "identity") +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr. Cons. (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Electricity Consumption (Basic Profiles)") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
p # print the plot to the open jpeg device
par(oldpar)
dev.off()
########## TIME TREND #########################################################
# Faceted bar chart: hourly cumulative consumption, one panel per Mosaic class.
jpeg('C:/NetworkRevolution/Plots/Time_Trend_all_Classes_Basic_Profiels(single)_TC1a.jpg',
quality = 100, bg = "white", res = 200, width = 18, height = 11, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = Basic_Profile_hour,
aes(hour, Sum_Electricity_kWh, fill = hour)) +
geom_bar(stat = "identity") + facet_wrap( ~ mosaic_class, ncol = 4) + #scales="free_y"
guides(fill=FALSE) +
theme( strip.text = element_text(size = 15)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "red")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr.Consumption (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Time Trend of Consumption by class (Basic profile)") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
q # print the plot to the open jpeg device
par(oldpar)
dev.off()
# Optional plotly upload of the same figure (disabled; requires credentials).
# Sys.setenv("plotly_username" = "karafede")
# Sys.setenv("plotly_api_key" = "v516efgsn7")
# (gg <- ggplotly(q))
# plotly_POST(gg, filename = "home/prova_FK/Basic_Profile_Electricity_UK")
###################################################################################
# Bar chart: hourly cumulative consumption pooled over all Mosaic classes.
jpeg('C:/NetworkRevolution/Plots/Time_Trend_all_Classes_TC1a.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = Basic_Profile_hour,
aes(hour, Sum_Electricity_kWh, fill = hour)) +
geom_bar(stat = "identity") + guides(fill=FALSE) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr.Consumption (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Time Trend of Consumption (all classes)") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 20))
q # print the plot to the open jpeg device
par(oldpar)
dev.off()
#########################################################################
# Split the hourly basic-profile data into one data frame per Mosaic class;
# each subset keeps only rows whose mosaic_class matches the label exactly.
Alpha_Territory      <- filter(Basic_Profile_hour, mosaic_class == "Alpha Territory")
Ex_Council_Community <- filter(Basic_Profile_hour, mosaic_class == "Ex-Council Community")
Upper_Floor_Living   <- filter(Basic_Profile_hour, mosaic_class == "UPPER FLOOR Living")
Active_Retirement    <- filter(Basic_Profile_hour, mosaic_class == "Active Retirement")
Suburban_Mindsets    <- filter(Basic_Profile_hour, mosaic_class == "Suburban Mindsets")
New_Homemakers       <- filter(Basic_Profile_hour, mosaic_class == "NEW Homemakers")
Terraced_Melting_Pot <- filter(Basic_Profile_hour, mosaic_class == "Terraced Melting Pot")
################################################################################
# Bar chart: hourly cumulative consumption for the Ex-Council Community class only.
jpeg('C:/NetworkRevolution/Plots/Ex_Council_Community_hour_TC1a.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = Ex_Council_Community,
aes(hour, Sum_Electricity_kWh, fill = hour)) +
geom_bar(stat = "identity") + guides(fill=FALSE) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr.Consumption (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Ex Council Community consumption") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
q # print the plot to the open jpeg device
par(oldpar)
dev.off()
############################
#### Display the time when maximum consumption was measured ##################
# For each meter, keep only the single hour with its largest reading.
Basic_Profile_max <- Basic_Profile_hour %>%
group_by(location_id) %>%
arrange(-Sum_Electricity_kWh) %>% ### from the biggest to the smaller (decreasing order)
slice(1) %>%
ungroup()
# Positional rename of the hour column to "max_hour" (column 10 of the hourly table).
colnames(Basic_Profile_max)[10] <- "max_hour"
# Bar chart: cumulative peak consumption by the hour at which the peak occurred.
jpeg('C:/NetworkRevolution/Plots/Max_Consumption_all_classes_TC1a.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = Basic_Profile_max,
aes(max_hour, Sum_Electricity_kWh, fill = max_hour)) + guides(fill=FALSE) +
geom_bar(stat = "identity") +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr. Consumption (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Maximum Electricity Consumption (all classes)") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 20))
p # print the plot to the open jpeg device
par(oldpar)
dev.off()
#############################################
# Same peak-hour chart, faceted by Mosaic class (free y-scales per panel).
jpeg('C:/NetworkRevolution/Plots/Max_Consumption_by_class_TC1a.jpg',
quality = 100, bg = "white", res = 200, width = 18, height = 11, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = Basic_Profile_max,
aes(max_hour, Sum_Electricity_kWh, fill = max_hour)) + guides(fill=FALSE) +
geom_bar(stat = "identity") + facet_wrap( ~ mosaic_class, ncol = 4, scales="free_y") +
guides(fill=FALSE) +
theme( strip.text = element_text(size = 15)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr. Consumption (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Maximum Electricity Consumption by class") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
p # print the plot to the open jpeg device
par(oldpar)
dev.off()
################################################################################
################################################################################
################################################################################
############################## TC2a ############################################
################################################################################
################################################################################
### Drop rows with missing descriptions/values; remove the "Total Property"
### circuit from the per-location totals so it does not double-count sub-circuits.
# NOTE(review): only "Total Property" is removed (not "Upstairs lights" as the
# original comment said), and only from the per-ID table -- the hourly table
# keeps it because the "Total Property" plot further below uses it.
Enhanced_Profile_ID <- subset(Enhanced_Profile_ID, !is.na(measurement_description))
Enhanced_Profile_hour <- subset(Enhanced_Profile_hour, !is.na(measurement_description))
Enhanced_Profile_ID <- subset(Enhanced_Profile_ID, !is.na(Sum_Electricity_kWh))
Enhanced_Profile_hour <- subset(Enhanced_Profile_hour, !is.na(Sum_Electricity_kWh))
Enhanced_Profile_ID <- Enhanced_Profile_ID[!Enhanced_Profile_ID$measurement_description == "Total Property",]
# Bar chart: cumulative consumption per monitored utility/circuit.
jpeg('C:/NetworkRevolution/Plots/Cumulative_Enhanced_Filtered_ID_TC2a.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = Enhanced_Profile_ID,
aes(measurement_description, Sum_Electricity_kWh, fill = measurement_description)) +
geom_bar(stat = "identity") + guides(fill=FALSE) +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr. Cons. (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Electricity Consumption by Utility (Enhanced Profiles)") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
p # print the plot to the open jpeg device
par(oldpar)
dev.off()
########################################################################
# Bar chart: hourly cumulative consumption pooled over all utilities.
jpeg('C:/NetworkRevolution/Plots/Time_Trend_all_Utilities_TC2a.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = Enhanced_Profile_hour,
aes(hour, Sum_Electricity_kWh, fill = hour)) +
geom_bar(stat = "identity") + guides(fill=FALSE) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr.Consumption (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Time Trend of Consumption (all utilities)") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
q # print the plot to the open jpeg device
par(oldpar)
dev.off()
################################
# Faceted bar chart: hourly consumption, one panel per utility (free y-scales).
jpeg('C:/NetworkRevolution/Plots/Time_Trend_by_Utility_TC2a.jpg',
quality = 100, bg = "white", res = 200, width = 18, height = 11, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = Enhanced_Profile_hour,
aes(hour, Sum_Electricity_kWh, fill = hour)) +
geom_bar(stat = "identity") + facet_wrap( ~ measurement_description, ncol = 4, scales="free_y") +
guides(fill=FALSE) +
theme( strip.text = element_text(size = 15)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr.Consumption (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Time Trend of Consumption by utility") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
q # print the plot to the open jpeg device
par(oldpar)
dev.off()
#########################################################################
# One hourly data frame per monitored appliance / circuit; each subset keeps
# only rows whose measurement_description matches the label exactly.
Total_Property  <- filter(Enhanced_Profile_hour, measurement_description == "Total Property")
Upstairs_Lights <- filter(Enhanced_Profile_hour, measurement_description == "Upstairs lights")
Electric_Heater <- filter(Enhanced_Profile_hour, measurement_description == "Electric heater")
Cooker          <- filter(Enhanced_Profile_hour, measurement_description == "Cooker")
Washing_Machine <- filter(Enhanced_Profile_hour, measurement_description == "Washing Machine")
##########################################################################
# Bar chart: hourly cumulative consumption for the "Total Property" circuit.
jpeg('C:/NetworkRevolution/Plots/Total_Property_hour_TC2a.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = Total_Property,
aes(hour, Sum_Electricity_kWh, fill = hour)) +
geom_bar(stat = "identity") + guides(fill=FALSE) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr.Consumption (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Total Property consumption") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
q # print the plot to the open jpeg device
par(oldpar)
dev.off()
#############################
#### Display the time when maximum consumption was measured ################
# For every location keep the single hourly record with the highest
# consumption; column 10 (the hour) is then renamed to mark it as the
# hour of the peak.
Enhanced_Profile_max <- Enhanced_Profile_hour %>%
  group_by(location_id) %>%
  arrange(desc(Sum_Electricity_kWh)) %>%
  slice(1) %>%
  ungroup()
names(Enhanced_Profile_max)[10] <- "max_hour"
## Bar chart of each location's peak hourly consumption, plotted against the
## hour at which that peak occurred (all utilities, TC2a).
jpeg('C:/NetworkRevolution/Plots/Max_Consumption_all_utilities_TC2a.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = Enhanced_Profile_max,
aes(max_hour, Sum_Electricity_kWh, fill = max_hour)) +
geom_bar(stat = "identity") + guides(fill=FALSE) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr. Consumption (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Maximum Electricity Consumption (all utilities)") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
p
par(oldpar)
dev.off()
################################################################################
################################################################################
############################## TC3 ############################################
################################################################################
################################################################################
## Bar chart comparing cumulative heat-pump consumption with whole-home
## power import, per measurement type (TC3).
jpeg('C:/NetworkRevolution/Plots/Cumulative_Heat_Pumps_ID_TC3.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = Heat_Pumps_Profile_ID,
aes(measurement_description, Sum_Electricity_kW, fill = measurement_description)) +
geom_bar(stat = "identity") +
theme(axis.text.x=element_text(angle=0,hjust=0.5,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Power Consumption & Import (kW)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Power import and Power consumption") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 15))
p
par(oldpar)
dev.off()
#############################################################################
## Split the hourly TC3 data into the two measured quantities.
Heat_Pump_Power  <- filter(Heat_Pumps_Profile_hour,
                           measurement_description == "heat pump power consumption")
Whole_home_Power <- filter(Heat_Pumps_Profile_hour,
                           measurement_description == "whole home power import")
###########################################################################
## Hourly time-trend bars, faceted by measurement type (heat-pump
## consumption vs whole-home import) (TC3).
jpeg('C:/NetworkRevolution/Plots/Time_Trend_Heat_Pumps&Power_TC3.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = Heat_Pumps_Profile_hour,
aes(hour, Sum_Electricity_kW, fill = hour)) +
geom_bar(stat = "identity") + facet_grid(. ~ measurement_description) + guides(fill=FALSE) +
theme( strip.text = element_text(size = 18)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Power (kW)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Time Trend for Heat Pumps consumption & Home power import") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 20))
q
par(oldpar)
dev.off()
############################################################################
#### Display the time when maximum consumption was measured ################
# Per location and measurement type, keep the hourly record with the
# highest power value; column 10 (the hour) becomes the peak hour.
Power_max <- Heat_Pumps_Profile_hour %>%
  group_by(location_id, measurement_description) %>%
  arrange(desc(Sum_Electricity_kW)) %>%
  slice(1) %>%
  ungroup()
names(Power_max)[10] <- "max_hour"
## Peak power per location plotted against the hour of the peak,
## faceted by measurement type (TC3).
jpeg('C:/NetworkRevolution/Plots/Maximum_Heat_Pumps&Power_TC3.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = Power_max,
aes(max_hour, Sum_Electricity_kW, fill = max_hour)) +
geom_bar(stat = "identity") + facet_grid(. ~ measurement_description) + guides(fill=FALSE) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Power (kW)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Maximum Heat Pumps consumption & Home power import") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
p
par(oldpar)
dev.off()
####### Temperature data ###################################################
############################################################################
## Mean external and zone-1 temperatures as bars (TC3).
## NOTE(review): stat_summary's `fun.y` argument is deprecated in
## ggplot2 >= 3.3 in favour of `fun`; still works with a warning.
jpeg('C:/NetworkRevolution/Plots/Average_Temperature_Heat_Pumps_ID_TC3.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = Temperature_ID,
aes(x = measurement_description, y = Temperature, fill = measurement_description)) +
stat_summary(fun.y=mean, geom="bar") +
# theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
# theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Average Temperature (Celsius)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Average External & Zone 1 Temperature") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
p
par(oldpar)
dev.off()
#############################################################################
## Split the hourly temperature data by sensor location.
External_Temp <- filter(Temperature_hour,
                        measurement_description == "External temperature")
Zone_1_Temp   <- filter(Temperature_hour,
                        measurement_description == "Zone 1 temperature")
############################################################################
## Mean temperature per hour, faceted by sensor (external vs zone 1) (TC3).
## NOTE(review): `fun.y` is deprecated in ggplot2 >= 3.3 (use `fun`).
jpeg('C:/NetworkRevolution/Plots/Time_Trend_Temperature_TC3.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = Temperature_hour,
aes(x = hour, y = Temperature, fill = hour)) +
stat_summary(fun.y=mean, geom="bar") + facet_grid(. ~ measurement_description) + guides(fill=FALSE) +
theme( strip.text = element_text(size = 18)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Temperature (Celsius)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Time Trend External and Zone 1 temperature") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 20))
q
par(oldpar)
dev.off()
#### Display the time when the maximum temperature was measured ############
# Per location and sensor, keep the hourly record with the highest
# temperature; column 10 (the hour) is renamed to the peak hour.
Temp_max <- Temperature_hour %>%
  group_by(location_id, measurement_description) %>%
  arrange(desc(Temperature)) %>%
  slice(1) %>%
  ungroup()
names(Temp_max)[10] <- "max_hour"
# jpeg('C:/NetworkRevolution/Plots/Max_Temperature_TC3.jpg',
# quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
# par(mar=c(4, 10, 9, 2) + 0.3)
# oldpar <- par(las=1)
#
# q <- ggplot(data = Temp_max,
# aes(x = max_hour, y = Temperature, fill = max_hour)) +
# stat_summary(fun.y=mean, geom="bar") + facet_grid(. ~ measurement_description) + guides(fill=FALSE) +
# theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
# theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
# theme(axis.title.x = element_blank()) + # Remove x-axis label
# ylab("Temperature (Celsius)") + # Set y-axis label
# theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
# axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
# xlab("hour") + # Set y-axis label
# theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
# axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
# ggtitle("Maximum External and Zone 1 temperature") +
# theme(plot.title = element_text(lineheight=.8, face="bold"))
# q
#
# par(oldpar)
# dev.off()
################################################################################
################################################################################
############################## TC5 ############################################
################################################################################
################################################################################
## Drop TC5 rows lacking a measurement description or a power value.
SolarPV_Profile_ID <- subset(
  SolarPV_Profile_ID,
  !is.na(measurement_description) & !is.na(Sum_Electricity_kW)
)
## Cumulative solar generation vs whole-home import as bars (TC5); the
## horizontal line at 0 separates export (negative) from import.
jpeg('C:/NetworkRevolution/Plots/Cumulative_SolarPV_TC5.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = SolarPV_Profile_ID,
aes(measurement_description, Sum_Electricity_kW, fill = measurement_description)) +
geom_bar(stat = "identity", position = "identity") +
theme(axis.text.x=element_text(angle=0,hjust=0.5,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Power Consumption & Import (kW)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Solar Power & Whole home import Power consumption") +
theme(plot.title = element_text(lineheight=.8, face="bold")) +
geom_hline(yintercept = 0)
p
par(oldpar)
dev.off()
######### TIME TREND #########################################################
## Hourly time-trend bars, faceted by measurement type (TC5).
jpeg('C:/NetworkRevolution/Plots/Time_Trend_Solar_Power_TC5.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = SolarPV_Profile_hour,
aes(hour, Sum_Electricity_kW, fill = hour)) +
geom_bar(stat = "identity", position = "identity") + facet_grid(. ~ measurement_description) + guides(fill=FALSE) +
theme( strip.text = element_text(size = 18)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Power (kW)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Time Trend for Solar and Whole Home power import") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 20)) +
geom_hline(yintercept = 0)
q
par(oldpar)
dev.off()
####### CARBON data for Solar Panel users ##################################
############################################################################
## Peak power of the installed PV panels, faceted by system size (TC5).
jpeg('C:/NetworkRevolution/Plots/System_Size_Peak_Power_SolarPV_TC5.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = Carbon_ID_TC5,
aes(measurement_description, System_Size_kW_peak, fill = measurement_description)) +
geom_bar(stat = "identity") + facet_grid(. ~ System_Size_kW_peak) + guides(fill=FALSE) +
theme( strip.text = element_text(size = 18)) +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=2)) +
theme(axis.text.x=element_text(size=20,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Peak Power (kW)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Peak Power of solar PV panels") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 20))
p
par(oldpar)
dev.off()
################################################################################
################################################################################
############################## TC6 #############################################
################################################################################
################################################################################
## Drop TC6 rows with a missing consumption value (both aggregations).
EV_Profile_ID   <- subset(EV_Profile_ID, !is.na(Sum_Electricity_kWh))
EV_Profile_hour <- subset(EV_Profile_hour, !is.na(Sum_Electricity_kWh))
## Cumulative electric-vehicle consumption per measurement type (TC6).
jpeg('C:/NetworkRevolution/Plots/Electric_Vehicles_User_Power_Consumption_TC6.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = EV_Profile_ID,
aes(measurement_description, Sum_Electricity_kWh, fill = measurement_description)) +
geom_bar(stat = "identity") +
theme(axis.text.x=element_text(angle=0,hjust=0.5,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Power Consumption (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Electric Vehicles Power User Consumption") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
p
par(oldpar)
dev.off()
######### TIME TREND #########################################################
## Hourly EV consumption, faceted by measurement type (TC6).
jpeg('C:/NetworkRevolution/Plots/Time_Trend_Electric_Vehicles_User_Consumption_TC6.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = EV_Profile_hour,
aes(hour, Sum_Electricity_kWh, fill = hour)) +
geom_bar(stat = "identity") + facet_grid(. ~ measurement_description) + guides(fill=FALSE) +
theme( strip.text = element_text(size = 18)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Power (kW)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Time Trend of Electric Vehicles Power Consumption") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 20))
q
par(oldpar)
dev.off()
################################################################################
################################################################################
############################## TC9a ############################################
################################################################################
################################################################################
## Drop TC9a rows without a Mosaic classification (both aggregations).
Smart_Profile_ID   <- subset(Smart_Profile_ID, !is.na(mosaic_class))
Smart_Profile_hour <- subset(Smart_Profile_hour, !is.na(mosaic_class))
## Cumulative consumption per Mosaic class (smart profiles, TC9a).
jpeg('C:/NetworkRevolution/Plots/Cumulative_Smart_ID_TC9a.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = Smart_Profile_ID,
aes(mosaic_class, Sum_Electricity_kWh, fill = mosaic_class)) +
geom_bar(stat = "identity") +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr. Cons. (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Electricity Consumption (Smart Profiles)") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
p
par(oldpar)
dev.off()
########## TIME TREND #########################################################
## Hourly consumption wrapped into one facet panel per Mosaic class (TC9a).
jpeg('C:/NetworkRevolution/Plots/Time_Trend_all_Classes_Smart_Profiles(single)_TC9a.jpg',
quality = 100, bg = "white", res = 200, width = 18, height = 11, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = Smart_Profile_hour,
aes(hour, Sum_Electricity_kWh, fill = hour)) +
geom_bar(stat = "identity") + facet_wrap( ~ mosaic_class, ncol = 4) + ##scales="free_y"
guides(fill=FALSE) +
theme( strip.text = element_text(size = 15)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "red")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr.Consumption (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Time Trend of Consumption by class (Smart Profiles)") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
q
par(oldpar)
dev.off()
################################################################################
################################################################################
############################## TC20_Auto #############################################
################################################################################
################################################################################
## Clean the TC20 (automatic hot-water charging) data and attach a unit
## suffix to each known measurement description.
SolarPV_Auto_ID <- subset(SolarPV_Auto_ID, !is.na(Sum_Parameter))
#### Rename measurement description ##################################
# Append the measurement unit to each recognised label. Case-insensitive
# substring matching reproduces the behaviour of the repeated
# ifelse(grepl(...)) statements this helper replaces; labels that match
# no pattern are returned unchanged.
add_measurement_units <- function(labels) {
  labels <- ifelse(grepl("Demand current", labels, ignore.case = TRUE),
                   "Demand current [A]", labels)
  labels <- ifelse(grepl("Maximum current exported", labels, ignore.case = TRUE),
                   "Maximum current exported [A]", labels)
  labels <- ifelse(grepl("Supply voltage", labels, ignore.case = TRUE),
                   "Supply voltage [V]", labels)
  ifelse(grepl("Photovoltaic meter", labels, ignore.case = TRUE),
         "Photovoltaic meter [kWh]", labels)
}
SolarPV_Auto_ID$measurement_description <-
  add_measurement_units(SolarPV_Auto_ID$measurement_description)
SolarPV_Auto_hour <- subset(SolarPV_Auto_hour, !is.na(Sum_Parameter))
SolarPV_Auto_hour$measurement_description <-
  add_measurement_units(SolarPV_Auto_hour$measurement_description)
## Cumulative readings per measurement type for solar PV with automatic
## hot-water charging (TC20).
jpeg('C:/NetworkRevolution/Plots/Solar_PV_auto_hot_water_charging_TC20_Auto.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = SolarPV_Auto_ID,
aes(measurement_description, Sum_Parameter, fill = measurement_description)) +
geom_bar(stat = "identity", position = "identity") +
# facet_wrap( ~ measurement_description, ncol = 4) +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=20,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Consumption") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Consumption from solar PV with automatic hot water charging") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 20))
p
par(oldpar)
dev.off()
######### TIME TREND #########################################################
## Hourly readings, one free-scaled facet panel per measurement type (TC20).
jpeg('C:/NetworkRevolution/Plots/Time_Trend_Solar_PV_auto_hot_water_charging_TC20_Auto.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = SolarPV_Auto_hour,
aes(hour, Sum_Parameter, fill = hour)) +
geom_bar(stat = "identity") +
facet_wrap( ~ measurement_description, ncol = 2, scales="free_y") + guides(fill=FALSE) +
theme( strip.text = element_text(size = 18)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Consumption") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Consumption from solar PV with automatic hot water charging") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 20))
q
par(oldpar)
dev.off()
#################################################################################
################################################################################
############################## TC20IHD ############################################
################################################################################
################################################################################
## Cumulative solar generation vs whole-home import for the manual-control
## group (TC20IHD); the line at 0 separates export from import.
jpeg('C:/NetworkRevolution/Plots/Cumulative_SolarPV_manual_control_TC20IHD.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = SolarPV_Manual_ID,
aes(measurement_description, Sum_Electricity_kW, fill = measurement_description)) +
geom_bar(stat = "identity", position = "identity") +
theme(axis.text.x=element_text(angle=0,hjust=0.5,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Power Consumption & Import (kW)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Solar Power & Whole home import Power (Manual Control)") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 15)) +
geom_hline(yintercept = 0)
p
par(oldpar)
dev.off()
######### TIME TREND #########################################################
## Hourly time-trend bars, faceted by measurement type (manual control,
## TC20IHD).
jpeg('C:/NetworkRevolution/Plots/Time_Trend_Solar_PV_manual_control_TC20IHD.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = SolarPV_Manual_hour,
aes(hour, Sum_Electricity_kW, fill = hour)) +
geom_bar(stat = "identity", position = "identity") + facet_grid(. ~ measurement_description) + guides(fill=FALSE) +
theme( strip.text = element_text(size = 18)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Power (kW)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Time Trend for Solar and Whole Home power import(Manual Control)") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 20)) +
geom_hline(yintercept = 0)
q
par(oldpar)
dev.off()
####### CARBON data for Solar Panel users ##################################
############################################################################
## Peak power of PV panels, faceted by system size (manual control,
## TC20IHD).
jpeg('C:/NetworkRevolution/Plots/Peak_Power_SolarPV_Manual_Control_TC20IHD.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = Carbon_TC20HID,
aes(measurement_description, System_Size_kW_peak, fill = measurement_description)) +
geom_bar(stat = "identity") + facet_grid(. ~ System_Size_kW_peak) + guides(fill=FALSE) +
theme( strip.text = element_text(size = 18)) +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=13,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Peak Power (kW)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Peak Power of solar PV panels (Manual Control)") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 20))
p
par(oldpar)
dev.off()
################################################################################
################################################################################
############################## TC1b ############################################
################################################################################
################################################################################
## Remove SME records missing any grouping field or the consumption value
## (combines the six single-column subsets into one condition per frame).
SMEs_ID <- subset(SMEs_ID,
                  !is.na(sub_group) & !is.na(size) & !is.na(Sum_Electricity_kWh))
SMEs_hour <- subset(SMEs_hour,
                    !is.na(sub_group) & !is.na(size) & !is.na(Sum_Electricity_kWh))
## Quick visual check: hourly consumption line for a single location.
ggplot(filter(SMEs_hour, location_id == 100736),
       aes(hour, Sum_Electricity_kWh)) +
  geom_line()
## Cumulative SME consumption per sector, one free-scaled facet panel per
## company size (TC1b).
jpeg('C:/NetworkRevolution/Plots/Cumulative_Electricity_SMEs_ID_TC1b.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
p <- ggplot(data = SMEs_ID,
aes(sector, Sum_Electricity_kWh, fill = sector)) +
geom_bar(stat = "identity") +
facet_wrap( ~ size, scales="free_y") + guides(fill=FALSE) +
theme( strip.text = element_text(size = 15)) +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr. Cons. (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Electricity Consumption (SMEs)") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
p
par(oldpar)
dev.off()
########## TIME TREND #########################################################
## Overall hourly SME consumption, all sectors and sizes combined (TC1b).
jpeg('C:/NetworkRevolution/Plots/Total_SMEs_hour_TC1b.jpg',
quality = 100, bg = "white", res = 200, width = 10, height = 7, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = SMEs_hour,
aes(hour, Sum_Electricity_kWh, fill = hour)) +
geom_bar(stat = "identity") + guides(fill=FALSE) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr.Consumption (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=13),
axis.text.y = element_text(angle=0, vjust=0.5, size=13)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=13),
axis.text.x = element_text(angle=0, vjust=0.5, size=13)) +
ggtitle("Total Property consumption for SMEs") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
q
par(oldpar)
dev.off()
###############################################################
## Hourly SME consumption, one facet panel per sector (TC1b).
jpeg('C:/NetworkRevolution/Plots/Time_Trend_by_Sector_SMEs_TC1b.jpg',
quality = 100, bg = "white", res = 200, width = 18, height = 11, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = SMEs_hour,
aes(hour, Sum_Electricity_kWh, fill = hour)) +
geom_bar(stat = "identity") + facet_wrap( ~ sector, ncol = 2) +
guides(fill=FALSE) +
theme( strip.text = element_text(size = 30)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=15,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr.Consumption (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=15),
axis.text.y = element_text(angle=0, vjust=0.5, size=15)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=15),
axis.text.x = element_text(angle=0, vjust=0.5, size=20)) +
ggtitle("Time Trend of Consumption by Sector (SMEs)") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 30))
q
par(oldpar)
dev.off()
#####################################################################
## Hourly SME consumption, one facet panel per company size (TC1b).
jpeg('C:/NetworkRevolution/Plots/Time_Trend_by_Size_SMEs_TC1b.jpg',
quality = 100, bg = "white", res = 200, width = 18, height = 11, units = "in")
par(mar=c(4, 10, 9, 2) + 0.3)
oldpar <- par(las=1)
q <- ggplot(data = SMEs_hour,
aes(hour, Sum_Electricity_kWh, fill = hour)) +
geom_bar(stat = "identity") + facet_wrap( ~ size, ncol = 4) +
guides(fill=FALSE) +
theme( strip.text = element_text(size = 30)) +
theme(axis.text.x=element_text(angle=0,hjust=1,vjust=0.5)) +
theme(axis.text.x=element_text(size=12,face="bold", colour = "black")) +
theme(axis.title.x = element_blank()) + # Remove x-axis label
ylab("Cumulative Electr.Consumption (kWh)") + # Set y-axis label
theme(axis.title.y = element_text(face="bold", colour="#990000", size=15),
axis.text.y = element_text(angle=0, vjust=0.5, size=15)) +
xlab("hour") + # Set x-axis label
theme(axis.title.x = element_text(face="bold", colour="#990000", size=15),
axis.text.x = element_text(angle=0, vjust=0.5, size=20)) +
ggtitle("Time Trend of Consumption by Size (SMEs)") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 25))
q
par(oldpar)
dev.off()
|
40b606245b9b85a88fd51afd39f7cb41c02d32dc
|
c79d865438aaa879e97f1082a3d36e4f61172e44
|
/complete.R
|
022f7eb74ead4276e134df61c74d7fe71b7abdd0
|
[] |
no_license
|
srinivasmurthyps/datasciencecoursera
|
8f77ffc5b25ff441f41bfba50ace9bb6b049e231
|
6497e4add2a61a4aecaf1b292a7e0cefc48069b9
|
refs/heads/master
| 2022-12-15T18:14:01.181098
| 2020-09-19T23:06:56
| 2020-09-19T23:06:56
| 293,680,794
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 503
|
r
|
complete.R
|
complete <- function (directory, id) {
  # Count the complete (no-NA) observations in each monitor CSV file.
  #
  # Args:
  #   directory: path prefix that the zero-padded file name is appended to
  #              verbatim (for backward compatibility the caller must include
  #              the trailing separator, exactly as the original expected).
  #   id:        vector of monitor ids (1..999); each maps to "NNN.csv".
  #
  # Returns:
  #   A data.frame with columns `id` and `nobs`, one row per requested id,
  #   in the same order as `id`.
  #
  # Improvement over the original: the result is built in one shot with
  # vapply() instead of growing a data.frame row-by-row inside the loop
  # (which copies the whole frame on every iteration).
  nobs <- vapply(id, function(i) {
    file_name <- paste(directory, formatC(i, width = 3, flag = "0"), ".csv", sep = "")
    df <- read.csv(file_name)
    # complete.cases() marks rows with no NA in any column; the sum is the
    # same count the original obtained via NROW(df[complete.cases(df), ]).
    as.numeric(sum(complete.cases(df)))
  }, numeric(1))
  data.frame(id = id, nobs = nobs)
}
|
1f783a67e359c0e4f85ee7b9595edd1d03481880
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610131652-test.R
|
cbf72e7f317a9b025be8581d254243ae22c9fb82
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 888
|
r
|
1610131652-test.R
|
# Reproduction case captured by the libFuzzer/valgrind harness for
# grattan:::anyOutside(). The input triple (a, b, x) is replayed and the
# result structure is printed for inspection.
testlist <- list(
  a = -65025L,
  b = -15007745L,
  x = c(-1L, -65281L, -48641L, -65281L, -16777216L, 255L, 100925439L,
        116916223L, 926365495L, 939523881L, -250L, -1L, -230L, -1L, -250L,
        50331647L, -14804225L, -16711680L, 63996L, -114819298L, -2145510657L,
        -62721L, -1L, -58880L, -604029440L, 16777211L, -16835046L, -16711717L,
        -1L, 100794623L, -1L, 1073686302L, 452984831L, -1L, -1L, -41473L,
        -16711681L, -11141121L, -16711681L, -1L, -58854L, 14408576L, 41L,
        -393991L, 656899072L, 58082L, -503316480L, 704623103L, -226L,
        452984831L, -1L, -215L, -250L, -54754L, -1342177281L, -1L, -1L, -163L,
        100859903L, -262401L, 1851129855L, -16320513L, -57830L, -65281L,
        -14804225L, -1L, -58625L, -1L, -63998L, -1L, -250L, 45088767L, -193L,
        -226L, 452929310L, 450625535L, -67174546L, 1429143551L, 505085951L,
        1511660287L, 520060954L)
)
# Direct named call; equivalent to the original do.call() over the list.
result <- grattan:::anyOutside(a = testlist$a, b = testlist$b, x = testlist$x)
str(result)
|
fb52131c02beaeb1bc8489618040f7bfa941c2a2
|
371064d7042c999784982112a77a2e6aa3b7da40
|
/pollutantmean.R
|
63b7e7f10c463e65470822b215df44ace3bc9c20
|
[] |
no_license
|
atenasadeghi/datasciencecoursera
|
81906ae6752f30fe53754bdf0ac3a6ec35033021
|
f89126799e4fa7a26bd6672a4c958ff58d2c3c16
|
refs/heads/master
| 2020-05-30T04:05:36.865684
| 2017-11-28T01:24:30
| 2017-11-28T01:24:30
| 40,325,797
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 581
|
r
|
pollutantmean.R
|
pollutantmean <- function(directory, pollutant, Id = 1:332) {
  # Mean of one pollutant across a set of monitor files, ignoring NA values.
  #
  # Args:
  #   directory: folder containing the monitor files "001.csv" .. "332.csv"
  #   pollutant: column name to average, e.g. "sulfate" or "nitrate"
  #   Id:        vector of monitor ids to include (default all 332)
  #
  # Returns: the grand mean = (sum of non-NA values) / (count of non-NA values)
  #          pooled over all requested files, identical to the original.
  total <- 0
  n_obs <- 0
  for (id in Id) {
    # sprintf("%03d") zero-pads the id, replacing the original's nested
    # if/else branches on id < 10 / id < 100.
    filepath <- sprintf("%s/%03d.csv", directory, id)
    values <- read.csv(filepath)[, pollutant]
    total <- total + sum(values, na.rm = TRUE)
    # Count of non-NA entries; same as NROW(na.omit(...)) in the original.
    n_obs <- n_obs + sum(!is.na(values))
  }
  total / n_obs
}
|
acba85f2a907e42c488584e7da18552eb1c71395
|
912ca5b887910e9f8f449a5aaa54688dc4ec9e54
|
/Bond prices/R script Bonds prices - tree-based methods.R
|
ffac48ccae6ea6dd26aa0734e512f00b55f8a428
|
[] |
no_license
|
bwalwyn/CV
|
a7bb60aa0f09517cbdd72640a45546e993ccb077
|
e12dc24c3bcaf3f3e9b7f20710a87b3a6f0022b1
|
refs/heads/master
| 2021-05-31T21:23:02.813951
| 2016-06-09T20:58:17
| 2016-06-09T20:58:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,784
|
r
|
R script Bonds prices - tree-based methods.R
|
#Ben Walwyn
#Analytics project R
#04/09/14
# Fits and compares tree-based regressors for bond trade prices: a single
# regression tree (with cost-complexity pruning), bagging, random forests
# (several mtry values), and gradient boosting -- then an autoregressive
# linear-model baseline cross-validated over increasing lag order.
# NOTE(review): results depend on RNG state and exact statement order
# (cv.tree / randomForest / gbm all consume the random stream).
#----------------------Part 1------------------------#
# NOTE(review): install.packages() is re-run on every execution of the
# script; these are normally one-off installs.
setwd("F:/Analytics")
install.packages("tree")
install.packages("MASS")
install.packages("randomForest")
install.packages("doParallel")
install.packages("foreach")
install.packages("ipred")
install.packages("gbm")
install.packages("DAAG")
library(DAAG)
library(gbm)
library(tree)
library(MASS)
library(randomForest)
library(doParallel)
library(foreach)
library(ipred)
library(stats)
options(scipen=999)
bond <- read.csv("bond25.csv",header=T)
# newdata holds the response plus its ten lags; data.frame() names the
# columns "bond.trade_price", "bond.trade_price_last1", ... which the
# lm() formula at the bottom of the script relies on.
newdata<-data.frame(bond$trade_price,bond$trade_price_last1,bond$trade_price_last2,
bond$trade_price_last3,bond$trade_price_last4,bond$trade_price_last5,
bond$trade_price_last6,bond$trade_price_last7,bond$trade_price_last8,
bond$trade_price_last9,bond$trade_price_last10)
#--- Exploratory analysis ---#
par(mfrow=c(1,2))
#variables
median(bond$trade_price)
length(bond)
#histogram of response
hist(bond$trade_price,breaks=75,col="lightblue",freq=F,xlab='Trade Price',main='Histogram of trade price')
#trade series
plot(bond$trade_price,type='l',xlab='Trade number',ylab='Trade Price',main='Series of trade prices',col='black')
# dashed 95% empirical band around the series
abline(a=quantile(bond$trade_price,0.025),b=0,col='red',lty=2)
abline(a=quantile(bond$trade_price,0.975),b=0,col='red',lty=2)
#correlation with prevuious trades
cor(newdata,bond$trade_price)
#plotting explanatory variables
par(mfrow=c(2,2))
par("oma"=c(0,0,2,0))
plot(bond$trade_price_last1,bond$trade_price)#strong relationship
plot(bond$trade_size,bond$trade_price)
plot(bond$current_coupon,bond$trade_price)#spreads out
plot(bond$time_to_maturity,bond$trade_price)#an extremely long term bond
mtext("Plots of relationships to trade price",outer=T,side=3,cex=1.5)
par(mfrow=c(1,1))
par("oma"=c(0,0,0,0))
#boxplots of relationships
par(mfrow=c(1,2))
par("oma"=c(0,0,2,0))
boxplot(bond$trade_price~as.factor(bond$is_callable),xlab='is_callable',ylab='trade_price')
boxplot(bond$trade_price~as.factor(bond$trade_type_last10),xlab='trade_type')
mtext('Boxplots of relationship of trade price to factor variables',outer=T,side=3,cex=1.5)
par(mfrow=c(1,1))
par("oma"=c(0,0,0,0))
### (1) Regression tree
# mindev=0 grows the tree fully; mindev=0.004 applies an early stopping rule.
full_tree <- tree(bond$trade_price~., data=bond,control=tree.control(nrow(bond),mindev=0))
summary(full_tree)
plot(full_tree)
stop_tree <- tree(bond$trade_price~., data=bond, control=tree.control(nrow(bond),mindev=0.004)) #defaults
summary(stop_tree)
plot(stop_tree)
text(stop_tree,cex=0.6)
#--- Cross-val on full tree
cross_val<-cv.tree(full_tree,K=100)
#plot the cross validation error against no. of terminal nodes and the tuning parameter
plot(cross_val$size,round(cross_val$dev),type='b',xlab="Number of terminal nodes",ylab="CV error",xlim=c(0,20),cex.lab=0.8,col="blue",cex.axis=0.8)
alpha<-round(cross_val$k)
axis(3, at=cross_val$size, lab=alpha, cex.axis=0.8)
mtext(expression(alpha), side=3,line=2, col="blue")
mtext(expression(bold("Regression tree cross-validation")),3,3,)
#optimum number of terminal nodes, minimzing overfitting
abline(v=9,col="red",lty=2)
#the selected value for the tuning parameter
best_alpha<-cross_val$k[cross_val$size==9]
#--- Tree Pruning
# +0.1 nudges k just past the breakpoint so prune.tree picks the 9-leaf tree
tree_prune <- prune.tree(full_tree, k=best_alpha+0.1)
#Interpretation
plot(tree_prune)
text(tree_prune, cex=0.6,col="darkgreen")
mtext(expression(paste("Plot of regression tree tuned to ", alpha, "=4420")),3,line=2,col="blue")
summary(tree_prune)
### (2) Bagging
#default #trees is 500
B <- 500 #bag samples and trees
core <- 5 #cores to be used for parallel
#Perform bagging in parallel
# mtry=57 = all predictors, which makes randomForest() equivalent to bagging
cl <- makeCluster(core)
registerDoParallel(cl)
getDoParWorkers()
t_bagpar<-system.time({ bag_dopar<-foreach(i=1:core, .combine="combine", .packages="randomForest") %dopar% {
randomForest(bond$trade_price~., data=bond, mtry=57, ntree=B/core, importance=TRUE)
}
})
mse_parallel <- mean((bag_dopar$predicted-bond$trade_price)^2)
stopCluster(cl)
#Bagging without parallel
t_bag<-system.time({ bag<-randomForest(bond$trade_price~., data=bond, mtry=57, ntree=500, importance=TRUE, na.action=na.exclude)})
### (3) Random forests
#default number of variables randomized in each permutation (deafult=19)
t_for20<-system.time({ forest_mdef<-randomForest(bond$trade_price~., data=bond, ntree=500, importance=TRUE, na.action=na.exclude)})
t_for5<-system.time({ forest_m5<-randomForest(bond$trade_price~., data=bond, mtry=5, ntree=500, importance=TRUE, na.action=na.exclude)})
t_for30<-system.time({ forest_m30<-randomForest(bond$trade_price~., data=bond, mtry=30, ntree=500, importance=TRUE, na.action=na.exclude)})
#parallel
cl2 <- makeCluster(core)
registerDoParallel(cl2)
t_for30par<-system.time({ forest_dopar<-foreach(i=1:core, .combine="combine", .packages="randomForest") %dopar% {
randomForest(bond$trade_price~., data=bond,mtry=30, ntree=B/core, importance=TRUE)
}
})
stopCluster(cl2)
attributes(forest_dopar)
### (4) Boosting
# Grid over interaction depth d in {1,2} and shrinkage lambda in
# {0.1, 0.01, 0.001}, each with 10-fold CV.
t_boostd1<-system.time({boost_d1_l1=gbm(bond$trade_price~., data=bond, distribution='gaussian',n.trees=1000, interaction.depth=1, shrinkage=0.1, cv.folds=10, bag.fraction=1, n.minobsinnode=10, n.core=7)})
system.time({boost_d1_l2=gbm(bond$trade_price~., data=bond, distribution='gaussian',n.trees=1000, interaction.depth=1, shrinkage=0.01, cv.folds=10, bag.fraction=1, n.minobsinnode=10, n.core=7)})
system.time({boost_d1_l3=gbm(bond$trade_price~., data=bond, distribution='gaussian',n.trees=1000, interaction.depth=1, shrinkage=0.001, cv.folds=10, bag.fraction=1, n.minobsinnode=10, n.core=7)})
t_boostd2<-system.time({boost_d2_l1=gbm(bond$trade_price~., data=bond, distribution='gaussian',n.trees=1000, interaction.depth=2, shrinkage=0.1, cv.folds=10, bag.fraction=1, n.minobsinnode=10, n.core=7)})
system.time({boost_d2_l2=gbm(bond$trade_price~., data=bond, distribution='gaussian',n.trees=1000, interaction.depth=2, shrinkage=0.01, cv.folds=10, bag.fraction=1, n.minobsinnode=10, n.core=7)})
system.time({boost_d2_l3=gbm(bond$trade_price~., data=bond, distribution='gaussian',n.trees=1000, interaction.depth=2, shrinkage=0.001, cv.folds=10, bag.fraction=1, n.minobsinnode=10, n.core=7)})
#--- Tuning parameter selection ---#
#pruning -> alpha selected best using graph
#bag: how many trees to fit
#forests
#comparison plot for m
plot(forest_m30,col="darkgreen",main="Errors: Bagging and Random Forests")
lines(forest_mdef$mse,col="blue")
lines(forest_m5$mse,col='red')
lines(bag$mse,col='purple')
legend("topright",c("m=5","m=20 (default)","m=30",'m=57 (bag)'),lty=c(1,1,1,1),col=c("red","blue","darkgreen",'purple'))
forest_mdef$mse[500] #fastest reduction in error
forest_m5$mse[500]
forest_m30$mse[500] #best but slower
bag$mse[500]
#boost
min(boost_d2_l1$cv.error)
par(mfrow=c(1,2))
par("oma"=c(1,1,2,0))
plot(boost_d1_l1$cv.error, type="l", ylab="CV error",ann=FALSE,lwd=2)
lines(boost_d1_l2$cv.error,col="blue",lwd=2)
lines(boost_d1_l3$cv.error,col="red",lwd=2)
# NOTE(review): 'cve' is never defined anywhere in this script, so this
# abline() (and the two uses below) will error. It presumably holds the
# single regression tree's CV error, to be drawn as a reference line --
# TODO confirm and define it before this point.
abline(cve,0,lty=2,col="darkgreen")
mtext("d=1", side=3,line=0)
plot(boost_d2_l1$cv.error, type="l",ann=FALSE,lwd=2)
abline(cve,0,lty=2,col="darkgreen")
text(800, cve+5, "regression tree")
lines(boost_d2_l2$cv.error,col="blue",lwd=2)
lines(boost_d2_l3$cv.error,col="red",lwd=2)
legend('topright',
legend=c(expression(lambda~"= 0.1"),expression(lambda~"= 0.01"),expression(lambda~"= 0.001")),
col=c("black","blue", "red"),
lwd=2,
bty='n')
mtext("d=2", side=3,line=0)
mtext("Boosted Regression Tree", outer = TRUE,side=3, cex = 1.5)
mtext("Number of trees", outer = TRUE,side=1, cex = 1)
mtext("CV error", outer = TRUE,side=2, cex = 1)
par("oma"=c(0,0,0,0))
par(mfrow=c(1,1))
#at the endpoints there d=2 and lambda =0.1 is the best.
#Possibly more trees need to be fitted
#--- Comparison of methods ---#
#OOB mse plot
#single tree(dashed line on cv error),bagging,random forest,boost
plot(bag,col="blue",main="Comparison of methods")
lines(forest_mdef$mse, col="darkgreen")
lines(boost_d2_l1$cv.error,col="red")
abline(v=which(boost_d2_l1$cv.error==min(boost_d2_l1$cv.error)),col="black",lty=2)
legend("topright", legend=c("Bagging", "Random Forest","Boosting"), col=c("blue", "darkgreen","red"), lwd=2,bty='n')
#Actual vs. predicted
#names
par(mfrow=c(2,2))
par("oma"=c(0,0,2,0))
plot(bond$trade_price,predict(tree_prune))
abline(1,1,col='red')
plot(bond$trade_price,predict(bag))
abline(1,1,col='red')
plot(bond$trade_price,predict(forest_mdef))
abline(1,1,col='red')
plot(bond$trade_price,predict(boost_d2_l1,n.trees=500))
abline(1,1,col='red')
mtext("Actual vs. predicted",side=3,outer=T)
par("oma"=c(0,0,0,0))
par(mfrow=c(1,1))
#boosted prediction expected to get close on the whole data set as the following tree is grown on the residuals, thereby reducing them.
#--- Interpretation ---#
#Variable Importance and partial plots
#bag
par("oma"=c(0,4,0,0))
colblue <- colorRampPalette(c("lightblue", "blue"))
# [50:57] keeps only the eight most important variables after sorting
barplot(sort(bag$importance[,2])[50:57],offset=200,cex.names=0.6,horiz=T,col=colblue(8),las=1,xpd=F,main="Bag Variable Importance",xlab="Node Imp")
par("oma"=c(0,0,2,0))
par(mfrow=c(1,2))
partialPlot(bag,bond,trade_price_last1,cex.main=0.8)
partialPlot(bag,bond,curve_based_price,cex.main=0.8)
partialPlot(bag,bond,trade_price_last10)
#random forest
colgreen <- colorRampPalette(c("green", "darkgreen"))
barplot(sort(forest_m30$importance[,2])[50:57],offset=200,cex.names=0.6,horiz=T,col=colgreen(8),las=1,xpd=F,main="RF Variable Importance",xlab="Node Imp")
partialPlot(forest_m30,bond,trade_price_last1,cex.main=0.8)
partialPlot(forest_m30,bond,curve_based_price,cex.main=0.8)
partialPlot(forest_m30,bond,trade_price_last10)
#boosting
colred <- colorRampPalette(c("orange", "red"))
barplot(sort(summary(boost_d2_l1)[1:8,2]),names.arg=summary(boost_d2_l1)[8:1,1],cex.names=0.6,las=1,horiz=T,col=colred(8),main="Boosted Variable Importance",xlab="Relative Importance")
plot(boost_d2_l1,i=c("trade_price_last1",'curve_based_price'))
plot(boost_d2_l1,i=c("trade_price_last1"))
plot(boost_d2_l1,i=c("curve_based_price"))
mtext(expression(bold('Partial plots for boosting')),side=3,outer=T)
par("oma"=c(0,0,0,0))
par(mfrow=c(1,1))
#most important variable are k asat traded prices and the j last curved prices, indicates an autoregressive model is suitable
#--- Parallel Computing and Time components ---#
#t's
#--- (b) Linear Model ---#
# Baseline: AR(i) linear models for i = 1..10 lags, each 10-fold
# cross-validated with DAAG::cv.lm; the per-order CV MSEs are collected
# in `ms` and plotted against lag order.
cl <- makeCluster(7)
registerDoParallel(cl)
ms<-foreach(i=1:10,.combine='c', .packages="DAAG") %dopar% {
fit <- lm(bond.trade_price~.,data=newdata[,1:(i+1)])
cv<-cv.lm(fit, df=newdata[,1:(i+1)],printit=T,plotit=F,m=10)
attributes(cv)$ms
}
stopCluster(cl)
plot(seq(1:10),unlist(ms),type='b',main="Plot of CV error against autoregressive order",xlab='Order',ylab='cv.error',col='brown')
unlist(ms)
|
2ac8556d55bc5f5ac128c350b4a57624e117d3bc
|
7fb057028dfc91b81f98bbef71f4fb62223d5520
|
/cars_plot.R
|
5882c04dbbad6a2effc872a38bdc0952fcfaae6d
|
[] |
no_license
|
Defra-Data-Science-Centre-of-Excellence/rmarkdown-plots
|
adea58fd702bafda92b8f593d529884e5cbfa065
|
4ffcc17974d7bab6c5e4ec168d2ef7c133b6a3ed
|
refs/heads/main
| 2023-02-23T07:35:02.803618
| 2021-01-29T15:20:10
| 2021-01-29T15:20:10
| 334,132,569
| 0
| 0
| null | 2021-01-29T15:20:11
| 2021-01-29T12:02:28
|
R
|
UTF-8
|
R
| false
| false
| 140
|
r
|
cars_plot.R
|
#' Scatter plot of fuel efficiency against cylinder count for `mtcars`
#'
#' Draws a base-graphics scatter plot of miles-per-gallon versus number of
#' cylinders using the built-in `mtcars` dataset.
cars_plot <- function() {
  plot(
    x = mtcars$mpg,
    y = mtcars$cyl,
    main = "Car fuel efficiency (MPG Vs CYL)"
  )
}
|
7e67347ed8ae8ca828f73a849a6c38ded0bc0fc5
|
0186f97cba792fafa20dffe0a3dc431838495c30
|
/R/session.R
|
c267c7712d9d6325c762778ffa51517c1f9def0a
|
[
"Apache-2.0"
] |
permissive
|
nagyistge/opencpu
|
8f78d6015ad7217aa1b2a6c6450be93de997d08c
|
0d664a5e2e934e184022f7c6a4cd4a373a253250
|
refs/heads/master
| 2021-01-20T03:17:01.826815
| 2017-04-15T14:23:49
| 2017-04-15T14:23:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,503
|
r
|
session.R
|
#create the regex to identify session keys
#create the regex to identify session keys
# A session key is "x" followed by (key.length + 1) lowercase hex digits;
# the +1 accounts for the leading "0" prepended by generate().
session_regex <- function(){
  hex_digits <- config("key.length") + 1
  paste0("^x[0-9a-f]{", hex_digits, "}$")
}
#' @importFrom openssl rand_bytes
# Session store for OpenCPU request handling. local() evaluates the block
# and the final `environment()` call makes that environment the value of
# `session`, so the functions below (generate, fork, eval, index, ...) are
# reachable as session$eval(), session$fork(), etc.
# NOTE(review): `req`, `res`, `config`, `extract`, `httpget_object`,
# `gettmpdir`, `saveDESCRIPTION` and `unload_session_namespaces` are defined
# elsewhere in the package -- not visible in this file.
session <- local({
#generates a random session hash
# Draws random hex keys prefixed "x0" until one maps to a directory that
# does not yet exist (collision avoidance by retry).
generate <- function(){
while(file.exists(sessiondir(
hash <- paste0("x0", substring(paste(rand_bytes(config("key.length")), collapse=""), 1, config("key.length")))
))){}
hash
}
#copies a session dir
# Duplicates an existing session under a freshly generated hash via a
# temporary staging directory; returns the new hash.
fork <- function(oldhash){
olddir <- sessiondir(oldhash);
forkdir <- tempfile("fork_dir");
stopifnot(dir.create(forkdir));
file.copy(olddir, forkdir, recursive=TRUE);
stopifnot(identical(list.files(forkdir), basename(olddir)));
newhash <- generate();
newdir <- sessiondir(newhash);
stopifnot(file.rename(list.files(forkdir, full.names=TRUE), newdir));
newhash
}
#evaluates something inside a session
# Runs `input` (R code) in a fresh environment layered on `args`, captures
# output/graphics/warnings via the evaluate package, persists everything
# into a new session directory, and writes the HTTP response.
#   storeval: if TRUE, the (visible or invisible) value is kept as `.val`
#   format:   response shape -- "json"/"print"/"pb" send the object itself,
#             "console" sends captured console text, otherwise a 201 with
#             the session's content listing.
eval <- function(input, args, storeval=FALSE, format="list"){
#create a temporary dir
execdir <- tempfile("ocpu_session_");
stopifnot(dir.create(execdir));
setwd(execdir);
#copy files to execdir
lapply(req$files(), function(x){
stopifnot(file.copy(x$tmp_name, basename(x$name)))
});
#setup handler
# Custom value handler: optionally stash the value, and render visible
# values (lists get a cheap summary because printing can be very slow).
myhandler <- evaluate::new_output_handler(value=function(myval, visible=TRUE){
if(isTRUE(storeval)){
assign(".val", myval, sessionenv);
}
if(isTRUE(visible)){
#note: print can be really, really slow
if(identical(class(myval), "list")){
cat("List of length ", length(myval), "\n");
cat(paste("[", names(myval), "]", sep="", collapse="\n"));
} else {
from("evaluate", "render")(myval);
}
}
invisible();
});
#create session for output objects
if(missing(args)){
args <- new.env(parent=globalenv())
} else {
args <- as.environment(args);
parent.env(args) <- globalenv();
}
#initiate environment
sessionenv <- new.env(parent=args);
#need to do this before evaluate, in case evaluate uses set.seed
hash <- generate();
# This is used by 'evaluate'
# Null PDF device so user code that plots does not create files directly.
options(device = function(file, width, height, paper, ...){
grDevices::pdf(NULL, width = 11.69, height = 8.27, paper = "A4r", ...)
par("bg" = "white")
})
# In OpenCPU 1.x this was executed inside another fork with a stricter apparmor profile
output <- evaluate::evaluate(input = input, envir = sessionenv, stop_on_error = 2, output_handler = myhandler);
#in case code changed dir
setwd(execdir)
#unload session namespaces, otherwise sessionInfo() crashes
unload_session_namespaces()
#store output
# Persist workspace, evaluation log, session info and lib paths as the
# hidden files that index() later enumerates.
save(file=".RData", envir=sessionenv, list=ls(sessionenv, all.names=TRUE), compress=FALSE);
saveRDS(output, file=".REval", compress=FALSE);
saveRDS(utils::sessionInfo(), file=".RInfo", compress=FALSE);
saveRDS(.libPaths(), file=".Rlibs", compress=FALSE);
saveDESCRIPTION(hash)
#does not work on windows
#stopifnot(file.rename(execdir, sessiondir(hash)));
#store results permanently
outputdir <- sessiondir(hash);
#First try renaming to destionation directory
if(!isTRUE(file.rename(execdir, outputdir))){
#When rename fails, try copying instead
suppressWarnings(dir.create(dirname(outputdir)));
stopifnot(file.copy(execdir, dirname(outputdir), recursive=TRUE));
setwd(dirname(outputdir));
stopifnot(file.rename(basename(execdir), basename(outputdir)));
unlink(execdir, recursive=TRUE);
}
#Shortcuts to get object immediately
if(format %in% c("json", "print", "pb")){
sendobject(hash, get(".val", sessionenv), format);
} else if(format %in% c("console")) {
sendobject(hash, extract(output, format), "text");
} else {
#default: send 201 with output list.
sendlist(hash)
}
}
# Writes a single object as the HTTP response, with Location and
# X-ocpu-session headers pointing at the stored session.
sendobject <- function(hash, obj, format){
tmppath <- sessionpath(hash);
outputpath <- paste0(req$uri(), tmppath, "/");
res$setheader("Location", outputpath);
res$setheader("X-ocpu-session", hash)
httpget_object(obj, format, "object");
}
#redirects the client to the session location
# 201 Created whose plain-text body lists the session's content paths.
sendlist <- function(hash){
tmppath <- sessionpath(hash);
path_absolute <- paste0(req$uri(), tmppath, "/");
path_relative <- paste0(req$mount(), tmppath, "/");
outlist <- index(sessiondir(hash));
text <- paste(path_relative, outlist, sep="", collapse="\n");
res$setheader("Content-Type", 'text/plain; charset=utf-8');
res$setheader("X-ocpu-session", hash)
res$redirect(path_absolute, 201, text)
}
#get a list of the contents of the current session
# Returns relative paths ("R/<name>", "graphics/<i>", "stdout", ...) for
# everything stored in the session directory. Side effect: changes the
# working directory to `filepath`.
index <- function(filepath){
#verify session exists
stopifnot(issession(filepath))
#set the dir
setwd(filepath)
#outputs
outlist <- vector();
#list data files
if(file.exists(".RData")){
myenv <- new.env();
load(".RData", myenv);
if(length(ls(myenv, all.names=TRUE))){
outlist <- c(outlist, paste("R", ls(myenv, all.names=TRUE), sep="/"));
}
}
#list eval files
if(file.exists(".REval")){
myeval <- readRDS(".REval");
if(length(extract(myeval, "graphics"))){
outlist <- c(outlist, paste("graphics", seq_along(extract(myeval, "graphics")), sep="/"));
}
if(length(extract(myeval, "message"))){
outlist <- c(outlist, "messages");
}
if(length(extract(myeval, "text"))){
outlist <- c(outlist, "stdout");
}
if(length(extract(myeval, "warning"))){
outlist <- c(outlist, "warnings");
}
if(length(extract(myeval, "source"))){
outlist <- c(outlist, "source");
}
if(length(extract(myeval, "console"))){
outlist <- c(outlist, "console");
}
}
#list eval files
if(file.exists(".RInfo")){
outlist <- c(outlist, "info");
}
#other files
sessionfiles <- file.path("files", list.files(recursive=TRUE))
if(length(sessionfiles)){
outlist <- c(outlist, sessionfiles)
}
return(outlist);
}
#actual directory
sessiondir <- function(hash){
file.path(gettmpdir(), "tmp_library", hash);
}
#http path for a session (not actual file path!)
sessionpath <- function(hash){
paste("/tmp/", hash, sep="");
}
#test if a dir is a session
issession <- function(mydir){
any(file.exists(file.path(mydir, c(".RData", ".REval"))));
}
environment();
});
|
d002205ecb210a33b0fbb8d523acf1d6905e0f89
|
1343b3fa694cecdb438a341c729230f35d6095ca
|
/test script.R
|
5b1ef3ec3cb7a9d6a6823c679999e6ac0e583595
|
[] |
no_license
|
aldosterone/IntroToGit
|
cf365fbb6ccd56275c69d8a7efaa33dbc0365f09
|
2c496696799f6f81ee3fdf96b74009040a9ac33c
|
refs/heads/master
| 2022-11-21T07:22:05.373983
| 2020-07-27T17:22:49
| 2020-07-27T17:22:49
| 282,958,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27
|
r
|
test script.R
|
# simple script: bind x, then evaluate an arithmetic expression
x <- 9
x - 7
|
aa1edd2d697b8d6a832bd0cfc75aa5b290ef3dcc
|
35d97acdeb97180f1bb4c3bdbd232a7bcbe17692
|
/examples/stLL.R
|
804ed2e8129f4c6e7516140097ce8d61dcbcb26d
|
[] |
no_license
|
jmhewitt/telefit
|
1b9d7e80a423689a83b584c5611035f874895485
|
6815ca6f472cf4d7d933b55823ad5349ad471073
|
refs/heads/master
| 2021-03-22T03:29:12.284545
| 2020-02-03T19:15:28
| 2020-02-03T19:15:28
| 62,270,043
| 1
| 2
| null | 2019-02-19T22:32:22
| 2016-06-30T01:41:06
|
R
|
UTF-8
|
R
| false
| false
| 450
|
r
|
stLL.R
|
# Example: evaluate the telefit spatio-temporal log-likelihood (stLL)
# at posterior-mean parameter estimates of a pre-fitted model.
# NOTE(review): statement order matters here -- set.seed() precedes the
# data loads and attach() exposes coprecip's components; do not reorder.
library(dplyr)
library(foreach)
library(itertools)
set.seed(2018)
data("coprecip")
data("coprecip.fit")
attach(coprecip)
# Posterior means, discarding the first 50 MCMC iterations as burn-in.
ests = coef(coprecip.fit, burn = 50)
# Log-likelihood at those estimates; sigmasq_r_eps fixed to 0.
ll = stLL(stData = coprecip, stFit = coprecip.fit,
beta = matrix(ests$beta, ncol = 2),
sigmasq_y = ests$sigmasq_y, sigmasq_r = ests$sigmasq_r,
sigmasq_eps = ests$sigmasq_eps,
rho_y = ests$rho_y, rho_r = ests$rho_r,
sigmasq_r_eps = 0)
|
a564bb36ab2c6515d482ae009b7adac72414b8d5
|
d3c76f9d023e644faaa78e96930265adb39891c3
|
/AodToPm_Offline.R
|
1deb403d6834d76b7204c3f88882a749bba88b96
|
[] |
no_license
|
JieLuoybfq/Data-Merging-and-Interpolation-Methods-MODIS-Terra-and-Aqua-Daily-Aerosol-Case
|
32d797882860c1e2dc0fd76c582336c94adc776d
|
c6e975abfef92e765a1a29c4466ee46415ecb0b1
|
refs/heads/master
| 2021-06-23T07:26:51.533089
| 2017-08-30T14:53:17
| 2017-08-30T14:53:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,427
|
r
|
AodToPm_Offline.R
|
# TODO: create PM image from aod and temp (PM regression)
#
# Author: phamha
###############################################################################
#import library
library(gstat)
library(base)
library(RPostgreSQL)
library(stringr)
library(raster)
library(gdalUtils)
library(rgdal)
library(rPython)
# Database connection settings.
# SECURITY NOTE(review): credentials are hardcoded in source; prefer
# environment variables or a config file outside version control.
host_name = '172.16.81.252'
database_name = 'apom'
user_name = 'postgres'
password = 'postgres'
# Normalization constants for AOD and average temperature, one pair per
# satellite (mod = Terra MOD04, myd = Aqua MYD04). Values are centered with
# mean_* and scaled by abs_* before the regression.
# NOTE(review): presumably derived from the regression training data --
# confirm against the model-fitting pipeline.
mean_aod_mod = 0.366114584
abs_aod_mod = 1.029983261
mean_aod_myd = 0.410938839
abs_aod_myd = 1.098215224
mean_avg_temp_mod = 28.58782188
abs_avg_temp_mod = 12.02451107
mean_avg_temp_myd = 28.63081177
abs_avg_temp_myd = 12.06750096
# Input (resampled satellite) and output (PM product) folder fragments.
res_folder_mod = "res/SatResampMOD04"
res_folder_myd = "res/SatResampMYD04"
prod_folder_mod = "prod/ProdMODPM"
prod_folder_myd = "prod/ProdMYDPM"
# NOTE(review): Windows path is active; the Linux deployment path is the
# commented alternative below.
code_folder = "C:/"
#code_folder = "/var/www/html/MODIS/HaPV/MODIS_Quytrinh/"
met_folder = paste(code_folder,"MetTiff/",sep="")
shape_file = paste(code_folder,"BDNEN/VNM_adm0.shp",sep="")
tif2raster_file = paste(code_folder,"tif2rasref.py",sep="")
create_aqi_file = paste(code_folder,"MODIS_CLIP_PM_AQI.py",sep="")
create_png_file = paste(code_folder,"create_png.py",sep="")
create_aot_file = paste(code_folder,"aot_processing.py",sep="")
modis_log_file = paste(code_folder,"modis.log",sep="")
cross_result_file = paste(code_folder,"cross_kriging.csv",sep="")
# SQL templates; get_time_query's %s is filled via sprintf with a table name
# and the filename pattern is appended by the caller.
get_time_query = "select aqstime from %s where filename like "
mod_query = "SELECT distinct mod04.aqstime, mod04.filename,mod04.filepath,mod07.filename as temp_filename,mod07.filepath as temp_filepath FROM res.satresampmod04 as mod04 inner join res.satresampmod07temperature as mod07 ON (mod04.aqstime = mod07.aqstime) where mod07.filename like'%_T_10km%' and mod04.aqstime <='2013-12-26 00:00:00'"
insert_mod_query = "INSERT INTO prodpm.prodmodispm_vn_collection0(aqstime, rasref, filename, filepath, gridid,pmid,max,min,avg, type,sourceid) VALUES ('"
#Regression function
#Regression function
# Linear PM2.5 prediction from normalized AOD and average temperature.
#   sate_data: "mod" selects the Terra coefficients; any other value
#              selects the Aqua ("myd") coefficients.
#   aod, avg_temp: numeric (vectorized) normalized predictors.
# Returns the predicted PM2.5 value(s).
regress_predict = function(sate_data, aod, avg_temp){
  if (sate_data == "mod") {
    coefs <- c(aod = 21.44446906, temp = -26.98361769, intercept = 25.28728856)
  } else {
    coefs <- c(aod = 27.4005404, temp = -18.90869037, intercept = 18.99322277)
  }
  coefs[["aod"]] * aod + coefs[["temp"]] * avg_temp + coefs[["intercept"]]
}
#get data from DB
getDataFromDB <- function(sql_command) {
out = tryCatch(
{
driver = dbDriver("PostgreSQL")
connect = dbConnect(driver,dbname = database_name,host = host_name,port=5432,user = user_name,password= password)
rs = dbSendQuery(connect,sql_command)
data=fetch(rs,n=-1)
#cat("connect to DB sucessful",file="C:/test.txt",sep="\n",append=TRUE)
dbDisconnect(connect)
dbUnloadDriver(driver)
return (data)
},
error=function(cond) {
cat("Error:",cond$message,"\n",file = modis_log_file,sep="",append=TRUE)
return(NA)
},
warning=function(cond) {
cat("warning:",cond$message,"\n",file = modis_log_file,sep="",append=TRUE)
return(NA)
},
finally={
#dbDisconnect(connect)
#dbUnloadDriver(driver)
#print("done")
}
)
return(out)
}
#insert data to DB
insertDataToDB <- function(sql_command) {
out = tryCatch(
{
driver = dbDriver("PostgreSQL")
connect = dbConnect(driver,dbname = database_name,host = host_name,port=5432,user = user_name,password= password)
rs = dbSendQuery(connect,sql_command)
dbDisconnect(connect)
dbUnloadDriver(driver)
return (1)
},
error=function(cond) {
cat("Error:",cond$message,"\n",file = modis_log_file,sep="",append=TRUE)
return (NA)
},
warning=function(cond) {
cat("warning:",cond$message,"\n",file = modis_log_file,sep="",append=TRUE)
return (NA)
},
finally={
#dbDisconnect(connect)
#dbUnloadDriver(driver)
#print("done")
}
)
return(out)
}
#Create Kriging image from regression image
createKrigingImage = function(regressPm_file){
regressPm_mask_file = str_replace(regressPm_file,".tif","_mask.tif")
file.copy(regressPm_file,regressPm_mask_file)
gdal_rasterize(shape_file,regressPm_mask_file,b=1,i=TRUE,burn=-9999,l="VNM_adm0")
#PM values
pmRaster=raster(regressPm_mask_file)
pm=values(pmRaster)
corxy=coordinates(pmRaster)
x=corxy[,'x']
y=corxy[,'y']
totalCell=length(pmRaster)
cell = c(1:totalCell)
table=data.frame(cell,x,y,pm)
newTable=table
#testTable=subset(table,pm<0)
#trainTable=subset(table,pm>=0)
trainTable=subset(table,!is.na(pm)&pm!=-9999)
testTable=subset(table,is.na(pm))
auto_trainTable = trainTable
coordinates(auto_trainTable) =~ x+y
auto_variogram = autofitVariogram(pm~1,auto_trainTable)
auto_model = auto_variogram$var_model$model[2]
auto_sill = auto_variogram$var_model$psill[2]
auto_range = auto_variogram$var_model$range[2]
#caculate variogram
empiVario=variogram(pm~1,locations=~x+y,data=trainTable)
sphModel=vgm(psill=auto_sill,model=auto_model,nugget=0,range=auto_range)
sphFit=fit.variogram(empiVario,sphModel)
#sph fit
#sill=min(empiVario$gamma)
#sphModel=vgm(psill=sill,model="Sph",nugget=0,range=min(empiVario$dist))
#sphModel=vgm(model="Sph",nugget=0,range=1)
#sphFit=fit.variogram(empiVario,sphModel)
universal_result=krige(id="pm",formula=pm~x+y,data=trainTable,newdata=testTable,model=sphFit,locations=~x+y)
#edit tiff
newTable$pm[is.na(newTable$pm)] = universal_result[,3]
universalPMRaster=pmRaster
universalPMRaster[1:totalCell]=newTable$pm
#universalPMValue=universal_result[,3]
#universalPMRaster[1:totalCell]=universalPMValue
#edit error tiff
#errorPMRaster=pmRaster
#errorPMValue=universal_result[,4]
#errorPMRaster[1:totalCell]=errorPMValue
#save uk result to tiff
uk_file = str_replace(regressPm_file,"rg.tif","uk.tif")
writeRaster(universalPMRaster,filename=uk_file,format="GTiff",overwrite=TRUE)
gdal_rasterize(shape_file,uk_file,b=1,i=TRUE,burn=-9999,l="VNM_adm0")
#set n/a value
new_uk_raster = raster(uk_file)
new_uk_value = values(new_uk_raster)
new_uk_value[new_uk_value==-9999]<-NA
new_uk_value[new_uk_value<0]<-0
new_uk_raster[1:totalCell] = new_uk_value
writeRaster(new_uk_raster,filename=uk_file,format="GTiff",overwrite=TRUE)
# save uk error to tiff
# error_file = str_replace(regressPm_file,"rg.tif","error.tif")
# writeRaster(errorPMRaster,filename=error_file,format="GTiff",overwrite=TRUE)
# gdal_rasterize(shape_file,error_file,b=1,i=TRUE,burn=-9999,l="VNM_adm0")
# cross-validation
universal_result_3=krige.cv(pm~x+y,trainTable,sphFit,locations=~x+y,nfold=3)
# Universal statis
universal_cor=cor(universal_result_3$var1.pred,universal_result_3$observed)
universal_rmse=sqrt(sum((universal_result_3$residual)^2)/nrow(universal_result_3))
universal_re=sum(abs(universal_result_3$residual)/universal_result_3$observed)/nrow(universal_result_3)
filename = uk_file
samples = nrow(trainTable)
r2 = universal_cor*universal_cor
rmse = universal_rmse
re = universal_re*100
rs = read.csv(cross_result_file, header=T, sep=",")
rs = rbind(rs,data.frame(filename,sample,r2,rmse,re))
write.csv(rs,cross_result_file)
}
# Create regression and kriging PM2.5 images from a resampled AOD raster and
# insert the kriged result into the database.
#
# Args:
#   sate_data: satellite product selector -- "mod" (Terra MOD04) or anything
#              else for Aqua (MYD04); selects DB table, folders and the
#              normalisation statistics.
#   aod_file:  path to the resampled AOD GeoTIFF to process.
#   source_id: numeric source identifier stored with the DB record.
#
# Side effects: writes *_rg.tif (regression) and *_uk.tif (kriging) rasters,
# appends progress lines to the MODIS log file, and inserts one row into the
# database. Relies on globals defined elsewhere in this file (queries, folder
# paths, normalisation stats, shape_file, met_folder, helper functions).
create_pm_image = function(sate_data, aod_file, source_id) {
  if (sate_data == "mod") {
    type = 0
    time_query = sprintf(get_time_query, "res.satresampmod04")
    res_folder = res_folder_mod
    prod_folder = prod_folder_mod
    min_aod = mean_aod_mod
    max_aod = abs_aod_mod
    min_temp = mean_avg_temp_mod
    max_temp = abs_avg_temp_mod
  } else {
    type = 1
    time_query = sprintf(get_time_query, "res.satresampmyd04")
    res_folder = res_folder_myd
    prod_folder = prod_folder_myd
    min_aod = mean_aod_myd
    max_aod = abs_aod_myd
    min_temp = mean_avg_temp_myd
    max_temp = abs_avg_temp_myd
  }
  # Look up the acquisition time of this granule via its file name
  file_name = basename(aod_file)
  file_name = str_replace(file_name, ".tif", "")
  time_query = paste(time_query, "'", file_name, "%'", sep = "")
  data = getDataFromDB(time_query)
  # NOTE(review): is.na() on a data frame returns a matrix, so this guard only
  # works if getDataFromDB() returns a scalar NA on failure -- confirm that
  # contract before changing this condition.
  if (!is.na(data)) {
    mod04_aqstime = data$aqstime[1]
    aqstime = strptime(mod04_aqstime, format = "%Y-%m-%d %H:%M:%S")
    aqstime = aqstime + 25200  # shift to local time (UTC+7, in seconds)
    month = format.Date(aqstime, "%m")
    year = format.Date(aqstime, "%Y")
    # Crop the AOD raster to the country boundary shapefile
    aod_mask_file = str_replace(aod_file, ".tif", "_mask.tif")
    file.copy(aod_file, aod_mask_file)
    gdal_rasterize(shape_file, aod_mask_file, b = 1, i = TRUE, burn = -9999, l = "VNM_adm0")
    aod_dataset = raster(aod_mask_file)
    aod_dataset[aod_dataset[] == -9999] <- NA
    if (file.exists(aod_mask_file)) {
      file.remove(aod_mask_file)
    }
    # log masking step to file
    mask_str = "[ %s ] Mask image [ %s ] sucessful"
    mask_log = sprintf(mask_str, Sys.time(), aod_file)
    cat(mask_log, "\n", file = modis_log_file, sep = "", append = TRUE)
    aod_data = values(aod_dataset)
    aod_data = aod_data * 0.00100000004749745  # MODIS AOD scale factor
    corxy = coordinates(aod_dataset)
    x = corxy[, 'x']
    y = corxy[, 'y']
    # Monthly mean temperature raster used as second regression predictor
    avg_temp_file = paste(met_folder, "temp", as.numeric(month), ".tif", sep = "")
    avg_temp_dataset = raster(avg_temp_file)
    avg_temp_data = values(avg_temp_dataset)
    # Normalise AOD and temperature using the product-specific statistics
    aod_data = (aod_data - min_aod) / max_aod
    avg_temp_data = (avg_temp_data - min_temp) / max_temp
    # Regress PM2.5 from the normalised predictors
    pm25 = regress_predict(sate_data, aod_data, avg_temp_data)
    total_pixel = sum(!is.na(aod_data))
    ratio = total_pixel / 2024 * 100  # 2024: pixel count covering the study area
    print(paste("Pixel:", total_pixel, "Cloud ratio:", ratio, sep = " "))
    ############## CREATE REGRESSION IMAGES
    # Drop pixels whose normalised predictors fall outside [-1, 1].
    # Bug fix: the comparisons were written "x<-1", which R parses as an
    # assignment (x <- 1 | ...), overwriting the columns instead of
    # filtering on "less than -1".
    table = data.frame(x, y, aod_data, avg_temp_data, pm25)
    table$pm25[table$aod_data < -1 | table$aod_data > 1 |
      table$avg_temp_data < -1 | table$avg_temp_data > 1] <- NA
    # save regression image on the same grid as the AOD raster
    og_raster = aod_dataset
    totalCell = ncell(og_raster)
    og_raster[1:totalCell] = table$pm25
    # Build the output name: strip the product suffix (DT for MOD04, DB for
    # MYD04), then append the regression marker and switch to prod folder.
    # Bug fix: the replacements previously restarted from aod_file each time,
    # so the results of the first two str_replace() calls were discarded.
    pm_file = str_replace(aod_file, ".hdf_DT_10km", "")  # mod04 suffix
    pm_file = str_replace(pm_file, ".hdf_DB_10km", "")   # myd04 suffix
    pm_file = str_replace(pm_file, ".tif", "_rg.tif")
    pm_file = str_replace(pm_file, res_folder, prod_folder)
    # create output path
    out_path = dirname(pm_file)
    dir.create(path = out_path, showWarnings = FALSE, recursive = TRUE, mod = "777")
    writeRaster(og_raster, filename = pm_file, format = "GTiff", overwrite = TRUE)
    #gdal_rasterize(shape_file,pm_file,b=1,i=TRUE,burn=-9999,l="VNM_adm0")
    # write to log file
    mask_str = "[ %s ] create regression image [ %s ] sucessful"
    mask_log = sprintf(mask_str, Sys.time(), pm_file)
    cat(mask_log, "\n", file = modis_log_file, sep = "", append = TRUE)
    # Interpolate the regression image by kriging
    createKrigingImage(pm_file)
    print ("Create Kriging image good!");
    uk_file = str_replace(pm_file, "rg.tif", "uk.tif")
    # write finish to log file
    mask_str = "[ %s ] create kriging image [ %s ] sucessful"
    mask_log = sprintf(mask_str, Sys.time(), uk_file)
    cat(mask_log, "\n", file = modis_log_file, sep = "", append = TRUE)
    # Convert the kriged GeoTIFF into a DB raster reference via Python helper
    python.assign("raster_file", uk_file)
    python.load(tif2raster_file)
    raster_ref = python.get("raster_ref")
    uk_raster = raster(uk_file)
    uk_value = values(uk_raster)
    uk_value = uk_value[uk_value != -9999]  # drop nodata before statistics
    max_value = max(uk_value, na.rm = TRUE)
    min_value = min(uk_value, na.rm = TRUE)
    avg_value = mean(uk_value, na.rm = TRUE)
    # Split path into relative folder and file name for the DB record;
    # mid_index anchors on the last "M" (start of the MOD/MYD file name).
    start_index = regexpr("apom/prod", uk_file)
    mid_index = regexpr("M[^M]*$", uk_file)
    end_index = nchar(uk_file)
    uk_file_path = substr(uk_file, start_index, mid_index - 1)
    uk_file_name = substr(uk_file, mid_index, end_index)
    aqstime2 = aqstime - 25200  # store the timestamp back in UTC
    query = paste(insert_mod_query, aqstime2, "'::timestamp, '", raster_ref, "'::raster, '", uk_file_name, "', '", uk_file_path, "', 1, 1, ", max_value, ", ", min_value, ", ", avg_value, ", ", type, ", ", source_id, ")", sep = "")
    #print(query)
    # insert pm image record into the database
    out = insertDataToDB(query)
    if (!is.na(out)) {
      print ("Insert to database good!");
      mask_str = "[ %s ] insert kriging image [ %s ] to DB sucessful"
      mask_log = sprintf(mask_str, Sys.time(), uk_file)
      cat(mask_log, "\n", file = modis_log_file, sep = "", append = TRUE)
    } else {
      print ("Can not insert to database");
    }
    ############# END CODE
    # Log the cloud-free pixel ratio. 30% was apparently once a processing
    # threshold; both branches now only write log lines.
    if (ratio >= 30) {
      mask_str = "[ %s ] data ratio is [ %s ]"
      mask_log = sprintf(mask_str, Sys.time(), ratio)
      cat(mask_log, "\n", file = modis_log_file, sep = "", append = TRUE)
    } else {
      mask_str = "[ %s ] data ratio is [ %s ]"
      mask_log = sprintf(mask_str, Sys.time(), ratio)
      cat(mask_log, "\n", file = modis_log_file, sep = "", append = TRUE)
      # write finish to log file
      mask_str = "[ %s ] Process file name [ %s ] finish"
      mask_log = sprintf(mask_str, Sys.time(), aod_file)
      cat(mask_log, "\n", file = modis_log_file, sep = "", append = TRUE)
    }
  }
}
# --- Manual test scaffolding ------------------------------------------------
# Defaults used when exercising create_pm_image() by hand.
sat_data <- "mod"
source_id <- 0
# Example input granule:
#aod_file = "/apom_data/apom/res/SatResampMOD04/2015/MOD04_L2.A2015107.0310.051.2015107141007/MOD04_L2.A2015107.0310.051.2015107141007.hdf_DT_10km.tif"
# Example invocation (mod/myd, AOT file, temperature file):
# create_pm_image("mod","linear","4np",aod_file,temp_file, source_id)
# Batch driver kept for reference: pull pending granules from the DB and
# process each one in turn.
#insertDataToDB(mod_query)
#data = insertDataToDB(mod_query)
#total_record = nrow(data)
#print(total_record)
#for(i in 1:total_record){
#  aod_filename = str_trim(data$filename[i])
#  aod_path = str_trim(data$filepath[i])
#  aod_file = paste(data_folder,aod_path,aod_filename,sep = "")
#  temp_filename = str_trim(data$temp_filename[i])
#  temp_path = str_trim(data$temp_filepath[i])
#  temp_file = paste(data_folder,temp_path,temp_filename,sep = "")
#  create_pm_image("mod","linear","4np",aod_file,temp_file)
#}
|
9a009d1a2b4ec464ea4b86e1bec9f27a4824e520
|
89c966c31c6fee422bcf63cbd38675a41b910ada
|
/model/select_best.R
|
795cdab4ee79f0478bcce5f394b96f283eba7636
|
[] |
no_license
|
Yixf-Education/course_Statistics_Story
|
ad17b0373d35f506131af209b94e41c4956c4d42
|
5a16791517169d657d00e5470f2cf1eb0e722e6b
|
refs/heads/master
| 2023-04-03T02:45:53.099737
| 2023-04-01T08:13:43
| 2023-04-01T08:13:43
| 81,038,740
| 34
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,847
|
r
|
select_best.R
|
#!/usr/bin/Rscript
# Monte-Carlo study of the classic "secretary problem" stopping rule on 100
# ranked candidates: reject the first `cutoff` candidates, remember their
# maximum, then accept the first later candidate that beats it (or the last
# candidate if none does). The script scans cutoffs coarsely, then refines
# around the optimum.
library(tidyverse)

# Run `reps` simulations for each value in `cutoffs`; return a data frame
# with one row per run (columns: cutoff, result = rank of the accepted
# candidate, 100 being best).
# Output vectors are preallocated: the original grew a data frame with
# bind_rows() inside the loop, which copies the accumulator on every
# iteration (quadratic in total work).
simulate_cutoffs <- function(cutoffs, reps) {
  total <- length(cutoffs) * reps
  cutoff_out <- numeric(total)
  result_out <- numeric(total)
  k <- 0L
  for (cutoff in cutoffs) {
    for (i in seq_len(reps)) {
      x <- sample(1:100, size = 100)
      max_pass <- max(x[1:cutoff])            # best among the rejected batch
      x_keep <- x[(cutoff + 1):100]
      x_select <- head(x_keep[x_keep > max_pass], 1)
      k <- k + 1L
      cutoff_out[k] <- cutoff
      # Accept the first candidate beating the batch, else settle for the last.
      result_out[k] <- if (length(x_select) == 1) x_select else x[100]
    }
  }
  data.frame(cutoff = cutoff_out, result = result_out)
}

# Coarse scan: cutoffs 10..90 in steps of 10, 10,000 runs each.
dfa <- simulate_cutoffs(seq(from = 10, to = 90, by = 10), 10000)
write_tsv(dfa, "all_results.txt")
g <- ggplot(dfa, aes(x = result)) + geom_density() + facet_grid(rows = vars(cutoff))
ggsave("all_density.png", g, width = 15, height = 20)
dfam <- dfa %>% group_by(cutoff) %>% summarise(median = median(result), mean = mean(result))
write_tsv(dfam, "all_median.txt")
dfac <- dfa %>% filter(result >= 98) %>% group_by(cutoff, result) %>% summarise(number = n()) %>% mutate(percent = number / 10000 * 100)
write_tsv(dfac, "all_number.txt")

# Refined scan around the optimum: cutoffs 30..40, 100,000 runs each.
dfb <- simulate_cutoffs(seq(from = 30, to = 40, by = 1), 100000)
write_tsv(dfb, "refine_results.txt")
g <- ggplot(dfb, aes(x = result)) + geom_density() + facet_grid(rows = vars(cutoff))
ggsave("refine_density.png", g, width = 15, height = 20)
dfbm <- dfb %>% group_by(cutoff) %>% summarise(median = median(result), mean = mean(result))
write_tsv(dfbm, "refine_median.txt")
dfbc <- dfb %>% filter(result >= 98) %>% group_by(cutoff, result) %>% summarise(number = n()) %>% mutate(percent = number / 100000 * 100)
write_tsv(dfbc, "refine_number.txt")
|
730fb4f1e745104ba40d3be89aeb476a8b9a32ac
|
76dd1344fcb157f5f557232b473890eaae3a06f2
|
/ceratodon_area_v_biomass.R
|
15663ac1d50e8727d5d75a42839e6013c1e4a867
|
[] |
no_license
|
sarahcarey/area_v_biomass_Ceratodon
|
956a1277db8128742f23953ae0c72e224728cca1
|
0a0f6a535e27039117ea0db90a5b3b33c6ec6a0d
|
refs/heads/master
| 2021-04-08T15:07:29.785421
| 2020-03-20T17:58:15
| 2020-03-20T17:58:15
| 248,785,791
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,187
|
r
|
ceratodon_area_v_biomass.R
|
## R script for Area from image analyses accurately estimates dry-weight biomass of juvenile moss tissue
## Burtscher and List et al.
## Script by W. Burtscher and S. Carey
## R version 3.5.3
#### import data with averages for clonal replicates excluding dead replicates for stats and scatterplot ####
# these data can be found in Table S1.
growth_data_nodead <- read.csv("ceratodon_area_v_biomass_avg.csv")
# Week is a grouping label, not a quantity -- treat it as a factor
growth_data_nodead$Week <- as.factor(growth_data_nodead$Week)
# parse data by week (one data frame per week of growth)
growth_data_nodead_week1 <- subset(growth_data_nodead, Week=="1")
growth_data_nodead_week2 <- subset(growth_data_nodead, Week=="2")
growth_data_nodead_week3 <- subset(growth_data_nodead, Week=="3")
#### import unaveraged data excluding dead replicates for box plots ####
# these data can be found in Table S2.
raw_data <- read.csv("ceratodon_area_v_biomass_raw.csv")
raw_data$Week <- as.factor(raw_data$Week)
# parse data by week
raw_data_week1 <- subset(raw_data, Week=="1")
raw_data_week2 <- subset(raw_data, Week=="2")
raw_data_week3 <- subset(raw_data, Week=="3")
# Fix the isolate display order for week-1 box plots (female/male pair per origin)
raw_data_week1$Individual = factor(raw_data_week1$Individual,c("Alsk_F","Alsk_M","Chile_F","Chile_M","Dur_F","Dur_M","Ecud_F","Ecud_M","Ren_F","Ren_M"))
#### Pearson correlation between colony area and dry-weight biomass, per week ####
# Week 1: the relationship is still weak early in growth.
area_biomass_corr_week1 <- with(growth_data_nodead_week1, cor(Area, Biomass))
area_biomass_corr_week1
# 0.3890323
# Week 2: area tracks biomass closely from the second week onward.
area_biomass_corr_week2 <- with(growth_data_nodead_week2, cor(Area, Biomass))
area_biomass_corr_week2
# 0.8580126
# Week 3
area_biomass_corr_week3 <- with(growth_data_nodead_week3, cor(Area, Biomass))
area_biomass_corr_week3
# 0.8563432
#### ANOVA of biomass and inoculum size for each week ####
# One-way ANOVA per week: does initial inoculum size predict final biomass?
# The autoprinted summary() tables are recorded in the comments below.
# week 1 (inoculum size significant only in week 1)
biomass_aov_nodead_week1 <- aov(Biomass ~ Inoculum_Size, data=growth_data_nodead_week1)
summary(biomass_aov_nodead_week1)
#Df Sum Sq Mean Sq F value Pr(>F)
#Inoculum_Size 1 2.195 2.195 28.51 4.48e-05 ***
# Residuals 18 1.386 0.077
# week 2
biomass_aov_nodead_week2 <- aov(Biomass ~ Inoculum_Size, data=growth_data_nodead_week2)
summary(biomass_aov_nodead_week2)
#Df Sum Sq Mean Sq F value Pr(>F)
#Inoculum_Size 1 4.43 4.426 2.139 0.161
#Residuals 18 37.25 2.069
# week 3
biomass_aov_nodead_week3 <- aov(Biomass ~ Inoculum_Size, data=growth_data_nodead_week3)
summary(biomass_aov_nodead_week3)
#Df Sum Sq Mean Sq F value Pr(>F)
#Inoculum_Size 1 8.24 8.240 1.127 0.302
#Residuals 18 131.56 7.309
#### ANOVA of area and inoculum size for each week ####
# Same design with colony area as the response; no week shows a
# significant inoculum-size effect.
# week 1
area_aov_nodead_week1 <- aov(Area ~ Inoculum_Size, data=growth_data_nodead_week1)
summary(area_aov_nodead_week1)
#Df Sum Sq Mean Sq F value Pr(>F)
#Inoculum_Size 1 3.6 3.61 0.038 0.847
#Residuals 18 1690.3 93.90
# week 2
area_aov_nodead_week2 <- aov(Area ~ Inoculum_Size, data=growth_data_nodead_week2)
summary(area_aov_nodead_week2)
#Df Sum Sq Mean Sq F value Pr(>F)
#Inoculum_Size 1 839 839 0.208 0.654
#Residuals 18 72463 4026
# week 3
area_aov_nodead_week3 <- aov(Area ~ Inoculum_Size, data=growth_data_nodead_week3)
summary(area_aov_nodead_week3)
#Df Sum Sq Mean Sq F value Pr(>F)
#Inoculum_Size 1 962 962 0.088 0.77
#Residuals 18 196175 10899
### plot figures and save in TIFF format ####
# Figure 1: scatterplot of area vs biomass for each week of growth
# (one panel per week, with the least-squares fit line overlaid).
tiff("Figure1.tiff", units="in", width=8, height=3, res=300)
par(mar=c(4,4,1,1), oma=c(0,0,0,0), mgp=c(4.5,1,0), mfrow=c(1,3))
plot(growth_data_nodead_week1$Area, growth_data_nodead_week1$Biomass,
     font.main = 2, main="(a)",
     xlab = "",
     ylab = "",
     xlim=c(0,50),
     ylim=c(0,2),
     cex.lab=1, cex.main=1, cex.axis=0.75,
     pch=1, cex=1, col="black")
abline(lm(growth_data_nodead_week1$Biomass~growth_data_nodead_week1$Area))
title(ylab="Biomass (mg)",line=2, cex.lab=1.25)
plot(growth_data_nodead_week2$Area, growth_data_nodead_week2$Biomass,
     font.main = 2, main="(b)",
     xlab = "",
     ylab= "",
     xlim=c(0,250),
     ylim=c(0,8),
     cex.lab=1, cex.main=1, cex.axis=0.75,
     pch=1, cex=1, col="black")
abline(lm(growth_data_nodead_week2$Biomass~growth_data_nodead_week2$Area))
title(xlab = expression(Area~(mm^{2})),line=3, cex.lab=1.25)
plot(growth_data_nodead_week3$Area, growth_data_nodead_week3$Biomass,
     font.main = 2, main="(c)",
     xlab = "",
     ylab= "",
     xlim=c(0,450),
     ylim=c(0,15),
     cex.lab=1, cex.main=1, cex.axis=0.75,
     pch=1, cex=1, col="black")
abline(lm(growth_data_nodead_week3$Biomass~growth_data_nodead_week3$Area))
dev.off()
# Figure 2: boxplot of biomass for each isolate and inoculum size per week;
# raw replicate values are overlaid with stripchart().
tiff("Figure2.tiff", units="in", width=8, height=3, res=300)
par(mar=c(2,4,1,1), oma=c(2,0,0,0), mgp=c(4.5,1,0), mfrow=c(1,3))
boxplot(Biomass ~ FLabel, data=raw_data_week1, las=2,
        font.main = 2, main = "(a)",
        col=c('gray90','gray90', 'gray40', 'gray40'),
        ylab="",
        cex.lab=1, cex.main=1, cex.axis=0.75,
        ylim=c(0,20))
stripchart(Biomass ~ FLabel, vertical=TRUE, data=raw_data_week1,
           add=TRUE, pch=1, cex=1, col='black')
title(ylab="Biomass (mg)", line=2, cex.lab=1.25)
boxplot(Biomass ~ FLabel, data=raw_data_week2, las=2,
        font.main = 2, main = "(b)",
        col=c('gray90','gray90', 'gray40', 'gray40'),
        cex.lab=1, cex.main=1, cex.axis=0.75,
        ylim=c(0,20))
stripchart(Biomass ~ FLabel, vertical=TRUE, data=raw_data_week2,
           add=TRUE, pch=1, cex=1, col='black')
boxplot(Biomass ~ FLabel, data=raw_data_week3, las=2,
        font.main = 2, main = "(c)",
        col=c('gray90','gray90', 'gray40', 'gray40'),
        cex.lab=1, cex.main=1, cex.axis=0.75,
        ylim=c(0,20))
stripchart(Biomass ~ FLabel, vertical=TRUE, data=raw_data_week3,
           add=TRUE, pch=1, cex=1, col='black')
dev.off()
# Figure 3: same layout as Figure 2 but with area as the response.
tiff("Figure3.tiff", units="in", width=8, height=3, res=300)
par(mar=c(2,4,1,1), oma=c(2,0,0,0), mgp=c(4.5,1,0), mfrow=c(1,3))
boxplot(Area ~ FLabel, data=raw_data_week1, las=2,
        font.main = 2, main = "(a)",
        col=c('gray90','gray90', 'gray40', 'gray40'),
        ylab="",
        cex.lab=1, cex.main=1, cex.axis=0.75,
        ylim=c(0,550))
stripchart(Area ~ FLabel, vertical=TRUE, data=raw_data_week1,
           add=TRUE, pch=1, cex=1, col='black')
title(ylab=expression(Area~(mm^{2})), line=2, cex.lab=1.25)
boxplot(Area ~ FLabel, data=raw_data_week2, las=2,
        font.main = 2, main = "(b)",
        col=c('gray90','gray90', 'gray40', 'gray40'),
        cex.lab=1, cex.main=1, cex.axis=0.75,
        ylim=c(0,550))
stripchart(Area ~ FLabel, vertical=TRUE, data=raw_data_week2,
           add=TRUE, pch=1, cex=1, col='black')
boxplot(Area ~ FLabel, data=raw_data_week3, las=2,
        font.main = 2, main = "(c)",
        col=c('gray90','gray90', 'gray40', 'gray40'),
        cex.lab=1, cex.main=1, cex.axis=0.75,
        ylim=c(0,550))
stripchart(Area ~ FLabel, vertical=TRUE, data=raw_data_week3,
           add=TRUE, pch=1, cex=1, col='black')
dev.off()
|
fdbe2ed456d6286744911814caae707512045230
|
0df8bf87849196facb24125cbc3d455010c34265
|
/01_data_import.R
|
10b18455778829ccadbdc955db0b6935fe5b2360
|
[] |
no_license
|
datahoundz/Springboard_Data_Science
|
427768ce34917dea4b72841c600bad88126a45ee
|
da230ea3d160bc40c57a1fbde6602be6bd4e42fe
|
refs/heads/master
| 2021-04-28T01:25:40.472836
| 2018-04-26T17:50:59
| 2018-04-26T17:50:59
| 122,276,605
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,630
|
r
|
01_data_import.R
|
library(xml2)
library(readxl)
library(readr)
library(tidyr)
library(dplyr)
library(purrr)
library(ggplot2)
library(lubridate)
# Set options to limit sci notation and decimal places
options(scipen = 999, digits = 3)
# =======================================================================
#
# Data Import - CDC Suicides, CDC Homicides, CDC Population
#
# =======================================================================
# Data accessed at
# https://wonder.cdc.gov/
# Import CDC Suicide Data (edited version with additional footer data deleted)
suicides_df <- read_tsv("data_edited/CDC_FirearmSuicide_1999-2016.txt")
# Review general layout by viewing head of file
head(suicides_df)
# Remove duplicate/empty columns, make Rate numeric
# ("Unreliable" text entries in Crude Rate become NA under as.numeric)
suicides_df$Notes <- NULL
suicides_df$'State Code' <- NULL
suicides_df$'Year Code' <- NULL
suicides_df$`Crude Rate` <- as.numeric(suicides_df$`Crude Rate`)
# Standardize and specify variable names (plan to merge w/ homicide data)
suicides_df <- suicides_df %>%
  rename(state = State) %>%
  rename(year = Year) %>%
  rename(sui_cnt = Deaths) %>%
  rename(sui_pop = Population) %>%
  rename(sui_rate = 'Crude Rate')
# Check results
head(suicides_df)
# Check for NAs and other data issues
summary(suicides_df)
# 13 NA's in suicide rate?
suicides_df %>%
  filter(is.na(sui_rate))
# DC & RI too few for calculation, replace NA w/ calculation
# (rate = deaths per 100,000 population, rounded to one decimal)
suicides_df <- mutate(suicides_df, sui_rate = ifelse(is.na(sui_rate), round(sui_cnt / sui_pop * 100000, 1), sui_rate))
summary(suicides_df)
# Run histograms to check distribution of values, any outliers
hist(suicides_df$sui_cnt)
hist(suicides_df$sui_rate)
plot(suicides_df$sui_pop, suicides_df$sui_cnt)
# Looks like everything checks out: fairly normal dist on rate, right skewed count due to pop size
# Export to data_cleaned per Section 3 Data Wrangling Ex. 7
write_csv(suicides_df, path = "data_cleaned/suicides_df.csv")
# =======================================================================
# Repeat process w/ appropriate variable adjustments for homicide data
homicides_df <- read_tsv("data_edited/CDC_FirearmHomicide_1999-2016.txt")
head(homicides_df)
# Drop duplicate/empty columns; coerce Crude Rate to numeric
homicides_df$Notes <- NULL
homicides_df$'State Code' <- NULL
homicides_df$'Year Code' <- NULL
homicides_df$`Crude Rate` <- as.numeric(homicides_df$`Crude Rate`)
homicides_df <- homicides_df %>%
  rename(state = State) %>%
  rename(year = Year) %>%
  rename(hom_cnt = Deaths) %>%
  rename(hom_pop = Population) %>%
  rename(hom_rate = 'Crude Rate')
# Check results
head(homicides_df)
# Check for NAs and other data issues
summary(homicides_df)
# 86 NA's in hom_rate
homicides_df %>%
  filter(is.na(hom_rate))
# Same issue as suicides data - replace NA hom_rate w/ calculation
homicides_df <- mutate(homicides_df, hom_rate = ifelse(is.na(hom_rate), round(hom_cnt / hom_pop * 100000, 1), hom_rate))
summary(homicides_df)
# Run histograms to check distribution of values, any outliers
hist(homicides_df$hom_cnt)
hist(homicides_df$hom_rate)
plot(homicides_df$hom_pop, homicides_df$hom_cnt)
# Right skewed count due to pop size. Curious long right tail in dist on rate?
homicides_df %>%
  filter(hom_rate > 10) %>%
  arrange(desc(hom_rate)) %>%
  print(n = 25)
# DC rate extremely high, partially due to small population
hist(homicides_df$hom_rate[homicides_df$state != "District of Columbia"])
# Export to data_cleaned per Section 3 Data Wrangling Ex. 7
write_csv(homicides_df, path = "data_cleaned/homicides_df.csv")
# =======================================================================
# Modify process to adjust for population data
# Import CDC Population Data (baseline for joining suicide/homicide data)
population_df <- read_tsv("data_edited/CDC_PopEst_1990-2016.txt")
head(population_df)
# Similar adjustments for population table
population_df$Notes <- NULL
population_df$`Yearly July 1st Estimates Code` <- NULL
population_df$`State Code` <- NULL
population_df <- population_df %>%
  rename(state = State) %>%
  rename(year = `Yearly July 1st Estimates`) %>%
  rename(pop = Population)
# Filter for Years applicable to available CDC data
population_df <- population_df %>%
  filter(year >= 1999)
# Check to make sure totals match up (51 states x 18 years)
# (autoprints TRUE when the row count is complete)
51*18 == nrow(population_df)
# Check results
head(population_df)
summary(population_df)
hist(population_df$pop)
# Export to data_cleaned per Section 3 Data Wrangling Ex. 7
write_csv(population_df, path = "data_cleaned/population_df.csv")
# =======================================================================
#
# Data Merge - CDC Suicides, CDC Homicides, CDC Population
#
# =======================================================================
# Create standard join key variable for most common table join
join_key <- c("state", "year")
# Join Population base table w/ homicides and suicides tables
# (drop the per-table population columns; `pop` is the authoritative one)
gun_deaths_df <- left_join(population_df, homicides_df, by = join_key) %>%
  left_join(suicides_df, by = join_key) %>%
  select(-hom_pop, -sui_pop)
# Check results
head(gun_deaths_df)
summary(gun_deaths_df)
# Need to address 91 NA's in Homicide and HomicideRate and 6 NA's in Suicide & Suicide Rate
# Select list of States w/ NA in Homicides
# Calculate Min-Max-Mean for each State w/ NA to guide interpolation
gun_deaths_df %>%
  filter(is.na(hom_cnt)) %>%
  select(state) %>%
  unique() %>%
  left_join(gun_deaths_df, by = "state") %>%
  group_by(state) %>%
  summarise(min = min(hom_cnt, na.rm = TRUE), max = max(hom_cnt, na.rm = TRUE), mean = mean(hom_cnt, na.rm = TRUE))
# Results suggest using mean to replace missing values
# Repeat for Suicide data
gun_deaths_df %>%
  filter(is.na(sui_cnt)) %>%
  select(state) %>%
  unique() %>%
  left_join(gun_deaths_df, by = "state") %>%
  group_by(state) %>%
  summarise(min = min(sui_cnt, na.rm = TRUE), max = max(sui_cnt, na.rm = TRUE), mean = mean(sui_cnt, na.rm = TRUE))
# Again the mean looks like the best replacement value
# Replace NA in Suicides/Homicides with Mean for respective State
# Calculate hom_rate and sui_rate to replace NA values
# NOTE(review): the result stays grouped by state (no ungroup()) -- any
# later summarise/mutate on gun_deaths_df will silently run per state.
gun_deaths_df <- gun_deaths_df %>%
  group_by(state) %>%
  mutate(hom_cnt = ifelse(is.na(hom_cnt), as.integer(mean(hom_cnt, na.rm = TRUE)), hom_cnt)) %>%
  mutate(sui_cnt = ifelse(is.na(sui_cnt), as.integer(mean(sui_cnt, na.rm = TRUE)), sui_cnt)) %>%
  mutate(hom_rate = ifelse(is.na(hom_rate), round(hom_cnt / pop * 100000, 1), hom_rate)) %>%
  mutate(sui_rate = ifelse(is.na(sui_rate), round(sui_cnt / pop * 100000, 1), sui_rate))
# Check results
summary(gun_deaths_df)
# Export to data_cleaned per Section 3 Data Wrangling Ex. 7
write_csv(gun_deaths_df, path = "data_cleaned/gun_deaths_df.csv")
# =======================================================================
#
# Import Region/Subregion data to join on State for higher level analysis
#
# =======================================================================
# Regional data information accessed at
# https://www2.census.gov/geo/docs/maps-data/maps/reg_div.txt
regions_df <- read_excel("data_edited/State_FIPS_Codes.xlsx")
# Check data
head(regions_df)
# Convert code fields to integer
regions_df$fips_st <- as.integer(regions_df$fips_st)
regions_df$reg_code <- as.integer(regions_df$reg_code)
regions_df$subreg_code <- as.integer(regions_df$subreg_code)
# Create region and subregion fields w/ code+name for sorting/labeling purposes
# (e.g. "1-Northeast"; remove = FALSE keeps the source columns too)
regions_df <- regions_df %>%
  unite(region, reg_code, reg_name, sep = "-", remove = FALSE) %>%
  unite(subregion, subreg_code, subreg_name, sep = "-", remove = FALSE)
# Check results
head(regions_df)
summary(regions_df)
# Export to data_cleaned per Section 3 Data Wrangling Ex. 7
write_csv(regions_df, path = "data_cleaned/regions_df.csv")
# =======================================================================
#
# Import Boston University School of Public Health Gun Law Data
#
# =======================================================================
# Data accessed at
# https://www.statefirearmlaws.org/table.html
state_laws_df <- read.csv("data_edited/state_gun_law_database.csv")
state_codes_df <- read_xlsx("data_edited/state_gun_laws_codebook.xlsx")
head(state_laws_df)
str(state_laws_df)
head(state_codes_df)
str(state_codes_df)
# Clean up category names in State Firearm Codes
state_codes_df <- state_codes_df %>%
  select(cat_code = `Category Code`, cat = Category, sub_cat = `Sub-Category`, var_name = `Variable Name`)
# Filter state law data for 1999-2016 period only
state_laws_df <- state_laws_df %>%
  filter(year >= 1999 & year <= 2016)
# Address issues related to large number of variables in state_laws_df
# Create simplified table with total laws for general analysis
state_laws_total_df <- state_laws_df %>%
  select(state, year, lawtotal)
# Create a second simplified table with category totals using state codes data
# View category names for var_name lookup
unique(state_codes_df$cat)
# Use code below to generate var_name by cat for mutate below
state_codes_df %>%
  filter(cat == "Buyer regulations") %>%
  select(var_name)
# Collapse 134 individual variables in to 14 larger category groupings.
# Each category is the sum of its 0/1 indicator variables; the final
# select() keeps only the new underscore-named category columns.
# NOTE(review): `recordsall` appears twice in the deal_reg sum below and is
# therefore double-counted -- check the codebook for the intended variable.
laws_cat_df <- state_laws_df %>%
  mutate(deal_reg = dealer + dealerh + recordsall + recordsdealerh + recordsall +
    reportdealer + reportdealerh + reportall + reportallh + purge + residential +
    theft + security + inspection + liability + junkgun) %>%
  mutate(buy_reg = waiting + waitingh + permit + permith + permitlaw + fingerprint +
    training + registration + registrationh + defactoreg + defactoregh + age21handgunsale +
    age18longgunsale + age21longgunsale + age21longgunsaled + loststolen + onepermonth) %>%
  mutate(high_risk = felony + violent + violenth + violentpartial + invcommitment +
    invoutpatient + danger + drugmisdemeanor + alctreatment + alcoholism) %>%
  mutate(bkgrnd_chk = universal + universalh + gunshow + gunshowh + universalpermit + universalpermith +
    backgroundpurge + threedaylimit + mentalhealth + statechecks + statechecksh) %>%
  mutate(ammo_reg = ammlicense + ammrecords + ammpermit + ammrestrict + amm18 +
    amm21h + ammbackground) %>%
  mutate(poss_reg = age21handgunpossess + age18longgunpossess + age21longgunpossess +
    gvro + gvrolawenforcement + college + collegeconcealed + elementary + opencarryh +
    opencarryl + opencarrypermith + opencarrypermitl) %>%
  mutate(conceal_reg = permitconcealed + mayissue + showing + ccrevoke + ccbackground +
    ccbackgroundnics + ccrenewbackground) %>%
  mutate(assault_mag = assault + onefeature + assaultlist + assaultregister + assaulttransfer +
    magazine + tenroundlimit + magazinepreowned) %>%
  mutate(child_acc = lockd + lockp + lockstandards + locked + capliability + capaccess +
    capuses + capunloaded + cap18 + cap16 + cap14) %>%
  mutate(gun_traff = traffickingbackground + traffickingprohibited + traffickingprohibitedh +
    strawpurchase + strawpurchaseh + microstamp + personalized) %>%
  mutate(stnd_grnd = nosyg) %>%
  mutate(pre_empt = preemption + preemptionbroad + preemptionnarrow) %>%
  mutate(immunity_ = immunity) %>%
  mutate(dom_viol = mcdv + mcdvdating + mcdvsurrender + mcdvsurrendernoconditions +
    mcdvsurrenderdating + mcdvremovalallowed + mcdvremovalrequired + incidentremoval +
    incidentall + dvro) %>%
  select(state, year, contains("_"))
head(laws_cat_df)
summary(laws_cat_df)
str(laws_cat_df)
# Export to data_cleaned per Section 3 Data Wrangling Ex. 7
# NOTE(review): state_laws_total_df is built above but never exported.
write_csv(state_laws_df, path = "data_cleaned/state_laws_df.csv")
write_csv(laws_cat_df, path = "data_cleaned/laws_cat_df.csv")
# =======================================================================
#
# Import Giffords Law Center Gun Law Data
#
# =======================================================================
# Data accessed at
# http://lawcenter.giffords.org/
# Import Giffords Law Center data, compiled in CSV from website data
giff_grd_df <- read.csv("data_edited/giffords_gunlawscorecard.csv")
# Import LetterGradeConverter to translate letter to numeric grade
grd_conv_df <- read.csv("data_edited/LetterGradeConverter.csv")
# Review data frame contents
head(grd_conv_df)
head(giff_grd_df)
str(giff_grd_df)
summary(giff_grd_df)
# Reverse numerical ordering of death_rnk to 1 for Fewest 50 for Most
# More intuitive to have rank on both reflect greater safety
giff_grd_df <- giff_grd_df %>%
  mutate(death_rnk = 51 - death_rnk)
head(giff_grd_df)
# Calculate numerical grade from grd_conv_df table, re-arrange table
# (letter grade -> GPA-style score via lookup join)
giff_grd_df <- giff_grd_df %>%
  left_join(grd_conv_df, by = c("law_grd" = "Letter")) %>%
  select(state, year, law_grd, law_score = GPA, law_rnk, death_rnk, bkgrnd_chk) %>%
  arrange(state, year)
head(giff_grd_df)
# Create aggregate data table w/ avg scores and ranks by state
giff_agg_df <- giff_grd_df %>%
  group_by(state) %>%
  summarise(avg_score = round(mean(law_score), 2), avg_law_rnk = round(mean(law_rnk), 2),
            avg_death_rnk = round(mean(death_rnk), 2), avg_bkgrnd = round(mean(bkgrnd_chk), 2))
# Export to data_cleaned per Section 3 Data Wrangling Ex. 7
write_csv(giff_grd_df, path = "data_cleaned/giff_grd_df.csv")
write_csv(giff_agg_df, path = "data_cleaned/giff_agg_df.csv")
# =======================================================================
#
# Import Gun Ownership Data and Guns & Ammo Magazine Rankings
#
# =======================================================================
# Data accessed at
# http://injuryprevention.bmj.com/content/22/3/216
# http://www.gunsandammo.com/second-amendment/best-states-for-gun-owners-2017/
# http://www.gunsandammo.com/network-topics/culture-politics-network/best-states-for-gun-owners-2015/
# Proxy gun ownership data provided via email by Dr. Michael Siegel at Boston University School of Public Health
# Original source file is available in data_original folder.
gun_own_2013_df <- read_excel("data_edited/gun_ownership_rates_2013.xlsx")
gun_ammo_df <- read.csv("data_edited/guns_ammo_rankings.csv")
gun_own_prx_df <- read_excel("data_edited/gun_ownership_proxy.xlsx")
head(gun_own_2013_df)
summary(gun_own_2013_df)
# No data cleaning required
head(gun_ammo_df)
summary(gun_ammo_df)
# No data cleaning required
head(gun_own_prx_df)
summary(gun_own_prx_df)
# Gun Ownership Proxy: Retain only proxy variable for 1999-2016,
# convert proxy to percentage and year to integer
gun_own_prx_df <- gun_own_prx_df %>%
  filter(year >= 1999) %>%
  mutate(own_proxy = proxy/100, year = as.integer(year)) %>%
  select(state, year, own_proxy)
# Export to data_cleaned per Section 3 Data Wrangling Ex. 7
write_csv(gun_own_prx_df, path = "data_cleaned/gun_own_prx_df.csv")
# =======================================================================
#
# Import CDC Suicide Data ALL METHODS
#
# =======================================================================
# Data accessed at
# https://wonder.cdc.gov/
# Import CDC Suicide Data, ALL METHODS (edited version with additional footer data deleted)
# NOTE(review): all_suicides_df is prepared but not used or exported below.
all_suicides_df <- read_tsv("data_edited/CDC_AllSuicides_State_1999-2016.txt")
# Review general layout by viewing head of file
head(all_suicides_df)
# Remove duplicate/empty columns, make Rate numeric
all_suicides_df$Notes <- NULL
all_suicides_df$'State Code' <- NULL
all_suicides_df$'Year Code' <- NULL
all_suicides_df$`Crude Rate` <- as.numeric(all_suicides_df$`Crude Rate`)
# Standardize and specify variable names (plan to merge w/ homicide data)
all_suicides_df <- all_suicides_df %>%
  rename(state = State) %>%
  rename(year = Year) %>%
  rename(all_sui_cnt = Deaths) %>%
  rename(all_sui_pop = Population) %>%
  rename(all_sui_rate = 'Crude Rate')
summary(all_suicides_df)
# =======================================================================
#
# Add State Land Area from Census Bureau for Population Density Calculations
#
# =======================================================================
# Data accessed at
# https://factfinder.census.gov/faces/tableservices/jsf/pages/productview.xhtml?src=bkmk
land_area <- read_xlsx("data_edited/state_land_area.xlsx")
# Add population density to population table
population_df <- population_df %>%
  left_join(land_area, by = "state") %>%
  mutate(pop_density = pop / land_area)
hist(log(population_df$pop_density))
# Exploratory boxplot of mean density by region (DC excluded as an outlier)
population_df %>%
  left_join(regions_df, by = "state") %>%
  filter(usps_st != "DC") %>%
  group_by(region, usps_st) %>%
  summarise(pop_density = mean(pop_density)) %>%
  ggplot(aes(x = region, y = pop_density, label = usps_st, color = region)) +
  geom_boxplot() +
  geom_text()
|
645acbc99eea7811908bd13ed71096f33035187f
|
6c3f506898473dfbbabe0d8544c46b1a5b6cde19
|
/Plot1R.R
|
74a918057b542ed64a4a4322611936b268f27ab5
|
[] |
no_license
|
nickhitt/DataScienceCourseProject4
|
d0b543475e201faf1d7a6d06a1831adae1f840e2
|
c0b695e248caad4069a332283887b2fe73190258
|
refs/heads/main
| 2023-01-09T22:47:16.676634
| 2020-11-09T04:10:14
| 2020-11-09T04:10:14
| 311,218,225
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,106
|
r
|
Plot1R.R
|
## Exploratory Data Analysis Course Project -- Plot 1
## Total US PM2.5 emissions for each measurement year.
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Sum emissions within each year in one pass instead of four hand-written
# subset/sum pairs; works for whatever years are present in the data.
totals <- tapply(NEI$Emissions, NEI$year, sum, na.rm = TRUE)
years <- as.numeric(names(totals))
# NOTE(review): absolute, user-specific output path -- consider a relative path.
png(file="/Users/nicholashitt/Dropbox/My Mac (Nicholas’s MacBook Pro)/Documents/R/Coursera Class/exdata-data-NEI_data/plot1.png",
    width=480, height=480)
plot(years, totals, type = "o", xlab = "Year", ylab = "PM2.5 Emissions")
title("Total US PM2.5 Emissions by Year")
# Bug fix: the device was never closed, so plot1.png could be left empty
# or unflushed until the R session exited.
dev.off()
|
70d5f9b9d0c7fb3632bbf30dbb84a2bf3b636c1c
|
a41da845203a8756c0c014ac89ca96c7f0565bd1
|
/databases/reproject_r_file.R
|
13d19f66fcd7d72adbeeff827599747f49809ba9
|
[] |
no_license
|
bailsofhay/downscale_code
|
121e40242770f2f1306241a60d6ebec7e0c4c82d
|
42655c510fa66916c2b87adf9817dc93a22572c2
|
refs/heads/master
| 2020-04-10T07:06:49.725784
| 2018-12-17T15:14:36
| 2018-12-17T15:14:36
| 160,870,602
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 606
|
r
|
reproject_r_file.R
|
library(raster)
library(gtools)

## Reproject one rotated global climate layer (selected by the SLURM array
## task id) onto the CRS of the CA/NV DEM reference raster and write it out.
slurm_id = as.numeric(Sys.getenv('SLURM_ARRAY_TASK_ID'))
ref = raster('/data/gpfs/assoc/gears/tree_vel/dem/raw/ca_nv_dem_utm.tif')
files = list.files(path = "/data/gpfs/assoc/gears/tree_vel/climate/renamed/v", pattern = ".tif", include.dirs = T, full.names = T)
files = mixedsort(files)  # natural-order sort so slurm_id indexes layers deterministically
extent = c(-180, 180, -90, 90)
r = raster(files[slurm_id])
r = rotate(r)  # shift 0..360 longitudes to -180..180
extent(r) = extent
## FIX: the original read `tmaxojection(r) = crs(ref)` -- a broken
## find-and-replace of "pr" -> "tmax" that clobbered raster::projection().
projection(r) = crs(ref)
## NOTE(review): "retmaxojected" in the output path looks like the same
## find-and-replace damage (probably meant "reprojected"); left unchanged in
## case the directory on the cluster really carries this name -- verify.
writeRaster(r, file = paste('/data/gpfs/assoc/gears/tree_vel/climate/retmaxojected/v/v_', slurm_id, '.tif', sep = ''), overwrite = T)
|
8a4231c10e78f010e7e240bc4286b072549755f5
|
89555af2f9fb4bb05c9b7da9ba37cd735a2588a9
|
/cachematrix.R
|
674b3c65a40d840a620db95cedc203909c06c65a
|
[] |
no_license
|
nancyirisarri/ProgrammingAssignment2
|
ebb0fbadbcf1e9fdd85a468fe50d83a284eb3eac
|
abe4c1ac1be68d8ec98d0a4c4ebbe48aae9b19af
|
refs/heads/master
| 2021-01-19T02:47:44.343418
| 2017-04-05T10:44:28
| 2017-04-05T10:44:28
| 87,293,574
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 914
|
r
|
cachematrix.R
|
## Return cached inverse of a matrix or, if not cached,
## compute it.
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessors: set/get for the matrix itself and
## setinverse/getinverse for the cached inverse. Replacing the matrix via
## set() invalidates any previously cached inverse.
makeCacheMatrix <- function(mat = matrix()) {
  cached <- NULL
  list(
    set = function(value) {
      mat <<- value
      cached <<- NULL  # new matrix -> stale cache must be cleared
    },
    get = function() mat,
    setinverse = function(inv) cached <<- inv,
    getinverse = function() cached
  )
}
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## A previously cached inverse is reused when available; otherwise the
## inverse is computed with solve(), stored in the cache, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(mat, ...) {
  cached <- mat$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  result <- solve(mat$get(), ...)
  mat$setinverse(result)
  result
}
|
e5d0d438070b437f09aa29e65933c90112fae140
|
1211bb4bac6accd22ba5252f083bb1efd0d97ba9
|
/man/read_uka.Rd
|
4275acb1c893847b3cd16bd66bc1dbc5c47bb5fd
|
[
"MIT"
] |
permissive
|
CogDisResLab/creedenzymatic
|
dd72766af0914760ced56916518ece6d76387e6f
|
dff08cf325576f9fd2833c3801235a57c12475c7
|
refs/heads/main
| 2023-08-31T09:14:54.478650
| 2023-08-17T21:03:59
| 2023-08-17T21:03:59
| 350,755,295
| 1
| 0
|
NOASSERTION
| 2022-03-17T00:56:51
| 2021-03-23T15:04:41
|
R
|
UTF-8
|
R
| false
| true
| 558
|
rd
|
read_uka.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_uka.R
\name{read_uka}
\alias{read_uka}
\title{Reads and Rank UKA table}
\usage{
read_uka(df, ...)
}
\arguments{
\item{df}{dataframe, UKA table output (requires at least Kinase and Z columns)}
\item{...}{Arguments passed on to the \code{rank_kinases} function.}
}
\value{
dataframe, Ranked and quartiled UKA table
}
\description{
reads UKA table and checks for correct format
}
\details{
This function takes in UKA table and rank and quartile kinases based on the absolute Score values
}
|
de06992fa2ce22a28b7bc72db5c6a00f228aa014
|
6a8660cca771bd781cdf09a81c07c5f9bf7e75b5
|
/R/support-functions.R
|
ce7f8b5b2d78b18090963ccfb48c5c769a976cbd
|
[
"MIT"
] |
permissive
|
berkorbay/ibb
|
10455d82a3a07281473ed05bf9729599236993e0
|
848c34cdad9cd33746427d15d88ab066da1e7d02
|
refs/heads/master
| 2023-04-06T08:35:57.183568
| 2021-04-24T09:02:47
| 2021-04-24T09:02:47
| 262,798,514
| 8
| 1
|
NOASSERTION
| 2020-11-20T11:20:58
| 2020-05-10T13:56:00
|
R
|
UTF-8
|
R
| false
| false
| 2,368
|
r
|
support-functions.R
|
#' @title Change Language to English
#' @description Switches the API language used by subsequent requests to
#'   English by setting the \code{IBB_LANGUAGE} environment variable.
#' @export
change_language_to_english <- function() {
  Sys.setenv(IBB_LANGUAGE = "en")
}
#' @title Set Query Limit
#' @description Sets the maximum number of results returned per query by
#'   storing it in the \code{IBB_QUERY_LIMIT} environment variable.
#' @param limit Limit of the maximum results (default 100).
#' @export
set_query_limit <- function(limit = 100) {
  Sys.setenv(IBB_QUERY_LIMIT = limit)
}
#' @title Get All Data Sources
#' @description Fetches the package list from the IBB open-data API.
#' @param get_all_info If TRUE return the whole parsed response list,
#'   otherwise just the result as a tibble.
#' @export
get_all_data_sources <- function(get_all_info = FALSE) {
  response <- jsonlite::fromJSON(call_data_ibb("package_list"))
  if (get_all_info) {
    return(response)
  }
  tibble::as_tibble(response$result)
}
#' @title Get Info on a Data Source
#' @description Fetches detailed package metadata for a single data source.
#' @param package_id Package ID taken from the package list. Run
#'   get_all_detailed_info to see the available IDs.
#' @param get_all_info If TRUE return the whole parsed response list,
#'   otherwise just the package's resources as a tibble.
#' @examples
#' \dontrun{
#' get_detailed_info_on_data_source("594ca91d-0c71-41da-b50a-c7cd30fab67a")
#' }
#' @export
get_detailed_info_on_data_source <- function(package_id, get_all_info = FALSE) {
  query <- paste0("package_show?id=", package_id)
  response <- jsonlite::fromJSON(call_data_ibb(query))
  if (get_all_info) {
    return(response)
  }
  tibble::as_tibble(response$result$resources)
}
#' @title Get Info on All Data Sources
#' @description Iterates over every package id returned by
#'   \code{get_all_data_sources()} and collects each package's resources
#'   into one tibble, tagging rows with the originating \code{package_name}.
#'   Rows accumulate newest-first (each package's rows are bound \emph{above}
#'   the running result).
#' @param verbose If TRUE, it prints the track of the data being imported.
#' @return A tibble of all resources with \code{resource_id} and
#'   \code{package_id} as the leading columns.
#' @export
get_all_detailed_info <- function(verbose = FALSE) {
sources_vec <- get_all_data_sources() %>%
unlist()
# NOTE(review): growing a tibble with bind_rows() inside the loop is
# O(n^2); collecting into a list and binding once would be faster, but the
# reversed row order produced here would have to be reproduced carefully.
all_resources_info <- tibble::tibble()
for (i in seq_along(sources_vec)) {
if (verbose == TRUE) print(paste0("Getting info on: ", sources_vec[i], " Package ", i, "/", length(sources_vec)))
resource_df <- get_detailed_info_on_data_source(package_id = sources_vec[i])
# NOTE(review): get_detailed_info_on_data_source() returns a tibble, which
# is never NULL -- confirm whether this guard was meant to test nrow() == 0.
if (!is.null(resource_df)) {
all_resources_info <- resource_df %>%
dplyr::mutate(package_name = sources_vec[i]) %>%
dplyr::bind_rows(all_resources_info)
} else {
print("This package id has no defined resources.")
}
}
# Put the two id columns first, keep everything else in original order.
tibble::as_tibble(all_resources_info) %>%
dplyr::select(
.data$resource_id,
.data$package_id,
tidyselect::everything()
) %>%
return()
}
|
2493e1d98b9e59470a2dc2864e3378de621e37b2
|
c196e54c5dbf539f0055ad55b5c46abe344c3517
|
/whitefly.R
|
5f86a249aba36f044b0fa249f225f6829b25f504
|
[] |
no_license
|
kien300/whitefly
|
452559cfbb06dd428f9de0bffda6047945b56ce3
|
5060b034e3a24876c739a2901fdf511975488261
|
refs/heads/master
| 2021-05-02T07:16:12.893364
| 2018-02-26T09:24:07
| 2018-02-26T09:24:07
| 120,872,426
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,571
|
r
|
whitefly.R
|
## Whitefly field-survey analysis (part 1): setup, data cleaning, and the
## Cambodia transect analysis. Adult counts are summed per field across
## transects A and B; nymph scores (1-5) are recoded into abundance classes.
library(tidyverse) #general data
library(readxl) #import Excel files
library(zoo) #fill in data
library(vcd) #Association Plot
library(fifer) #post-hoc Chi Squared test
library(knitr) #for table format
setwd("D:/OneDrive - CGIAR/CIAT/OTA/Nami'sData_2.9.18")  # NOTE(review): machine-specific path
cm <- read_excel("for analysis/Cambodia.xls")
vn <- read_excel("for analysis/Vietnam.xls")
#Cambodia----------------------
#general data cleaning------
#table(cm$Transect_A_adult)
#sum(is.na(cm$Transect_A_adult))
cm <- cm %>% filter(!is.na(cm$Transect_A_adult)) #879-79=800
cm <- cm %>% do(na.locf(.)) #replace NA with the most recent non-NA
cm$A_Nymph_Class <- NA
#table(cm$Transect_A_nymph) # two 0s --> wrong
# Recode impossible 0 scores (scale runs 1-5) as class 1.
cm$Transect_A_nymph[cm$Transect_A_nymph==0] <- 1
# Map ordinal nymph scores to labelled abundance classes.
cm$A_Nymph_Class[cm$Transect_A_nymph==1] <- "0 to 5"
cm$A_Nymph_Class[cm$Transect_A_nymph==2] <- "5 to 10"
cm$A_Nymph_Class[cm$Transect_A_nymph==3] <- "10 to 25"
cm$A_Nymph_Class[cm$Transect_A_nymph==4] <- "25 to 50"
cm$A_Nymph_Class[cm$Transect_A_nymph==5] <- "50 and more"
#table(cm$A_Nymph_Class)
cm$B_Nymph_Class <- NA
#table(cm$Transect_B_nymph)
cm$B_Nymph_Class[cm$Transect_B_nymph==1] <- "0 to 5"
cm$B_Nymph_Class[cm$Transect_B_nymph==2] <- "5 to 10"
cm$B_Nymph_Class[cm$Transect_B_nymph==3] <- "10 to 25"
cm$B_Nymph_Class[cm$Transect_B_nymph==4] <- "25 to 50"
cm$B_Nymph_Class[cm$Transect_B_nymph==5] <- "50 and more"
#table(cm$B_Nymph_Class)
cm$Transect_A_adult <- as.numeric(cm$Transect_A_adult)
cm$Transect_B_adult <- as.numeric(cm$Transect_B_adult)
#combine A & B Transect (long format: one row per transect observation)
cm_combined <- cm %>%
dplyr::select(District, `Field ID`, Transect_A_adult, Transect_B_adult) %>%
gather(Transect_A_adult, Transect_B_adult, key = 'Transect', value = 'Count')
#test-----
# sum_CM <- cm %>% group_by(Collector, Country, Province, District, `Field ID`) %>%
#   summarise(SUM_A_adult = sum(Transect_A_adult),
#             SUM_B_adult = sum(Transect_B_adult))
# Total adult count per field (both transects pooled).
sum_CM <- cm_combined %>% group_by(District, `Field ID`) %>%
summarise(SUM_adult = sum(Count))
# table(sum_CM$Collector, sum_CM$SUM_A_adult)
# table(sum_CM$Province, sum_CM$SUM_A_adult)
# table(sum_CM$District, sum_CM$SUM_A_adult)
#ANOVA for different Districts-----
#Transect A
#boxplot(SUM_A_adult~District, data = sum_CM, horizontal=T) #to make visible
#results1 = aov(SUM_A_adult~District, data = sum_CM)
results1 = aov(SUM_adult~District, data = sum_CM)
#summary(results1) #to make visible
#TukeyHSD(results1) #post-hoc test for pairwise comparision, perform if anova was significant
#p-value=.408, fails to reject the null <=> No difference in Districts mean
#Transect B
#boxplot(SUM_B_adult~District, data = sum_CM, horizontal=T) #to make visible
#results2 = aov(SUM_B_adult~District, data = sum_CM)
#summary(results2) #to make visible
#p-value=.166, fails to reject the null <=> No difference in Districts mean
#test for nymph----
##table(cm$Transect_A_nymph) #make visible for Markdown files
##table(cm$Transect_B_nymph) #make visible for Markdown files
## Whitefly field-survey analysis (part 2): Vietnam. Same pipeline as
## Cambodia, plus chi-squared / post-hoc tests on nymph abundance classes
## by District (transects A and B).
#Vietnam--------------------
#general tidying---------
#table(vn$Transect_A_adult)
vn$Transect_A_adult[vn$Transect_A_adult=='-'] <- 'NA'  # NOTE(review): stores the *string* "NA", not a real NA
#sum(is.na(vn$Transect_A_adult)) #check
# NOTE(review): the filter below only drops genuine NAs; the literal "NA"
# strings introduced above survive until as.numeric() coerces them (with a
# warning) further down -- confirm the 846-116=730 count still holds.
vn <- vn %>% filter(!is.na(vn$Transect_A_adult)) #846-116=730
#table(vn$Transect_B_adult)
vn$Transect_B_adult[vn$Transect_B_adult=='-'] <- 'NA'
#sum(is.na(vn$Transect_B_adult)) #check
vn <- vn %>% do(na.locf(.)) #replace NA with the most recent non-NA
vn$Transect_A_adult <- as.numeric(vn$Transect_A_adult)
vn$Transect_B_adult <- as.numeric(vn$Transect_B_adult)
#table(vn$District) #see sample count per District
#eliminate districts with fewer than 5 samples
vn <- vn %>% filter(District != "Thuan Chau" & District != "Van Yen")
#combine A & B Transect (long format: one row per transect observation)
vn_combined <- vn %>%
dplyr::select(District, `Field ID`, Transect_A_adult, Transect_B_adult) %>%
gather(Transect_A_adult, Transect_B_adult, key = 'Transect', value = 'Count')
#test-----
# sum_VN <- vn %>% group_by(Collector, Country, Province, District, `Field ID`) %>%
#   summarise(SUM_A_adult = sum(Transect_A_adult, na.rm = TRUE),
#             SUM_B_adult = sum(Transect_B_adult, na.rm = TRUE))
# Total adult count per field (both transects pooled).
sum_VN <- vn_combined %>% group_by(District, `Field ID`) %>%
summarise(SUM_adult = sum(Count, na.rm = TRUE))
#ANOVA for different Districts-----
#Transect A
#boxplot(SUM_A_adult~District, data = sum_VN, horizontal=T) #to make visible
results3 = aov(SUM_adult~District, data = sum_VN)
#summary(results3) #to make visible
#post-hoc test for significant ANOVA #to make visible
# Keep only the significant pairwise District differences.
tukey3 <- as.data.frame(TukeyHSD(results3)$District)
tukey3 <- rownames_to_column(tukey3, "Pair") %>%
filter(`p adj`<0.05)
#p-value is significant, reject the null(No difference in Districts mean)
#Transect B
#boxplot(SUM_B_adult~District, data = sum_VN, horizontal=T) #to make visible
#results4 = aov(SUM_B_adult~District, data = sum_VN)
#summary(results4) #to make visible
#post-hoc test for significant ANOVA #to make visible
#tukey4 <- as.data.frame(TukeyHSD(results4)$District)
# tukey4 <- rownames_to_column(tukey4, "Pair") %>%
#   filter(`p adj`<0.05)
#p-value is significant, fails to reject the null(No difference in Districts mean)
#test for nymph-----
#table(vn$Transect_A_nymph)
vn$Transect_A_nymph[vn$Transect_A_nymph=='0'] <- 'NA'  # string "NA"; coerced to real NA by as.numeric() below
vn$Transect_A_nymph <- as.numeric(vn$Transect_A_nymph)
#sum(is.na(vn$Transect_A_nymph)) #check
vn$A_Nymph_Class <- NA
# Map ordinal nymph scores (1-5) to labelled abundance classes.
vn$A_Nymph_Class[vn$Transect_A_nymph==1] <- "0 to 5"
vn$A_Nymph_Class[vn$Transect_A_nymph==2] <- "5 to 10"
vn$A_Nymph_Class[vn$Transect_A_nymph==3] <- "10 to 25"
vn$A_Nymph_Class[vn$Transect_A_nymph==4] <- "25 to 50"
vn$A_Nymph_Class[vn$Transect_A_nymph==5] <- "50 and more"
vn$A_Nymph_Class <- as.factor(vn$A_Nymph_Class)
#table(vn$A_Nymph_Class)
# Frequency of each abundance class per District.
vn_AN <- vn %>% filter(!is.na(A_Nymph_Class)) %>%
group_by(District, A_Nymph_Class) %>%
summarise(Freq=n())
# Reorder factor levels into natural class order (alphabetical is wrong).
vn_AN$A_Nymph_Class <- factor(vn_AN$A_Nymph_Class,levels(vn_AN$A_Nymph_Class)[c(1,3,2)])
# Wide District x Class contingency table for the chi-squared test.
vn_AN1 <- vn_AN %>% spread(A_Nymph_Class, Freq)
vn_AN1[is.na(vn_AN1)] <- 0
vn_AN1 <- remove_rownames(vn_AN1)
vn_AN1 <- column_to_rownames(vn_AN1, var = "District")
#chisq.test(vn_AN1) #make visible in Markdown
# Pairwise post-hoc comparisons (Fisher's exact, BH-adjusted), significant only.
Chisq_vnA <-
chisq.post.hoc(vn_AN1, test = c("fisher.test"), popsInRows = TRUE,control = c("BH"), digits = 4) %>%
filter(adj.p<0.05)
vn_AN2 <- structable(~District+A_Nymph_Class,data=vn_AN)
#assoc(vn_AN2, shade=TRUE) #to make visible
#table(vn$Transect_B_nymph)
vn$Transect_B_nymph[vn$Transect_B_nymph=='0'] <- 'NA'  # string "NA"; coerced to real NA by as.numeric() below
vn$Transect_B_nymph <- as.numeric(vn$Transect_B_nymph)
#sum(is.na(vn$Transect_B_nymph)) #check
vn$B_Nymph_Class <- NA
vn$B_Nymph_Class[vn$Transect_B_nymph==1] <- "0 to 5"
vn$B_Nymph_Class[vn$Transect_B_nymph==2] <- "5 to 10"
vn$B_Nymph_Class[vn$Transect_B_nymph==3] <- "10 to 25"
vn$B_Nymph_Class[vn$Transect_B_nymph==4] <- "25 to 50"
vn$B_Nymph_Class[vn$Transect_B_nymph==5] <- "50 and more"
vn$B_Nymph_Class <- as.factor(vn$B_Nymph_Class)
#table(vn$B_Nymph_Class)
vn_BN <- vn %>% filter(!is.na(B_Nymph_Class)) %>%
group_by(District, B_Nymph_Class) %>%
summarise(Freq=n())
vn_BN$B_Nymph_Class <- factor(vn_BN$B_Nymph_Class,levels(vn_BN$B_Nymph_Class)[c(1,4,2,3)])
vn_BN1 <- vn_BN %>% spread(B_Nymph_Class, Freq)
vn_BN1[is.na(vn_BN1)] <- 0
vn_BN1 <- remove_rownames(vn_BN1)
vn_BN1 <- column_to_rownames(vn_BN1, var = "District")
#chisq.test(vn_BN1) #to make visible
Chisq_vnB <-
chisq.post.hoc(vn_BN1, test = c("fisher.test"), popsInRows = TRUE,control = c("BH"), digits = 4) %>%
filter(adj.p<0.05)
vn_BN2 <- structable(~District+B_Nymph_Class,data=vn_BN)
#assoc(vn_BN2, shade=TRUE) #to make visible
|
28ba65ea969bbbb06a90e9cd614abd241626e8b1
|
5094ff788262bd7d83741b4404e226f541e1da51
|
/GeneradorOrdenes.R
|
fadd0328f18a0fbfee81408fb368e94e21d81752
|
[
"MIT"
] |
permissive
|
CIAssetManagement/FundSeries
|
7fb4aa3bb3d8baea5600e79ea4a8297adeff6a1d
|
9c233449860158e941833cc9aad962c5a1e3bebd
|
refs/heads/master
| 2021-01-18T17:41:57.100162
| 2018-08-02T16:54:04
| 2018-08-02T16:54:04
| 100,494,469
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,448
|
r
|
GeneradorOrdenes.R
|
###########################################################################################################
#
# Step 1: Series verification
#
###########################################################################################################
# Helper functions -- presumably defines serie(), used below; verify in fondos.R
options(scipen=999)
source("fondos.R",local=FALSE)
# Packages
library(readxl)
library(dplyr)
library(FundTools)
# Working data: order file; strip stray apostrophes from the Serie column
archivo <- read_excel("OrdenPrueba.xls")
archivo$Serie <- gsub("'","",archivo$Serie)
## Roll a weekend date forward to the settlement business day. Using the
## reference dates 2017-08-06 (a Sunday) and 2017-08-07 (a Monday), a
## Saturday is shifted +3 days and a Sunday +2 days -- both land on the
## following Tuesday. Weekdays are returned unchanged.
diainhabil <- function(fecha) {
  sunday_ref <- as.Date("2017-08-06")
  monday_ref <- as.Date("2017-08-07")
  # (fecha - sunday_ref) %% 7 == 6  <=>  fecha is a Saturday
  if (as.integer(fecha - sunday_ref) %% 7 == 6) {
    fecha <- fecha + 3
  }
  # (fecha - monday_ref) %% 7 == 6  <=>  fecha is a Sunday
  if (as.integer(fecha - monday_ref) %% 7 == 6) {
    fecha <- fecha + 2
  }
  fecha
}
####################################################################################################
#                                    Funds to reclassify                                           #
####################################################################################################
#fondos <- c('+CIGUB','+CIGUMP','+CIGULP','+CIUSD','+CIEQUS','+CIBOLS','AXESEDM')
fondos <- c('+CIGUB','+CIGUMP','+CIGULP','+CIUSD')
####################################################################################################
#                            Data frame with the old (current) series                              #
####################################################################################################
# Positions per contract/fund/series, excluding house contract 22285,
# zero-value rows, and the BE-0 series.
datosventa <- archivo %>%
filter(Emisora %in% fondos & CContrato != 22285 & Importe > 0 & Serie != 'BE-0') %>%
group_by(CContrato, Emisora, Serie) %>%
summarise(Titulos = sum(Títulos), Importe = sum(Importe))
####################################################################################################
#                              Data frame with the new series                                      #
####################################################################################################
# Same positions aggregated per contract/fund (series collapsed) so the new
# series can be derived from the total invested amount.
datoscompra <- archivo %>%
filter(Emisora %in% fondos & CContrato != 22285 & Importe > 0 & Serie != 'BE-0') %>%
group_by(CContrato, Emisora) %>%
summarise(Titulos = sum(Títulos), Importe = sum(Importe))
# New series: derive the series prefix from the current holding, then let
# serie() (from fondos.R) pick the series tier for the total amount.
serie_nueva <- c()
for(i in seq(1,length(datoscompra$Importe),1)){
ind1 <- datosventa$CContrato == datoscompra$CContrato[i]
ind2 <- datosventa$Emisora == datoscompra$Emisora[i]
# NOTE(review): ifelse(ind1 == TRUE, ind2, ind1) is just ind1 & ind2.
indices <- ifelse(ind1 == TRUE,ind2,ind1)
tipo <- datosventa$Serie[indices]
if(nchar(tipo) <= 2){
tipo <- substr(tipo,1,1)
} else {
tipo <- strsplit(tipo,"-")[[1]][1]
}
serie_nueva <- c(serie_nueva,serie(datoscompra$Importe[i],as.character(tipo)))
}
datoscompra$Serie <- serie_nueva
####################################################################################################
#                                      Reclassification                                            #
####################################################################################################
# Join old vs new series per contract/fund and keep only rows whose series
# actually changes.
datos <- merge(datosventa,datoscompra,by.x = c('CContrato','Emisora'),by.y = c('CContrato','Emisora'))
ventas <- ifelse(datos$Serie.x == datos$Serie.y, "No Reclasificar","Reclasificar")
datos$Venta <- ventas
datos <- datos %>% filter(Venta == 'Reclasificar')
datos$Titulos.y <- NULL
datos$Importe.y <- NULL
colnames(datos) <- c('Contrato','Fondo','SerieAnterior','Titulos','Importe','SerieNueva','Accion a realizar')
# Contracts to skip (exclusion list from contratos.xlsx).
omitir <- read_excel('contratos.xlsx')
# NOTE(review): match() yields NA for contracts not present in datos, and
# negative indexing with NA below would error; also length(quitar) is never
# 0 when omitir has rows -- confirm the intended guard.
quitar <- array(match(omitir$Contrato,datos$Contrato))
if(length(quitar) == 0){datos_no_omitidos <- datos} else {datos_no_omitidos <- datos[-quitar,]}
#### CSV output (fund names prefixed with ' to keep Excel from mangling them)
datos_finales <- datos_no_omitidos
datos_finales$Fondo <- paste0("'",datos_no_omitidos$Fondo)
# NOTE(review): write.csv() ignores col.names= (warns); headers are written anyway.
write.csv(datos_finales,"NuevasPosiciones.csv",col.names=TRUE)
###########################################################################################################
#
# Step 2: Buy/sell orders for fixed income funds
#
###########################################################################################################
# Fixed-income subset of the reclassification rows.
datos_rentafija <- datos_no_omitidos[datos_no_omitidos$Fondo %in% c('+CIGUB','+CIGUMP','+CIGULP','+CIUSD','+CIPLUS'),]
# Fund
fondo_venta <- datos_rentafija$Fondo
# Contracts
contratos_venta <- datos_rentafija$Contrato
# Operation type (sell)
operacion_venta <- rep("VTA-SI",length(contratos_venta))
# Series being sold (the old series)
serie_venta <- datos_rentafija$SerieAnterior
# Instrument type: 52 for equity funds, 51 otherwise
tipo_valor_venta <- ifelse(fondo_venta =="+CIEQUS",52,
ifelse(fondo_venta =="+CIBOLS",52,
ifelse(fondo_venta == "AXESEDM",52,51)))
# Price table: fund/series book prices, keyed by "tipo-fund-series"
precios <- read_excel("Precios.xls",skip = 3)
precios <- data.frame(Emisora=precios$Sociedad,Serie=precios$Clase,Precio=precios$`Precio Contable`)
tipo_valor_precios <- ifelse(precios$Emisora =="+CIEQUS",52,
ifelse(precios$Emisora =="+CIBOLS",52,
ifelse(precios$Emisora == "AXESEDM",52,51)))
precios$id <- paste0(tipo_valor_precios,"-",precios$Emisora,"-",precios$Serie)
## Look up the unit price for an instrument-type/fund/series combination in
## the global `precios` table via its precomputed "tipo-fund-series" id key.
prices <- function(tipo, fund, serie) {
  key <- paste0(tipo, "-", fund, "-", serie)
  price <- precios$Precio[which(precios$id == key)]
  as.numeric(price)
}
# Sell price per row (looked up in the precios table)
precio_venta <- mapply(prices,tipo_valor_venta,fondo_venta,serie_venta)
# Units sold
titulos_venta <- as.character(datos_rentafija$Titulos)
# Operation amount = units * price (kept numeric for the buy leg below)
importe_ventacompra <- as.numeric(titulos_venta)*precio_venta
importe_venta <- as.character(importe_ventacompra)
# Trade date (today)
fecha_operacion <- format(Sys.Date(), "%d/%m/%Y")
foperacion <- rep(fecha_operacion,length(operacion_venta))
# Settlement lag in days per fund (fund name, lag) pairs
liq <- cbind(c("+CIGUB","+CIPLUS","+CIGUMP","+CIGULP","+CIUSD","+CIEQUS","+CIBOLS","AXESEDM"),c(0,0,2,2,2,2,2,3))
## Map each fund name to its settlement date: today plus the fund's
## settlement lag (looked up in the global `liq` table), rolled off
## weekends via diainhabil(), formatted as dd/mm/YYYY.
liquidacion <- function(valor) {
  idx <- match(valor, liq)
  lag_days <- liq[idx, 2]
  dates <- Sys.Date() + as.numeric(lag_days)
  dates <- do.call("c", lapply(dates, diainhabil))
  format(dates, "%d/%m/%Y")
}
# Settlement dates for the sells; duplicated because the output file holds
# sell rows followed by buy rows.
fecha_liquidacion <- liquidacion(fondo_venta)
fliquidacion <- rep(fecha_liquidacion,2)
# Capture date (today)
#numero <- ifelse(fondo=="+CIGUB",0,ifelse(fondo=="+CIPLUS",0,-1))
fecha_captura <- format(Sys.Date(), "%d/%m/%Y")
fcaptura <- rep(fecha_captura,length(operacion_venta))
#### Buy leg
# Operation type (buy)
operacion_compra <- rep("Compra Sociedades Inversio",length(contratos_venta))
# Series being bought (the new series)
serie_compra <- datos_rentafija$SerieNueva
# Buy price
precio_compra <- mapply(prices,tipo_valor_venta,fondo_venta,serie_compra)
# Units bought: whole units that fit in the sale proceeds
titulos_compra <- as.character(importe_ventacompra%/%precio_compra)
# Buy amount
importe_compra <- as.character(precio_compra*as.numeric(titulos_compra))
#### Build the pipe-delimited order file (sell rows then buy rows)
operacion <- c(operacion_venta,operacion_compra)
contratos <- rep(contratos_venta,2)
fondo <- rep(fondo_venta,2)
serie <- c(serie_venta,serie_compra)
tipo <- rep(tipo_valor_venta,2)
titulos <- c(titulos_venta,titulos_compra)
precio <- c(precio_venta,precio_compra)
importe <- c(importe_venta,importe_compra)
zero <- as.character(integer(length(fondo)))
documento <- c("",paste0(operacion,"|",contratos,"|",fondo,"|",serie,"|",tipo,"|",titulos,"|",precio,"|",zero,"|",zero,"|",zero,"|",zero,"|",zero,"|",importe,"|",fliquidacion,"|",zero,"|",fcaptura,"|",zero,"|",zero,"|",importe,"|",foperacion,"|",precio,"|",zero))
x <- capture.output(write.table(documento, row.names = FALSE, col.names = FALSE, quote = FALSE))
cat(paste(x, collapse = "\n"), file = "RentaFija.txt")
###########################################################################################################
#
# Step 3: Buy/sell orders for equity funds
# (mirrors Step 2; note it reuses/overwrites the same *_venta variables)
#
###########################################################################################################
# Equity subset of the reclassification rows.
datos_rentavariable <- datos_no_omitidos[datos_no_omitidos$Fondo %in% c('AXESEDM','+CIBOLS','+CIEQUS'),]
# Fund
fondo_venta <- datos_rentavariable$Fondo
# Contracts
contratos_venta <- datos_rentavariable$Contrato
# Operation type (sell)
operacion_venta <- rep("VTA-SI",length(contratos_venta))
# Series being sold (the old series)
serie_venta <- datos_rentavariable$SerieAnterior
# Instrument type: 52 for equity funds, 51 otherwise
tipo_valor_venta <- ifelse(fondo_venta =="+CIEQUS",52,
ifelse(fondo_venta =="+CIBOLS",52,
ifelse(fondo_venta == "AXESEDM",52,51)))
# Sell price
precio_venta <- mapply(prices,tipo_valor_venta,fondo_venta,serie_venta)
# Units sold
titulos_venta <- as.character(datos_rentavariable$Titulos)
# Operation amount = units * price
importe_ventacompra <- as.numeric(titulos_venta)*precio_venta
importe_venta <- as.character(importe_ventacompra)
# Trade date (today)
fecha_operacion <- format(Sys.Date(), "%d/%m/%Y")
foperacion <- rep(fecha_operacion,length(operacion_venta))
# Settlement dates (per-fund lag, weekend-rolled); duplicated for sell+buy rows
fecha_liquidacion <- liquidacion(fondo_venta)
fliquidacion <- rep(fecha_liquidacion,2)
# Capture date (today)
#numero <- ifelse(fondo=="+CIGUB",0,ifelse(fondo=="+CIPLUS",0,-1))
fecha_captura <- format(Sys.Date(), "%d/%m/%Y")
fcaptura <- rep(fecha_captura,length(operacion_venta))
#### Buy leg
# Operation type (buy)
operacion_compra <- rep("Compra Sociedades Inversio",length(contratos_venta))
# Series being bought (the new series)
serie_compra <- datos_rentavariable$SerieNueva
# Buy price
precio_compra <- mapply(prices,tipo_valor_venta,fondo_venta,serie_compra)
# Units bought: whole units that fit in the sale proceeds
titulos_compra <- as.character(importe_ventacompra%/%precio_compra)
# Buy amount
importe_compra <- as.character(precio_compra*as.numeric(titulos_compra))
#### Build the pipe-delimited order file (sell rows then buy rows)
operacion <- c(operacion_venta,operacion_compra)
contratos <- rep(contratos_venta,2)
fondo <- rep(fondo_venta,2)
serie <- c(serie_venta,serie_compra)
tipo <- rep(tipo_valor_venta,2)
titulos <- c(titulos_venta,titulos_compra)
precio <- c(precio_venta,precio_compra)
importe <- c(importe_venta,importe_compra)
zero <- as.character(integer(length(fondo)))
documento <- c("",paste0(operacion,"|",contratos,"|",fondo,"|",serie,"|",tipo,"|",titulos,"|",precio,"|",zero,"|",zero,"|",zero,"|",zero,"|",zero,"|",importe,"|",fliquidacion,"|",zero,"|",fcaptura,"|",zero,"|",zero,"|",importe,"|",foperacion,"|",precio,"|",zero))
x <- capture.output(write.table(documento, row.names = FALSE, col.names = FALSE, quote = FALSE))
cat(paste(x, collapse = "\n"), file = "RentaVariable.txt")
|
44d9a712494df5074a9cd9a02f2767112c8521ae
|
c6c9b0c0ee14337e2d46176bba525c0d85decae6
|
/shinymodule.R
|
b6488df19c9f4a6162402219e590cfbabea33155
|
[] |
no_license
|
benearnthof/FFFT
|
dcde3cc0727a9d6a896c2575433750473bffb142
|
4881f744bb83f9187299caf75e9d09f2ad06bbae
|
refs/heads/master
| 2023-02-09T20:27:04.229070
| 2021-01-04T15:57:14
| 2021-01-04T15:57:14
| 261,377,588
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,761
|
r
|
shinymodule.R
|
library(shiny)
## UI half of the counter module: a button plus a text display of the
## current count, both namespaced under `id`.
counterButton <- function(id, label = "Counter") {
  ns <- NS(id)
  button <- actionButton(ns("button"), label = label)
  display <- verbatimTextOutput(ns("out"))
  tagList(button, display)
}
## Server half of the counter module: increments a reactive count on every
## button press, renders it, and returns the reactive for callers to read.
counter <- function(input, output, session) {
  n_clicks <- reactiveVal(0)
  observeEvent(input$button, {
    n_clicks(n_clicks() + 1)
  })
  output$out <- renderText(n_clicks())
  n_clicks
}
## UI half of the CSV-upload module: file picker, header checkbox, and
## quote-style selector, all namespaced under `id`.
csvFileInput <- function(id, label = "CSV file") {
  # Namespace function so the module's input IDs cannot collide with others.
  ns <- NS(id)
  quote_choices <- c(
    "None" = "",
    "Double quote" = "\"",
    "Single quote" = "'"
  )
  # A tagList lets the module return several UI elements at once.
  tagList(
    fileInput(ns("file"), label),
    checkboxInput(ns("heading"), "Has heading"),
    selectInput(ns("quote"), "Quote", quote_choices)
  )
}
## Server half of the CSV-upload module. Parses the uploaded file with the
## user's header/quote choices and returns the parsed data as a reactive.
csvFile <- function(input, output, session, stringsAsFactors) {
  # The currently selected file; validate() suspends downstream reactives
  # until a file has actually been chosen.
  userFile <- reactive({
    validate(need(input$file, message = FALSE))
    input$file
  })

  # The user's data, parsed into a data frame.
  dataframe <- reactive({
    upload <- userFile()
    read.csv(upload$datapath,
             header = input$heading,
             quote = input$quote,
             stringsAsFactors = stringsAsFactors)
  })

  # Side effect only: log each upload to the console.
  observe({
    cat(sprintf("File %s was uploaded", userFile()$name), "\n")
  })

  dataframe
}
## Demo app wiring the counter and CSV-upload modules together.
ui <- fluidPage(
  counterButton("counter1", "Counter #1"),
  csvFileInput("csvinput1", ".csv Input")
)
server <- function(input, output, session) {
  callModule(counter, "counter1")
  ## FIX: csvFile() has no default for `stringsAsFactors`, so the original
  ## call errored as soon as the dataframe reactive was first evaluated.
  callModule(csvFile, "csvinput1", stringsAsFactors = FALSE)
}
shinyApp(ui, server)
## UI half of the linked-scatter module: two side-by-side plots sharing a
## single brush id, so a selection in either plot drives both.
linkedScatterUI <- function(id) {
  ns <- NS(id)
  brush_id <- ns("brush")
  left_plot <- column(6, plotOutput(ns("plot1"), brush = brush_id))
  right_plot <- column(6, plotOutput(ns("plot2"), brush = brush_id))
  fluidRow(left_plot, right_plot)
}
## Server half of the linked-scatter module. Augments the data with a
## logical "selected_" column reflecting the shared brush, renders both
## scatter plots from it, and returns the augmented data as a reactive.
linkedScatter <- function(input, output, session, data, left, right) {
  dataWithSelection <- reactive({
    brushedPoints(data(), input$brush, allRows = TRUE)
  })

  output$plot1 <- renderPlot(scatterPlot(dataWithSelection(), left()))
  output$plot2 <- renderPlot(scatterPlot(dataWithSelection(), right()))

  dataWithSelection
}
## Scatter plot of cols[1] vs cols[2], coloring points by the logical
## "selected_" column added by brushedPoints(allRows = TRUE).
scatterPlot <- function(data, cols) {
  # FIX: aes_string() is deprecated in ggplot2 -- use the .data pronoun;
  # guide = FALSE inside a scale is deprecated -- use guide = "none".
  ggplot(data, aes(x = .data[[cols[1]]], y = .data[[cols[2]]])) +
    geom_point(aes(color = selected_)) +
    scale_color_manual(values = c("black", "#66D65C"), guide = "none")
}
|
7ee5a46a819252586920fe4be5e1bb1043e1f20b
|
a6e4f9bfde9278133f702383542b0148307ec2dc
|
/finalmodel/residmodel.R
|
a92dd95f206c44a66d19005db477220ac7bd7ec9
|
[] |
no_license
|
dill/giam-italy
|
024682da7c61f10d7ae917cf7000c37a86294981
|
7457e445f675ae8d90f8ed3593959e9362dce121
|
refs/heads/master
| 2021-01-01T05:36:51.123893
| 2011-09-12T11:59:38
| 2011-09-12T11:59:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,267
|
r
|
residmodel.R
|
# Fit soap-film smooths to the deviance residuals of the previously fitted
# per-region models to see what spatial/temporal structure remains, then
# map the fitted residual surfaces per year.
# relies on you having run model.R FIRST
# load some useful libraries (soap and dillhandy are non-CRAN -- verify availability)
library(maps)
library(mapdata)
library(soap)
library(dillhandy)
#load("fullmod-Gamma.RData")
load("fullmod-Tweedie(1.2).RData") # load the data
# NOTE(review): it.soap/sa.soap/sc.soap below initially come from the loaded
# .RData; residuals() uses those *previous* fits before each name is rebound
# to the new residual model.
# Italy boundary
it<-list(x=fixdat$italy$map$km.e,y=fixdat$italy$map$km.n)
# Sardinia boundary
sa<-list(x=fixdat$sardinia$map$km.e,y=fixdat$sardinia$map$km.n)
# Sicily boundary
sc<-list(x=fixdat$sicily$map$km.e,y=fixdat$sicily$map$km.n)
########################
# Italy
it.dat<-list(x=fixdat$italy$dat$km.e,
y=fixdat$italy$dat$km.n,
year=fixdat$italy$dat$year,
share_100=residuals(it.soap,type="deviance"))
# basis size
it.bsize<-c(20,6)
# setup the soap knots
soap.knots.it<-make_soap_grid(it,c(15,15))
soap.knots.it<-pe(soap.knots.it,-c(1,46)) #15 x15  # pe() presumably drops problem knots -- from dillhandy
it.soap<- gam(share_100~
te(x,y,year,bs=c("sf","cr"),k=it.bsize,d=c(2,1),xt=list(list(bnd=list(it)),NULL))+
te(x,y,year,bs=c("sw","cr"),k=it.bsize,d=c(2,1),xt=list(list(bnd=list(it)),NULL))-1
,knots=soap.knots.it,data=it.dat,method="REML")
##########################
gc()
#########################
## Sardinia
sa.dat<-list(x=fixdat$sardinia$dat$km.e,
y=fixdat$sardinia$dat$km.n,
year=fixdat$sardinia$dat$year,
share_100=residuals(sa.soap,type="deviance"))
soap.knots.sa<-make_soap_grid(sa,c(5,6))
sa.ksize<-c(8,6)
sa.soap<- gam(share_100~
te(x,y,year,bs=c("sf","cr"),k=sa.ksize,d=c(2,1),xt=list(list(bnd=list(sa)),NULL))+
te(x,y,year,bs=c("sw","cr"),k=sa.ksize,d=c(2,1),xt=list(list(bnd=list(sa)),NULL))
,knots=soap.knots.sa,data=sa.dat,method="REML")
##########################
gc()
########################
# Sicily
sc.dat<-list(x=fixdat$sicily$dat$km.e,
y=fixdat$sicily$dat$km.n,
year=fixdat$sicily$dat$year,
share_100=residuals(sc.soap,type="deviance"))
# setup the soap knots
soap.knots.sc<-make_soap_grid(sc,c(6,6))
sc.bsize<-c(10,6)
sc.soap<- gam(share_100~
te(x,y,year,bs=c("sf","cr"),k=sc.bsize,d=c(2,1),xt=list(list(bnd=list(sc)),NULL))+
te(x,y,year,bs=c("sw","cr"),k=sc.bsize,d=c(2,1),xt=list(list(bnd=list(sc)),NULL))
,knots=soap.knots.sc,data=sc.dat,method="REML")
##########################
gc()
########################
# now make the image plot
# options
grid.res.x<-100
grid.res.y<-60
years<-as.numeric(levels(as.factor(it.dat$year)))
# setup the prediction grid (bounding box covering all three regions)
xmin<-min(c(it$x,sa$x,sc$x))
ymin<-min(c(it$y,sa$y,sc$y))
xmax<-max(c(it$x,sa$x,sc$x))
ymax<-max(c(it$y,sa$y,sc$y))
xm <- seq(xmin,xmax,length=grid.res.x);yn<-seq(ymin,ymax,length=grid.res.y)
xx <- rep(xm,grid.res.y);yy<-rep(yn,rep(grid.res.x,grid.res.y))
im.mat<-matrix(NA,length(years),grid.res.x*grid.res.y)
# which grid points relate to which places?
it.onoff<-inSide(it,xx,yy)
sa.onoff<-inSide(sa,xx,yy)
sc.onoff<-inSide(sc,xx,yy)
# do the prediction (one grid row per year; cells outside all regions stay NA)
for (i in 1:length(years)){
pred.grid<-list(x=xx,y=yy,year=rep(years[i],length(xx)))
# italy
im.mat[i,it.onoff]<-predict(it.soap,pe(pred.grid,it.onoff),type="response")
# sardinia
im.mat[i,sa.onoff]<-predict(sa.soap,pe(pred.grid,sa.onoff),type="response")
# sicily
im.mat[i,sc.onoff]<-predict(sc.soap,pe(pred.grid,sc.onoff),type="response")
}
# limits for the plot
xlim<-c(xm[1]-25,xm[length(xm)])
ylim<-c(yn[1]-25,yn[length(yn)]+25)
zlim<-c(min(im.mat,na.rm=TRUE),max(im.mat,na.rm=TRUE))
######################
# SAVE
######################
save.image(paste("residmod-",it.soap$family[[1]],".RData",sep=""))
#pdf(paste("residmaps-",it.soap$family[[1]],".pdf",sep=""),width=9)
postscript(paste("residmaps-",it.soap$family[[1]],".ps",sep=""),width=9)
par(mfrow=c(2,3),mar=c(4.5,4.5,2,2))
for (i in 1:length(years)){
# plot the image
image(z=matrix(im.mat[i,],grid.res.x,grid.res.y),x=xm,y=yn,
col=heat.colors(100),xlab="km (e)",ylab="km (n)",
main=years[i],asp=1,cex.main=1.4,
cex.lab=1.4,cex.axis=1.3,zlim=zlim,xlim=xlim,ylim=ylim)
# then the contour ontop
contour(xm,yn,matrix(im.mat[i,],grid.res.x,grid.res.y),
levels=seq(zlim[1],zlim[2],by=1),col="blue",add=TRUE)
# then the country borders
lines(it,lwd=2)
lines(sa,lwd=2)
lines(sc,lwd=2)
}
dev.off()
|
dd538f646fef05b26af9924272210a959c2b1f3e
|
6e32987e92e9074939fea0d76f103b6a29df7f1f
|
/googleautomlv1beta1.auto/man/TextExtractionModelMetadata.Rd
|
8b68866439835a310fa713bd873373f964ee2ff8
|
[] |
no_license
|
justinjm/autoGoogleAPI
|
a8158acd9d5fa33eeafd9150079f66e7ae5f0668
|
6a26a543271916329606e5dbd42d11d8a1602aca
|
refs/heads/master
| 2023-09-03T02:00:51.433755
| 2023-08-09T21:29:35
| 2023-08-09T21:29:35
| 183,957,898
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 611
|
rd
|
TextExtractionModelMetadata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/automl_objects.R
\name{TextExtractionModelMetadata}
\alias{TextExtractionModelMetadata}
\title{TextExtractionModelMetadata Object}
\usage{
TextExtractionModelMetadata(modelHint = NULL)
}
\arguments{
\item{modelHint}{Indicates the scope of model use case}
}
\value{
TextExtractionModelMetadata object
}
\description{
TextExtractionModelMetadata Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Model metadata that is specific to text extraction.
}
\concept{TextExtractionModelMetadata functions}
|
61b5111ef5e5b999f227029ba06b373f267165fe
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613125609-test.R
|
dc187d91635c2321fcdec0dc58a0e10d31537382
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 326
|
r
|
1613125609-test.R
|
# Auto-generated fuzz/valgrind regression input for multivariance:::match_rows.
# A is a 6x1 matrix of extreme doubles (denormals, huge magnitudes), B a 10x1
# zero matrix; the structure of the result is printed for the run log.
testlist <- list(A = structure(c(1.08768969560471e-43, 1.26371023742382e+225, 2.19450232779207e+294, 2.44323852959515e-308, 9.69722668877813e-232, 2.40225282744071e-169), .Dim = c(6L, 1L)), B = structure(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
b85c73b2b8117c13984e84bbb088d0f4b61909ef
|
bd2122f8c31442514f5a87dc4bab42a535825c4f
|
/plot4.R
|
0a72e4d66e145f25a95322369213269fe1b509fc
|
[] |
no_license
|
RamanChoudhury/ExData_Plotting1
|
d94e5fdcdb7a1798e0150619010a8abf7823e65b
|
743bd2fb8b1c2c81de215747173cab1c3839590f
|
refs/heads/master
| 2021-01-14T12:40:08.915020
| 2016-07-17T14:04:09
| 2016-07-17T14:04:09
| 63,393,147
| 0
| 0
| null | 2016-07-15T05:04:08
| 2016-07-15T05:04:08
| null |
UTF-8
|
R
| false
| false
| 2,703
|
r
|
plot4.R
|
## plot4.R -- draw four time-series panels (2 x 2 grid) of household power
## data for 1-2 Feb 2007 and write them to plot4.png.  Expects
## "household_power_consumption.txt" in the working directory.
library(lubridate)
library(dplyr)

## Read the whole file; columns arrive as character and are converted
## explicitly below.
total_data <- read.table("./household_power_consumption.txt", sep = ";",
                         header = T, stringsAsFactors = FALSE)
## Combine the Date and Time strings into a single POSIXct timestamp column.
total_data <- transform(total_data,
                        date_time = dmy_hms(paste(Date, Time, sep = " ")))
## Keep only 1 Feb 2007 and 2 Feb 2007, ordered by timestamp.
subset_data <- subset(total_data,
                      dmy(Date) %in% dmy(c("1 feb 2007", "2 feb 2007")))
subset_data <- subset_data[order(subset_data$date_time), ]
## Convert every measurement column from character to numeric in one pass.
subset_data <- transform(subset_data,
                         Global_active_power   = as.numeric(Global_active_power),
                         Global_reactive_power = as.numeric(Global_reactive_power),
                         Voltage               = as.numeric(Voltage),
                         Global_intensity      = as.numeric(Global_intensity),
                         Sub_metering_1        = as.numeric(Sub_metering_1),
                         Sub_metering_2        = as.numeric(Sub_metering_2),
                         Sub_metering_3        = as.numeric(Sub_metering_3))

## Open the PNG device and lay out a 2 x 2 grid (filled row by row).
png(filename = "plot4.png")
par(mfrow = c(2, 2))
par(mar = c(4, 4, 2, 2))
par(oma = c(1, 1, 1, 1))
with(subset_data, {
  # Panel 1 (top-left): global active power over time.
  plot(date_time, Global_active_power, xlab = "",
       ylab = "Global Active Power (kilowatts)", type = "l")
  # Panel 2 (top-right): voltage over time.
  plot(date_time, Voltage, xlab = "datetime", ylab = "Voltage", type = "l")
  # Panel 3 (bottom-left): the three sub-metering series overlaid.
  plot(date_time, Sub_metering_1, col = "black", xlab = "",
       ylab = "Energy Sub-Metering", type = "l")
  lines(date_time, Sub_metering_2, col = "red")
  lines(date_time, Sub_metering_3, col = "blue")
  legend("topright", pch = "__", col = c("black", "red", "blue"),
         legend = c("Sub-metering_1", "Sub-metering_2", "Sub-metering_3"),
         bty = "n", cex = 0.5)
  # Panel 4 (bottom-right): global reactive power over time.
  plot(date_time, Global_reactive_power, xlab = "datetime",
       ylab = "Global_reactive_power", type = "l")
})
dev.off()
|
b9bc3f960988f97bf90aba7cf4ac134e15f1c0b5
|
6034d565642a30876b7b7a025b74a31580c44613
|
/R/datasets.R
|
e71c10bf9610ecdd8995a177285298757fa6a258
|
[] |
no_license
|
cran/parameters
|
a95beba8c8bd820a88b74ca407609cc08a62fcab
|
f19575ccdbbd303a1896a13d8b4b8210563cabfa
|
refs/heads/master
| 2023-06-08T08:58:24.080762
| 2023-05-26T09:20:02
| 2023-05-26T09:20:02
| 211,083,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 921
|
r
|
datasets.R
|
# Roxygen documentation stubs for package data sets; the NULL after each
# block is the conventional placeholder object the docs attach to.
#' @docType data
#' @title Sample data set
#' @name fish
#' @keywords data
#'
#' @description A sample data set, used in tests and some examples.
NULL
#' @docType data
#' @title Sample data set
#' @name qol_cancer
#' @keywords data
#'
#' @description A sample data set with longitudinal data, used in the vignette describing the `datawizard::demean()` function. Health-related quality of life from cancer-patients was measured at three time points (pre-surgery, 6 and 12 months after surgery).
#'
#' @format A data frame with 564 rows and 7 variables:
#' \describe{
#'   \item{ID}{Patient ID}
#'   \item{QoL}{Quality of Life Score}
#'   \item{time}{Timepoint of measurement}
#'   \item{age}{Age in years}
#'   \item{phq4}{Patients' Health Questionnaire, 4-item version}
#'   \item{hospital}{Hospital ID, where patient was treated}
#'   \item{education}{Patients' educational level}
#' }
NULL
|
bb8be5d14f0bc731bab9efdaca950df5d52a9adb
|
da20f463f5425a6540c6613ea280c03220414b6c
|
/plot4.R
|
321f0438a8d39a223269a0489cfe02b24a664fe3
|
[] |
no_license
|
samyeager/ExData_Plotting1
|
7c374cb04157e9b135594a01262e812a482b3b20
|
133ae903bd548b52ba067702d4beb0d208773f2f
|
refs/heads/master
| 2020-12-30T18:57:51.091224
| 2014-08-10T21:35:16
| 2014-08-10T21:35:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,021
|
r
|
plot4.R
|
## Plots a 2x2 plotting grid for the "household_power_consumption.txt" dataset.
## The grid plots are:
##   top-left:     date+time v. Global Active Power
##   bottom-left:  date+time v. active energy by sub-meter
##   top-right:    date+time v. Voltage
##   bottom-right: date+time v. Global Reactive Power
##
## Args:
##   data_file: path of the semicolon-separated input file; the default keeps
##              the original behaviour of reading from the working directory.
##   out_file:  path of the PNG file to write (default "plot4.png").
## Returns: the output path, invisibly (the function is called for its
##          side effect of writing the PNG).
plot4 <- function(data_file = "household_power_consumption.txt",
                  out_file = "plot4.png") {
    # read and condition the data; '?' marks missing values in this data set
    col_classes <- c("character", "character", rep("double", 7))
    data <- read.delim(data_file, sep = ";",
                       na.strings = "?", colClasses = col_classes)
    # keep only the two target days
    data <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
    if (nrow(data) == 0) {
        stop("no rows found for 1/2/2007 or 2/2/2007 in ", data_file,
             call. = FALSE)
    }
    datetime <- strptime(paste(data$Date, data$Time, sep = " "),
                         "%d/%m/%Y %H:%M:%S")
    data <- data.frame(datetime, data[, 3:9])
    # open the file device; guarantee it is closed even if plotting fails
    png(file = out_file, height = 504, width = 504, bg = "transparent")
    on.exit(dev.off(), add = TRUE)
    par(mfcol = c(2, 2))  # fill the grid column by column
    with(data, {
        # plot top-left graph
        plot(datetime, Global_active_power,
             type = "n",
             xlab = "",
             ylab = "Global Active Power")
        lines(datetime, Global_active_power)
        # plot bottom-left graph
        plot(datetime, Sub_metering_1,
             type = "n",
             xlab = "",
             ylab = "Energy sub metering")
        lines(datetime, Sub_metering_1, col = "black")
        lines(datetime, Sub_metering_2, col = "red")
        lines(datetime, Sub_metering_3, col = "blue")
        legend("topright", lwd = 1,
               bty = "n",
               col = c("black", "red", "blue"),
               legend = names(data[6:8]))
        # plot top-right graph
        plot(datetime, Voltage, type = "n")
        lines(datetime, Voltage)
        # plot bottom-right graph
        plot(datetime, Global_reactive_power, type = "n")
        lines(datetime, Global_reactive_power)
    })
    invisible(out_file)
}
|
66bdccca6594f579f4b5ffa2228b37086f6a4af2
|
5b532b0284f115cb9b96800d92f35d76283c827f
|
/R/ICM-methods.r
|
e64249e471da9b26bf7d88e39f7b488127d5845e
|
[] |
no_license
|
ge11232002/TFBSTools
|
cc5dc41216e951a3ac4904f8fd5a3b5ef47ed601
|
1bda142f74c546c9c58242686e5d52f4d9a0bbbd
|
refs/heads/master
| 2021-09-15T23:23:47.428928
| 2021-09-10T21:18:42
| 2021-09-10T21:18:42
| 46,871,450
| 22
| 10
| null | 2021-09-10T20:40:06
| 2015-11-25T15:53:42
|
R
|
UTF-8
|
R
| false
| false
| 7,948
|
r
|
ICM-methods.r
|
### ------------------------------------------------------------------------
### The "ICM" generic and methods
## toICM() on a plain character vector of aligned sequences: coerce to a
## DNAStringSet and delegate to that method.
setMethod("toICM", "character",
  function(x, pseudocounts=0.8, schneider=FALSE,
           bg=c(A=0.25, C=0.25, G=0.25, T=0.25)){
    toICM(DNAStringSet(x), pseudocounts=pseudocounts,
          schneider=schneider, bg=bg)
  }
)
## toICM() on a DNAStringSet: tabulate per-column base counts and delegate
## to the matrix method.
setMethod("toICM", "DNAStringSet",
  function(x, pseudocounts=0.8, schneider=FALSE,
           bg=c(A=0.25, C=0.25, G=0.25, T=0.25)){
    ## all sequences must share one width to form a count matrix
    if(!isConstant(width(x))){
      stop("'x' must be rectangular (i.e. have a constant width)")
    }
    counts <- consensusMatrix(x)
    toICM(counts, pseudocounts=pseudocounts, schneider=schneider, bg=bg)
  }
)
## toICM() on a PFMatrix: convert the stored count matrix and rebuild an
## ICMatrix that carries over all the original metadata.
setMethod("toICM", "PFMatrix",
  function(x, pseudocounts=0.8, schneider=FALSE, bg=NULL){
    ## default the background to the one stored on the object
    if(is.null(bg)){
      bg <- bg(x)
    }
    profile <- toICM(Matrix(x), pseudocounts=pseudocounts,
                     schneider=schneider, bg=bg)
    ICMatrix(ID=ID(x), name=name(x), matrixClass=matrixClass(x),
             strand=strand(x), bg=bg, tags=tags(x),
             profileMatrix=profile,
             pseudocounts=pseudocounts, schneider=schneider)
  }
)
## toICM() on a PFMatrixList: apply the PFMatrix method element-wise and
## re-wrap the results in an ICMatrixList container.
setMethod("toICM", "PFMatrixList",
  function(x, pseudocounts=0.8, schneider=FALSE, bg=NULL){
    converted <- lapply(x, toICM, pseudocounts=pseudocounts,
                        schneider=schneider, bg=bg)
    do.call(ICMatrixList, converted)
  })
### Assumes 'x' is a Position *Frequency* Matrix (PFM) and computes the
### corresponding Information *Content* Matrix (ICM).
## Expected small-sample column entropy E[Hnb] for a flat background.
##
## Returns the tabulated expected Shannon entropy (in bits) of a column
## built from `colsum` observations drawn from a uniform A/C/G/T
## background.  The table agrees with schneider_Hnb_exact() under equal
## base probabilities and is only available for column sums 1..30.
##
## Args:
##   colsum: whole-number column sum (number of sequences), 1 <= colsum <= 30.
## Returns: the precomputed expected entropy.
## Raises: an error for non-integer colsum or colsum outside 1..30.
schneider_Hnb_precomputed <- function(colsum){
  # colsum indexes the lookup table, so it must be a whole number
  if(colsum %% 1 != 0)
    stop("the colsums must be integers")
  # fixed: error message previously had a stray trailing ')'
  if(colsum < 1 || colsum > 30)
    stop("Precomputed params only available for colsums 1 to 30")
  precomputed <- c(0, 0.75, 1.11090234442608, 1.32398964833609,
                   1.46290503577084,
                   1.55922640783176, 1.62900374746751, 1.68128673969433,
                   1.7215504663901, 1.75328193031842, 1.77879136615189,
                   1.79965855531179, 1.81699248819687, 1.8315892710679,
                   1.84403166371213, 1.85475371994775, 1.86408383599326,
                   1.87227404728809, 1.87952034817826, 1.88597702438913,
                   1.89176691659196, 1.89698887214968, 1.90172322434865,
                   1.90603586889234, 1.90998133028897, 1.91360509239859,
                   1.91694538711761, 1.92003457997914, 1.92290025302018,
                   1.92556605820924)
  precomputed[colsum]
}
## Exact expected column entropy E[Hnb] for the Schneider small-sample
## correction.
##
## Enumerates every composition (na, nc, ng, nt) of `colsum` counts over the
## four bases, computes the multinomial probability Pnb of that composition
## under `bg_probabilities`, and accumulates the probability-weighted
## Shannon entropy (log2, i.e. bits) of the composition.
##
## Args:
##   colsum:           integer column sum (number of sequences in a column)
##   bg_probabilities: numeric length-4 background probabilities (A,C,G,T)
## Returns: the exact expected entropy E[Hnb].
schneider_Hnb_exact = function(colsum, bg_probabilities){
## This function is validated with the precomputed above.
# NOTE(review): isFlat is computed here but never used in this function.
isFlat = length(unique(bg_probabilities)) == 1
# a single observation always has zero entropy
if(colsum == 1)
return(0)
# start the enumeration at the composition (colsum, 0, 0, 0)
ns = c(na=colsum, nc=0, ng=0, nt=0)
E_Hnb = 0
while(1){
# multinomial probability of this composition under the background
Pnb = factorial(colsum) /
(factorial(ns["na"]) * factorial(ns["nc"]) *
factorial(ns["ng"]) * factorial(ns["nt"])) *
prod(bg_probabilities^ns)
# entropy of the composition; 0*log2(0) terms are dropped via is.finite()
Hnb = -1 * sum((ns/colsum * log2(ns/colsum))[is.finite(log2(ns/colsum))])
E_Hnb = E_Hnb + Pnb * Hnb
# advance to the next composition (carry-style enumeration: shift counts
# from ng to nt, borrowing from nc and then na when a digit is exhausted)
if(ns["nt"] != 0){
if(ns["ng"] != 0){
ns["ng"] = ns["ng"] - 1
ns["nt"] = ns["nt"] + 1
}else if(ns["nc"] != 0){
ns["nc"] = ns["nc"] - 1
ns["ng"] = ns["nt"] + 1
ns["nt"] = 0
}else if(ns["na"] != 0){
ns["na"] = ns["na"] - 1
ns["nc"] = ns["nt"] + 1
ns["nt"] = 0
}else
# all counts have migrated to nt: every composition has been visited
break
}else{
if(ns["ng"] != 0){
ns["ng"] = ns["ng"] - 1
ns["nt"] = ns["nt"] + 1
}else if(ns["nc"] != 0){
ns["nc"] = ns["nc"] - 1
ns["ng"] = ns["ng"] + 1
}else{
ns["na"] = ns["na"] - 1
ns["nc"] = ns["nc"] + 1
ns["nt"] = 0
}
}
}
return(E_Hnb)
}
## Large-sample approximation to the expected column entropy E[Hnb]:
## the background entropy Hg minus the first-order small-sample bias
## term 3 / (2 * ln(2) * colsum).
schneider_Hnb_approx <- function(colsum, Hg){
  Hg - 3 / (2 * log(2) * colsum)
}
## Per-column Schneider small-sample correction for a count matrix `x`:
## returns Hnb - Hg for each column, where Hnb is the expected entropy of
## a column with that column sum and Hg is the background entropy.  The
## exact value (or, for a flat background, the precomputed table) is used
## up to 30 sequences; the analytic approximation is used beyond that.
schneider_correction <- function(x, bg_probabilities){
  exact_max <- 30
  Hg <- -sum(bg_probabilities * log2(bg_probabilities))
  flat_bg <- length(unique(bg_probabilities)) == 1

  ## expected entropy for one column sum
  expected_Hnb <- function(colsum){
    if(colsum > exact_max){
      schneider_Hnb_approx(colsum, Hg)
    }else if(flat_bg){
      schneider_Hnb_precomputed(colsum)
    }else{
      schneider_Hnb_exact(colsum, bg_probabilities)
    }
  }

  ## compute once per distinct column sum, then look up per column
  col_totals <- colSums(x)
  distinct_totals <- unique(col_totals)
  Hnb_by_sum <- vapply(distinct_totals, expected_Hnb, numeric(1))
  names(Hnb_by_sum) <- as.character(distinct_totals)
  Hnb_by_sum[as.character(col_totals)] - Hg
}
## Core conversion: Position Frequency Matrix (counts) -> Information
## Content Matrix.  Adds background-weighted pseudocounts, normalises each
## column to probabilities, then scales by the column information content
## D = log2(nrow) + sum(p*log2(p)).  Optionally applies the Schneider
## small-sample correction to the column totals.
setMethod("toICM", "matrix",
## This is validated by the TFBS perl module implementation.
function(x, pseudocounts=0.8,
## This is the recommended value from
## http://nar.oxfordjournals.org/content/37/3/939.long.
schneider=FALSE,
bg=c(A=0.25, C=0.25, G=0.25, T=0.25)){
x = normargPfm(x)
## From here 'x' is guaranteed to have at least 1 column and to have
## all its columns sum to the same value.
## In fact, these columns sum could be different...
## Modify the .normargPfm a little bit.
bg= normargPriorParams(bg)
#nseq = sum(x[ ,1L])
# per-column sequence counts (columns are allowed to differ)
nseq = colSums(x)
priorN = sum(bg)
# recycle a scalar pseudocount to one value per column
pseudocounts = rep(0, ncol(x)) + pseudocounts
#if(length(pseudocounts) == 1)
#p = (x + bg_probabilities*pseudocounts) / (nseq + pseudocounts)
# p = sweep(x + bg_probabilities*pseudocounts, MARGIN=2, nseq + pseudocounts, "/")
#else
#p = (x + bg_probabilities %*% t(pseudocounts)) / (nseq + pseudocounts)
# smoothed column probabilities: (counts + bg * pseudocount) / (n + priorN * pseudocount)
p = sweep(x + bg %*% t(pseudocounts), MARGIN=2,
nseq + priorN * pseudocounts, "/")
# per-column information content in bits; NaN from 0*log2(0) is dropped
D = log2(nrow(x)) + colSums(p * log2(p), na.rm=TRUE)
#ICMMatrix = t(t(p) * D)
ICMMatrix = sweep(p, MARGIN=2, D, "*")
## This core function might be better than the operation above
if(schneider){
# rescale each column so its total matches the Schneider-corrected total
correntedColSums = colSums(ICMMatrix) + schneider_correction(x, bg)
ICMMatrix = sweep(ICMMatrix, MARGIN=2,
correntedColSums/colSums(ICMMatrix), "*")
}
return(ICMMatrix)
}
)
### --------------------------------------------------------------------
### Plot the seqlogo
###
#setMethod("plotLogo", "ICMatrix",
# function(x, ic.scale = TRUE, xaxis = TRUE, yaxis = TRUE,
# xfontsize = 15, yfontsize = 15){
# m = Matrix(x)
# m = sweep(m, MARGIN=2, colSums(m), "/")
# m = makePWM(m)
# seqLogo(m, ic.scale = ic.scale, xaxis = xaxis, yaxis = yaxis,
# xfontsize = xfontsize, yfontsize = yfontsize)
# }
# )
## Draw a sequence logo for an ICMatrix: re-normalise each column of the
## information-content matrix to probabilities and hand the result to
## seqLogo::seqLogo().
setMethod("seqLogo", "ICMatrix",
function(x, ic.scale = TRUE, xaxis = TRUE, yaxis = TRUE,
xfontsize = 15, yfontsize = 15){
m <- Matrix(x)
# columns must sum to 1 for makePWM()
m <- sweep(m, MARGIN=2, colSums(m), "/")
## Deal with the case when information content is 0 for all bases.
# (0/0 above yields NaN; fall back to a uniform column)
m[is.nan(m)] <- 0.25
m <- makePWM(m)
seqLogo::seqLogo(m, ic.scale = ic.scale,
xaxis = xaxis, yaxis = yaxis,
xfontsize = xfontsize, yfontsize = yfontsize)
}
)
### ----------------------------------------------------------------------
### Utilities methods
### Exported!
## Total information content per matrix column: the column sums of the ICM.
setMethod("totalIC", "ICMatrix",
function(x) colSums(Matrix(x))
)
|
e819202a1280cb13d8eb6f37469213a22b5d20ab
|
e35092ffd25f20bdd5f52948f9ff276088ed2836
|
/class_script.R
|
6462b560d15716bef5a3f86a68c64cdec45ae6bf
|
[] |
no_license
|
tisem-digital-marketing/smwa-networks-inter
|
a5825b43fdfc6653e51292b8ba8cc4c51aae4830
|
382f641ca38538ad271c725a0a1de614788f1542
|
refs/heads/main
| 2023-05-09T11:53:49.587725
| 2021-05-27T12:25:02
| 2021-05-27T12:25:02
| 371,324,716
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,478
|
r
|
class_script.R
|
library(readr)
library(tidygraph)
library(ggraph)
library(dplyr)
library(tidyr)
library(tibble)
library(igraph)
# --- Download the Data --- #
url <- "https://github.com/rfordatascience/tidytuesday/raw/master/tidytuesday_tweets/data.rds"
out_file <- "data/tt_tweets.rds"
download.file(url, destfile = out_file, mode = "wb")
# --- Load the data into R --- #
tweets <- read_rds('data/tt_tweets.rds')
# --- Construct a Mentions Network --- #
# edge list: one row per (author, mentioned user) pair
# NOTE(review): the filter compares against the literal string 'NA', which
# assumes missing mentions are stored as that text -- confirm upstream.
connections <-
tweets %>%
filter(mentions_screen_name != 'NA') %>%
select(from = screen_name, to = mentions_screen_name)
# mentions_screen_name can hold several users per tweet: expand to one edge
# per mention and drop self-mentions
connections <-
connections %>%
unnest_longer(to) %>%
filter(from != to)
# --- Sample the network --- #
# snowball sample: 250 random seed authors plus everyone they mention
set.seed(1234567890)
tweet_authors <-
as_tibble(
unique(connections$from)
)
seed_users <- sample_n(tweet_authors, 250)
connections_sample <-
connections %>%
filter(from %in% seed_users$value)
first_step <- unique(connections_sample$to)
smple_users <- unique(c(seed_users$value, first_step))
# keep only edges whose endpoints are both inside the sample
edgelist <-
connections %>%
filter(from %in% smple_users,
to %in% smple_users) %>%
distinct()
# --- Create a Network Object --- #
# undirected graph with loops/multi-edges collapsed
tg <-
as_tbl_graph(edgelist) %>%
convert(to_undirected) %>%
convert(to_simple)
# --- Properties of a Network --- #
# number of nodes (individuals)
gorder(tg)
# how many connections are there?
gsize(tg)
# what is the max number of connections in this network?
# n*(n-1)/2 possible undirected edges
max_connections <- 0.5 * (gorder(tg) * (gorder(tg) - 1))
print(max_connections)
# how dense is my network?
gsize(tg) / max_connections
# --> that's quite sparse, approx 0.5 of one percent of all connections
# alternatively, just do this
edge_density(tg)
# probability that adjacent nodes are connected
transitivity(tg, type = 'undirected')
# --- Node Influentiality --- #
# different metrics we can use ..
# degree: number connections of a node
# betweenness: measures shortest paths between people
# eigenvector ('prestige'): influential when connected to more connected nodes
tg <-
tg %>%
activate(nodes) %>%
mutate(degree = centrality_degree(),
betweenness = centrality_betweenness(),
eigen = centrality_eigen(),
pagerank = centrality_pagerank()
)
centrality_table <-
tg %>%
activate(nodes) %>%
as_tibble()
# in the lab ... you'll look at how these rankings are similar
# across different centrality measures
# --- Community Detection --- #
# NOTE(review): the original comment said 'infomap', but group_louvain()
# runs the Louvain algorithm -- the code is what executes.
tg <-
tg %>%
activate(nodes) %>%
mutate(grp_info = group_louvain())
# how many communities do I find?
tg %>%
activate(nodes) %>%
as_tibble() %>%
summarise(max_grp = max(grp_info))
# how large are these groups?
grp_size <-
tg %>%
activate(nodes) %>%
as_tibble() %>%
group_by(grp_info) %>%
count()
# plot only communities 1-5
tg_plot <-
tg %>%
activate(nodes) %>%
filter(grp_info %in% c(1, 2, 3, 4, 5))
tg_plot %>%
ggraph(layout = 'fr') +
geom_node_point(aes(color = as.factor(grp_info))) +
geom_edge_link(alpha = 0.2) +
theme_void()
#' summary:
#' - sampled a network starting with a set of seed users, then i expanded (by one)
#' - summary stats of a network ... size, density and so on
#' - measure influence based on node connectedness
#' - can i split a network into multiple communities
#' - (lab) within each community can i look for influential nodes
|
ff29d5131498bc4b60a5371192664bd0c7da2d52
|
ad0f62efae3e4660fd46d0678e71d52dfbbf74ff
|
/R/salinity-model-functions.R
|
9612f333b5f10a2503da46fe939da0328ff6a709
|
[
"MIT"
] |
permissive
|
gopalpenny/deltasalinity
|
8b781acaa0cac772cb9be9b40f03f742484067fc
|
926034e7668b58fc5142934a9fec307fb2ef1728
|
refs/heads/main
| 2023-01-22T21:06:26.357754
| 2020-12-03T19:12:09
| 2020-12-03T19:12:09
| 317,027,169
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,449
|
r
|
salinity-model-functions.R
|
# salinity_model_functions.R
#' Simulate salinity, re-initializing each year
#'
#' @param Q_df A \code{data.frame} containing a \code{Q_cumec} column and
#'   either a \code{year} column or a \code{date} column from which years are
#'   derived. Rows must be ordered so that each year's days are contiguous.
#' @param v Vector of length 4 containing log parameter values: \code{log(a),
#'   log(b), log(d), and log(C_d)}
#' @export
#' @return
#' A vector of salinity corresponding to input Q_cumec, with each year
#' simulated separately
#' @examples
#' library(ggplot2)
#'
#' # Create new data.frame with streamflow input variables
#' streamflow_df <- ganges_streamflow
#'
#' # Simulate salinity each year
#' streamflow_df$S_ppm <- sim_salin_annual(ganges_streamflow, ganges_params$param)
#'
#' # Plot the output
#' ggplot(streamflow_df) + geom_line(aes(yday,S_ppm, color = group))
sim_salin_annual <- function(Q_df,v) {
  # derive the year from the date column when it is not supplied
  if (!("year" %in% names(Q_df))) {
    Q_df$year <- as.numeric(strftime(Q_df$date,"%Y"))
  }
  flow_by_year <- split(Q_df$Q_cumec,Q_df$year)
  # split() groups by year value; if re-concatenating the groups does not
  # reproduce the input order, the rows were not clustered by year
  if (!identical(Q_df$Q_cumec, as.numeric(do.call(c,flow_by_year)))) {
    stop("Ensure that Q_df is ordered so that each year is clustered together")
  }
  # run one independent simulation per year and concatenate the results
  as.numeric(do.call(c, lapply(flow_by_year, sim_salin, v = v)))
}
#' Simulate salinity for a timeseries of streamflow
#'
#' Integrates dC/dt = -a*Q*C + b*exp(-d*Q)*(C_d - C) with a fixed-step
#' Runge-Kutta solver (deSolve, method "rk4"), one step per day, where the
#' derivative is floored so C cannot fall below \code{salin_min}.
#'
#' @param Q_ts Timeseries of daily streamflow values
#' @param v Vector of length 4 containing log parameter values: \code{log(a), log(b), log(d), and log(C_d)}
#' @param salin_init Initial salinity for simulation
#' @param salin_min Minimum value of salinity
#' @export
#' @examples
#' streamflow_df <- ganges_streamflow[ganges_streamflow$date < "2000-01-01",]
#' # Output salinity in ppm
#' streamflow_df$S_ppm <- sim_salin(streamflow_df$Q_cumec, ganges_params$param)
#' head(streamflow_df)
sim_salin=function(Q_ts, v, salin_init = 100, salin_min=100) {
#Initialize #exp to be positive
Cobs_init <- salin_init
# parameters enter on the log scale; exp() keeps them positive
parms=c(a=exp(v[1]),
b=exp(v[2]),
d=exp(v[3]),
C_d=exp(v[4]))
# gradient for the ODE solver; Q for fractional times t is taken from the
# enclosing Q_ts via the day index ceiling(t) (clamped to >= 1)
gradfun <- function(t,C,parms) {
#see https://stackoverflow.com/questions/21557634/using-a-time-series-of-parameters-to-solve-ode-in-r
Q <- Q_ts[pmax(1,ceiling(t))]
# NOTE(review): C and Q are unnamed in c(parms,C,Q), so inside with()
# they resolve lexically from this function's environment, not the list.
with(as.list(c(parms,C,Q)),{
# max(..., -(C - salin_min)) floors the decay so C stays >= salin_min
list(max(-a*Q*C + b*exp(-d*Q)*(C_d-C),-(C-salin_min)), NULL)
})
}
# gradfun(1,3000,parms)
times=seq(1,length(Q_ts),by=1)
# column 2 of the ode() output is the state (column 1 is time)
salinity <- deSolve::ode(c(Cobs_init),times,gradfun,parms,method="rk4")[,2]
#return SSE
return(salinity)
}
|
ceb27c0baadce070ef181fd48f1f2d59ef1107f7
|
619c0ba0282a4c2cb9a1b20a14536ef82dc46e8f
|
/R/ShannonDiversity.R
|
ab4dcba72d308618dcfc225b518751848e6d8bb1
|
[] |
no_license
|
SEELab/enaR
|
796b51159ca43d2338ef441022e2077db516bc7f
|
281a0c71f83fb4659c9300801e41d09729dbd261
|
refs/heads/develop
| 2023-04-26T01:58:20.788858
| 2023-04-22T20:24:54
| 2023-04-22T20:24:54
| 12,623,293
| 14
| 8
| null | 2018-05-17T22:34:51
| 2013-09-05T16:52:53
|
R
|
UTF-8
|
R
| false
| false
| 2,046
|
r
|
ShannonDiversity.R
|
#' ShannonDiversity Shannon Diversity Metrics
#'
#' Calculates a set of metrics based on the Shannon information entropy
#' measure of diversity in a vector, x.
#'
#' @param x 1 x n vector.
#' @return \item{H}{Shannon entropy-based metric of diversity. This captures
#' the effects of both richness (the length of the vector, n) and the
#' evenness of the distribution.} \item{Hmax}{The maximum possible value of H
#' given a vector of the length n provided.} \item{Hr}{Relative evenness
#' Hr = H/Hmax} \item{Hcentral}{The centralization or concentration of the
#' values among the n elements} \item{n}{Number of elements in the vector.}
#' \item{effective.n}{effective number of elements in the vector, given the
#' distribution of the relative weights.}
#' @note The formulation for Shannon Diversity uses a natural logarithm. As
#' the natural logarithm of zero is undefined, the input vector cannot
#' contain zeros. Analytically, there are two approaches to dealing with
#' this issue if your vector contains zeros. First, you can apply the
#' analysis to only the non-zero elements. Second, you can add a tiny amount
#' to all of the elements such that the zero elements are now very small
#' numbers, relative the original vector values.
#' @author Stuart R. Borrett
#' @export ShannonDiversity
#' @examples
#'
#' data(oyster)
#'
#' ## throughflow diversity
#' T <- enaFlow(oyster)$T
#' ShannonDiversity(T)
#'
#' ## storage (biomass) biodiversity
#' ## X <- oyster %v% "storage"
#' ## ShannonDiversity(X)
#' @import network
ShannonDiversity <- function(x){
  n_elem <- length(x)          # richness: number of elements
  weights <- x / sum(x)        # relative proportions
  # Shannon entropy in nats (natural log)
  entropy <- -sum(weights * log(weights))
  entropy_max <- log(n_elem)   # maximum entropy for n_elem elements
  evenness <- entropy / entropy_max
  c("H" = entropy,
    "Hmax" = entropy_max,
    "Hr" = evenness,
    "Hcentral" = 1 - evenness,
    "n" = n_elem,
    "effective.n" = exp(entropy))
}
|
f53daef2d72ebcca6945a94043474009feef0448
|
63f42fd8e71f9cb58882dbc80bbb5fd1ae9a7e45
|
/src/h2o-automl-demo/quanteda_lab.R
|
a19f1164a3d9079efd6ebcdfc6d1c6a706b79593
|
[
"MIT"
] |
permissive
|
codez0mb1e/donald-trump-tweets
|
3273f4d293a0964c849fb35cc188a778be515dba
|
cd2fad84ffe6de8734ac077f0e207b1355195457
|
refs/heads/master
| 2020-07-16T23:56:34.206734
| 2020-01-09T15:45:58
| 2020-01-09T15:45:58
| 205,895,648
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,806
|
r
|
quanteda_lab.R
|
# Import dependencies and read config ----
options(max.print = 1e3, scipen = 999, width = 1e2)
options(stringsAsFactors = FALSE)

suppressPackageStartupMessages({
  library(data.table)  # fread() below -- was missing, so the script errored
  library(dplyr)
  library(tidyr)
  library(purrr)
  library(magrittr)
  library(stringr)     # str_replace_all() below -- was missing
  library(lubridate)
  library(quanteda)
})

secrets <- config::get(file = "secrets.yml")

# Load dataset ----
# CSV is served from Azure blob storage; the SAS token comes from secrets.yml
donald_tweets_url <- sprintf("https://datainstinct.blob.core.windows.net/twitter/realdonaldtrump.csv?%s", secrets$azure_storage_key)
donald_tweets <- fread(donald_tweets_url, quote = "")

donald_tweets %<>%
  filter(!is_retweet) %>%
  mutate(
    id = as.character(id_str),
    # dates processing
    created_at = mdy_hms(created_at),
    created_date = as.Date(created_at),
    # text processing
    text = str_replace_all(text, '\n', " || "),
    text = str_replace_all(text, "&", "and"),
    text = str_replace_all(text, '"', ""),
    # fixed: "http(s)://" matched only the literal "https://" because (s)
    # is a capture group; "https?" makes the s optional as intended
    text = str_replace_all(text, "https?://t.co/[A-Za-z\\d]+", "<url>")
  ) %>%
  select(-id_str)

donald_tweets %>%
  select(created_at, text, retweet_count, favorite_count) %>%
  arrange(desc(created_at)) %>%
  as_tibble()

# Tokenize words ----
# unigrams through trigrams, with noise token classes stripped
tweets_t <- tokens(donald_tweets$text,
                   remove_separators = TRUE,
                   remove_symbols = TRUE,
                   remove_punct = TRUE,
                   remove_url = TRUE,
                   remove_hyphens = TRUE,
                   remove_numbers = TRUE) %>%
  tokens_ngrams(n = 1:3, concatenator = " ")
print(tweets_t %>% tail)

# Topics network ----
# document-feature matrix: lowercased, stemmed, English stopwords removed
tweets_dfm <- dfm(tweets_t,
                  tolower = TRUE, stem = TRUE,
                  remove = stopwords("en"))
print(tweets_dfm)
tweets_dfm %>%
  textplot_wordcloud(max_words = 200)

## @user network
users_dfm <- dfm_select(tweets_dfm, pattern = "@*")
top_users <- names(topfeatures(users_dfm, 20))
top_users %>% head
# feature co-occurrence matrix restricted to the 20 most-mentioned users
users_fcm <- dfm_select(tweets_dfm, pattern = top_users) %>% fcm()
users_fcm %>% head
textplot_network(users_fcm,
                 min_freq = .1,
                 edge_alpha = .25,
                 edge_size = 5)

## #hashtags network
hashtags_dfm <- dfm_select(tweets_dfm, pattern = "#*")
top_hashtags <- names(topfeatures(hashtags_dfm, 20))
top_hashtags %>% head
hashtags_fcm <- dfm_select(tweets_dfm, pattern = top_hashtags) %>% fcm()
hashtags_fcm %>% head
textplot_network(hashtags_fcm,
                 min_freq = .1,
                 edge_alpha = .25,
                 edge_size = 5)

# Calc Tf-Idf ----
tweets_dfm_w <- dfm_tfidf(tweets_dfm)
tweets_dfm_w %>% head

# Sentiment analysis ----
# map tokens onto the Lexicoder 2015 sentiment dictionary
tweets_dict <- tokens_lookup(tweets_t, dictionary = data_dictionary_LSD2015)
print(tweets_dict %>% tail)
tweets_sa <- dfm(tweets_dict,
                 tolower = TRUE, stem = TRUE,
                 remove = stopwords("en"))
print(tweets_sa)
|
dd8327251e595e04bf91328be91e07d1aa940e12
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/PedCNV/R/cnvlmm_plot.R
|
2ac5bf293288c50507a08332e02e1ef6ed0b66e4
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,723
|
r
|
cnvlmm_plot.R
|
##' Makes formatted plots from the clustering result returned from \code{\link{ClusProc}}.
##'
##' @title Plots clustering result
##' @param x The clustering results obtained from \code{\link{ClusProc}}.
##' @param type Factor specifying the plot type. It must be one of 'histo', 'scat' and 'sil'. If it is 'histo', a histogram is obtained from the first PC score of the intensity measurement. For 'scat', the first PC score of the intensity measurement is plotted against the mean of the intensity measurement. For 'sil', the silhouette score is plotted. See details.
##' @param adjust Logical. If TRUE (default), the silhouette-adjusted clustering result will be used. If FALSE, the initial clustering result will be used. See details in \code{\link{ClusProc}}.
##' @param ... Usual arguments passed to the qplot function.
##' @details
##' \itemize{
##' \item{type}{We provide three types of plots: 'histo', 'scat' and 'sil'. The first two plots are used to visually check the performance of clustering; different clusters are represented by different colors. The 'sil' plot is an overview of the silhouette value for all the individuals, where the silhouettes of the different clusters are printed below each other. A higher silhouette value means a better clustering performance.}
##' }
##' @author Meiling Liu
##' @method plot clust
##' @examples
##' # Fit the data under the given clustering numbers
##' clus.fit <- ClusProc(signal=signal,N=2:6,varSelection='PC.9')
##' plot(clus.fit,type='histo')
##' @export
plot.clust <- function(x, type = c('histo', 'scat', 'sil'), adjust = TRUE, ...) {
  # Dummy bindings so R CMD check does not flag the ggplot2 aesthetic names.
  MEAN <- NULL
  PC1 <- NULL
  type <- match.arg(type)

  if (type == 'histo') {
    # Density/histogram of the first principal component of the signal,
    # colored by cluster membership.
    sX <- as.matrix(x$signal)
    pca <- princomp(sX)
    PCA1 <- sX %*% loadings(pca)[, 1]
    if (adjust) {
      clusters <- matrix(x$silWidth$adjusted$silRes.adjust$clus)
      # NOTE(review): cluster labels come from x$silWidth$... while rownames
      # come from x$sil$... -- confirm both components exist on the object.
      rownames(clusters) <- rownames(x$sil$adjusted$silRes.adjust)
    } else {
      clusters <- matrix(x$silWidth$unadjusted$silRes$clus)
      rownames(clusters) <- rownames(x$sil$unadjusted$silRes)
    }
    # Align PC scores with cluster labels by row name, then plot.
    temp <- as.data.frame(merge(PCA1, clusters, by = 'row.names')[, -1])
    colnames(temp) <- c('PC1', 'clusters')
    temp[, 2] <- factor(temp[, 2])
    print(qplot(PC1, fill = clusters, data = temp, geom = "density") +
            geom_histogram(aes_string(y = '..count..'), binwidth = 0.2))
  }

  if (type == 'scat') {
    # Scatter of first PC score against per-sample mean intensity,
    # colored by cluster membership.
    signal <- x$signal
    sX <- as.matrix((signal))
    pca <- princomp(sX)
    PCA1 <- sX %*% loadings(pca)[, 1]
    segmean <- as.matrix(apply(sX, 1, mean))
    if (adjust) {
      clusters <- matrix(x$silWidth$adjusted$silRes.adjust$clus)
      rownames(clusters) <- rownames(x$sil$adjusted$silRes.adjust)
    } else {
      clusters <- matrix(x$silWidth$unadjusted$silRes$clus)
      rownames(clusters) <- rownames(x$sil$unadjusted$silRes)
    }
    temp <- as.data.frame(merge(cbind(segmean, PCA1), clusters, by = 'row.names')[, -1])
    colnames(temp) <- c('MEAN', 'PC1', 'clusters')
    temp[, 3] <- factor(temp[, 3])
    print(qplot(MEAN, PC1, color = clusters, data = temp))
  }

  if (type == 'sil') {
    # Silhouette bar plot: one horizontal bar per observation, grouped by
    # cluster, with per-cluster sizes and average widths in the margin.
    if (adjust) {
      silRes <- x$silWidth$adjusted$silRes.adjust
      silMean <- x$silWidth$adjusted$silMean.adjust
      clusNo <- x$silWidth$adjusted$clusNum.adjust
      clusAvg <- x$silWidth$adjusted$clusAvg.adjust
      abandon_num <- length(x$silWidth$adjusted$abandon.id)
    } else {
      silRes <- x$silWidth$unadjusted$silRes
      silMean <- x$silWidth$unadjusted$silMean
      clusNo <- x$silWidth$unadjusted$clusNum
      clusAvg <- x$silWidth$unadjusted$clusAvg
      abandon_num <- 0
    }
    # Order observations by cluster, then by decreasing silhouette width.
    silRes <- silRes[with(silRes, order(silRes$clus, -silRes$sil)), ]
    obsNo <- dim(silRes)[1]
    clusRes <- silRes$clus
    s <- rev(silRes[, "sil"])
    # BUG FIX: the original read silRes$clusRes, a column that does not
    # exist (the cluster column is 'clus', copied into the local variable
    # clusRes above). silRes$clusRes is NULL, so 'space' collapsed to a
    # single 0 and the visual gaps between clusters were silently lost.
    space <- c(0, rev(diff(silRes$clus)))
    space[space != 0] <- 5
    xlab <- expression("Silhouette width" * s[i])
    main <- paste("Silhouette plot")
    sub <- paste("Average silhouette width:", round(silMean, 4))
    y <- barplot(s, width = 1, space = space, xlim = c(min(0, min(s)), 1),
                 horiz = TRUE, col = "grey", mgp = c(2.5, 1, 0), las = 1,
                 border = 0, xlab = xlab)
    title(main = main, sub = sub, adj = 0)
    mtext(paste("n=", obsNo, '; abandon=', abandon_num), adj = 0)
    mtext(substitute(k ~ ~"clusters" ~ ~C[j], list(k = clusNo)), adj = 1)
    mtext(expression(paste(j, " : ", n[j], " | ", ave[i %in% Cj] ~ ~s[i])), adj = 1.04, line = -1.2)
    # Pair each bar's y coordinate with its cluster label for annotation.
    y <- rbind(rev(y), (clusRes))
    for (j in 1:clusNo) {
      # Annotate each cluster with its size and average silhouette width,
      # centered on that cluster's bars.
      yj <- mean(y[1, y[2, ] == j - 1])
      text(1, yj, paste(j - 1, ": ", table(clusRes)[j], " | ",
                        format(clusAvg[j], digits = 1, nsmall = 2)),
           xpd = NA, adj = 0.8)
    }
  }
}
|
0f1c28c4263b40afe2230f002735678fecc608ee
|
a9bce7ec992cfbc6713c01afc947fe4a3296ffdb
|
/man/test_coverage.Rd
|
12a3477b02985d00e5860bd2973a9a08c48b0a8a
|
[] |
no_license
|
jimsforks/testthis
|
059e2aec4080a05e56c0d15cb372247a58104d7e
|
2f22b34d3e6fe851a09517e93d891237063ad8a9
|
refs/heads/master
| 2022-11-20T19:25:33.146038
| 2020-07-21T05:22:37
| 2020-07-21T05:22:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,583
|
rd
|
test_coverage.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test_coverage.R
\name{test_coverage}
\alias{test_coverage}
\title{Test coverage of package}
\usage{
test_coverage(from_tags = TRUE, from_desc = TRUE)
}
\arguments{
\item{from_tags}{\code{logical} scalar. Checks the files if your test directory for
testthis tags. Specifically, if you have the comment \verb{#* @testing myfunction}
in any of your test files, myfunction will be marked as tested.}
\item{from_desc}{\code{logical} scalar. Checks the \code{desc} argument
\code{test_that(...)} of the tests in your test directory for functions
names. E.g. if you have a test file that contains
\code{test_that("myfunction works", {...})}, myfunction will be marked as
tested.}
}
\value{
A \code{Test_coverage} object. This is a \code{data.frame} containing the
following columns:
\itemize{
\item fun: Name of the function
\item exp: Is function is exported?
\item s3: Is function an S3 method?
\item tested: Do unit tests exist for function?
\item ignore: Is function listed in \file{tests/testthat/_testignore}?
}
}
\description{
This determines the test coverage of the target package based on the \code{desc}
argument of \code{test_that()} calls. If you require a more comprehensive analysis
of test coverage, try the package \strong{covr} instead.
}
\details{
\code{test_coverage} looks in \code{.covrignore} for functions that should be ignored
for coverage analysis (see \code{\link[usethis:use_coverage]{usethis::use_covr_ignore()}})
}
\examples{
\dontrun{
x <- test_coverage()
as.data.frame(x)
}
}
|
8b3658c10de9c35105561de228cb39b2f85424e0
|
00be44c6e49e7f0e948bb202457240467665480e
|
/R_scripts/buildByteBIgrams.R
|
ad4264e262de26135c50958836345f394a8613d4
|
[] |
no_license
|
AkiraKane/scharf-personal
|
b9469d76e026255283f99d66c5cb0e17456bf8b5
|
b6b6560bc8ac5033871e6e64cb2920b6b14f30bd
|
refs/heads/master
| 2021-01-19T20:58:07.745222
| 2016-09-22T15:12:26
| 2016-09-22T15:12:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 692
|
r
|
buildByteBIgrams.R
|
# Build a matrix of byte-bigram counts, one row per malware ".byte" file.
#
# Each input file is a single column of hex byte tokens ("00".."FF" or "??").
# For every file we count occurrences of each ordered pair of adjacent tokens
# (a "bigram") over all 257 x 257 possible bigrams, then stack the per-file
# count vectors into one matrix and save it with saveRDS().
library(data.table)
library(doMC)
registerDoMC(cores = 8)

files <- sort(list.files('/media/tim-ssd/backup/malware_data/byte', full.names = TRUE))

# Token alphabet: "??" plus every two-digit hex byte built from the 16 digits.
alpha <- c(0:9, 'A', 'B', 'C', 'D', 'E', 'F')
hex <- c('??', as.character(outer(alpha, alpha, FUN = 'paste0')))
bigram_lev <- as.character(outer(hex, hex, FUN = 'paste0'))
# 257^2 = 66049 possible bigram levels (previously a magic number).
n_levels <- length(bigram_lev)

start <- proc.time()
M <- foreach(file = iter(files)) %dopar%
{
  corpus <- fread(input = file, header = FALSE)
  n <- nrow(corpus)
  # Pair each token with its successor and tabulate every bigram level so
  # all per-file count vectors share the same length and ordering.
  bigram <- factor(corpus[, paste0(V1[1:(n - 1)], V1[2:n])], levels = bigram_lev)
  t1 <- tabulate(bigram, nbins = n_levels)
  t1
}
time <- proc.time() - start
print(time[3])  # elapsed seconds

V <- do.call(rbind, M)
# Row names: first 20 chars of each file name (presumably the sample hash
# -- TODO confirm against the data set naming convention).
rownames(V) <- substring(basename(files), 1, 20)
saveRDS(V, 'bigram_mat')
|
cd76468c121cd25eda4c97dbf55e47e2e6a44bd2
|
a97332504666f71b63b093bb8dfbb3ba7d670928
|
/NMA_barplot.R
|
5991072a3cccafeaebd8e8806294dcd6a990490e
|
[] |
no_license
|
JiayiJessieTong/NMA_data_analysis
|
5fb67a58456807d2e62fd95b8bb19dbb5e0e542b
|
d8fe7ab0e43628c8bb5ea15007f04a42c5f0b487
|
refs/heads/master
| 2020-07-08T13:00:49.388996
| 2019-10-09T01:15:58
| 2019-10-09T01:15:58
| 203,680,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,474
|
r
|
NMA_barplot.R
|
# this code is used to draw the barplot to show the ranking of different drugs
# Aim1: barplot for the following six drugs (selective serotonin reuptake inhibitor (SSRI))
# "Paroxetine, Fluvoxamine, Escitalopram, Sertraline, Fluoxetine, Citalopram"
# Aim2: barplot for the following five drugs (serotonin–norepinephrine reuptake inhibitor (SNRI))
# "Duloxetine, Venlafaxine, Milnacipran, Levomilnacipran, Desvenlafaxine"
# last update: 10/08
library(mvtnorm)
library(ggplot2)
library(reshape)
library(scales)
library(plyr)
library(grid)
# source("/Users/Jessie/Dropbox/2_Rui_and_Jessie/Summer_2019/1_Multi_NMA/Data_analysis")\
# NOTE(review): 'dataout' and CLNMA.equal.tau.fullout() are presumably
# provided by this workspace image -- verify Results.RData contents.
load("/Users/jiayito/Dropbox/2_Rui_and_Jessie/Summer_2019/1_Multi_NMA/Data_analysis/Results.RData")
#Run analysis
out = CLNMA.equal.tau.fullout(dataout)
# Estimates for the two outcomes (used later as efficacy/safety in the
# weighted rankings -- TODO confirm which index is which).
mu1 = out[[1]]
mu2 = out[[2]]
tau1 = out[[3]]
tau2 = out[[4]]
Varmatrix =out[[5]]
#Confidence interval
# 95% Wald intervals; outcome 2 occupies positions 23:43 of the joint
# covariance matrix (offset of 22 per outcome, see drug_id_list+22 below).
mu1u = mu1+1.96*sqrt(diag(Varmatrix)[1:21])
mu1l = mu1-1.96*sqrt(diag(Varmatrix)[1:21])
mu2u = mu2+1.96*sqrt(diag(Varmatrix)[23:43])
mu2l = mu2-1.96*sqrt(diag(Varmatrix)[23:43])
##########################################################################
########################## six drugs #####################################
##########################################################################
#SUCRA plot
# Simulate Nsim draws of the (weighted) effect vector and turn each draw's
# ranking into per-rank probabilities, shown as stacked bar plots.
Nsim = 50000
# drug_id_list = c(4, 8, 9, 10, 15, 17)
# NOTE(review): the file header lists six SSRIs, but l1 below labels these
# ids as desvenlafaxine/duloxetine/... -- verify the id-to-name mapping.
drug_id_list = c(6,7,8,9,11,17)
########################### Efficacy 90% + safety 10%################################
# Composite score 0.9*efficacy - 0.1*safety; Varm is the matching variance
# of the linear combination (indices +22 address the second outcome block).
m = 0.9*mu1[drug_id_list] - 0.1*mu2[drug_id_list]
Varm = 0.9*0.9*Varmatrix[drug_id_list,drug_id_list]+
0.1*0.1*Varmatrix[drug_id_list+22,drug_id_list+22]-
0.1*0.9*Varmatrix[drug_id_list,drug_id_list+22]-
0.1*0.9*t(Varmatrix[drug_id_list,drug_id_list+22])
# m = mu1[drug_id_list]
# Varm = Varmatrix[drug_id_list,drug_id_list]
y = rmvnorm(Nsim,m,Varm)
# Each column of R1 is one simulated ranking (drug indices, best first).
R1 = apply(y,1,function(x){order(x,decreasing = T)})
# Count how often each drug lands at each rank position.
get.count = function(x){
ct = rep(0,6)
t = table(x)
ct[as.numeric(rownames(t))] = t
return(ct)
}
C1 = apply(R1,1,get.count)
C1 = data.frame(C1)
colnames(C1) = as.character(1:6)
#Add an id variable for the filled regions
datm <- melt(cbind(C1, ind = rownames(C1)), id.vars = c('ind'))
colnames(datm) = c("Treatments", "rank","Probability")
datm$Treatments = as.character(datm$Treatments)
# l1 = c("citalopram","escitalopram", "fluoxetine","fluvoxamine", "paroxetine", "sertraline")
l1 = c("desvenlafaxine","duloxetine","escitalopram","fluoxetine", "levominalcipran","sertraline")
# NOTE(review): datm$Treatment resolves to the Treatments column via $
# partial matching -- works, but spelling it out would be safer.
datm$Treatments =mapvalues(datm$Treatment, from =1:6, to=l1)
# cpb1 = c("#AD9ED7", "#E07680", "#7ED9CA", "#BD5AD8", "#D6CEBD", "#B7DB65")
cpb1 = c("#D6CEBD", "#E07680", "#7ED9CA","#B7DB65","#AD9ED7","#BD5AD8")
# ggplot
pdf("/Users/jiayito/Dropbox/000_UPenn_Research/000_project/000_with_Rui/summer_2019_with_Rui/0_NMA/1008_final_update_prob_ranking_6drugs_efficacy_50000.pdf",height=6,width=10)
ggplot(datm,aes(x = rank, y = Probability,fill = Treatments)) +
geom_bar(position = "fill",stat = "identity",color='black') +
scale_fill_manual("Drugs", values = cpb1) +
scale_y_continuous(labels = percent_format())+
xlab("Ranks") + ylab("% probability to rank at each place") +
ggtitle("90%Efficacy+10%Safety")+theme(plot.title = element_text(hjust = 0.5,face = "bold")) +
theme_classic(base_size = 20)
# Hand-placed rank numbers next to the legend entries; the npc coordinates
# are tuned to this specific 10x6-inch device size.
grid.text("1",
x = unit(0.789, "npc"), y = unit(0.585, "npc"),just = "left", gp=gpar(fontsize=11))
grid.text("2",
x = unit(0.789, "npc"), y = unit(0.585 -0.041, "npc"),just = "left", gp=gpar(fontsize=11))
grid.text("3",
x = unit(0.789, "npc"), y = unit(0.585-2*0.040, "npc"),just = "left", gp=gpar(fontsize=11))
grid.text("4",
x = unit(0.789, "npc"), y = unit(0.585-3*0.040, "npc"),just = "left", gp=gpar(fontsize=11))
grid.text("5",
x = unit(0.789, "npc"), y = unit(0.585-4*0.040, "npc"),just = "left", gp=gpar(fontsize=11))
grid.text("6",
x = unit(0.789, "npc"), y = unit(0.585-5*0.040, "npc"),just = "left", gp=gpar(fontsize=11))
dev.off()
########################### Efficacy 50% +Safety 50% ################################
# Equal-weight composite: efficacy minus safety (weights of 1 and 1).
m = mu1[drug_id_list]-mu2[drug_id_list]
Varm = Varmatrix[drug_id_list,drug_id_list]+Varmatrix[drug_id_list+22,drug_id_list+22]-
Varmatrix[drug_id_list,drug_id_list+22]-t(Varmatrix[drug_id_list,drug_id_list+22])
y = rmvnorm(Nsim,m,Varm)
R2 = apply(y,1,function(x){order(x,decreasing = T)})
get.count = function(x){
ct = rep(0,6)
t = table(x)
ct[as.numeric(rownames(t))] = t
return(ct)
}
C2 = apply(R2,1,get.count)
C2 = data.frame(C2)
colnames(C2) = as.character(1:6)
datm <- melt(cbind(C2, ind = rownames(C2)), id.vars = c('ind'))
colnames(datm) = c("Treatments", "rank","Probability")
datm$Treatments = as.character(datm$Treatments)
# l1 = c("citalopram","escitalopram", "fluoxetine","fluvoxamine", "paroxetine", "sertraline")
l1 = c("desvenlafaxine","duloxetine","escitalopram","fluoxetine", "levominalcipran","sertraline")
datm$Treatments =mapvalues(datm$Treatment, from =1:6, to=l1)
pdf("/Users/jiayito/Dropbox/000_UPenn_Research/000_project/000_with_Rui/summer_2019_with_Rui/0_NMA/1008_final_update_prob_ranking_6drugs_50+50_50000.pdf",height=6,width=10)
ggplot(datm,aes(x = rank, y = Probability,fill = Treatments)) +
geom_bar(position = "fill",stat = "identity",color='black') +
xlab("Ranks") + ylab("% probability to rank at each place") +
scale_fill_manual("Drugs", values = cpb1) +
scale_y_continuous(labels = percent_format())+ggtitle("50%Efficacy+50%Safety")+theme(plot.title = element_text(hjust = 0.5,face = "bold")) +
theme_classic(base_size = 20)
dev.off()
######################### Safety 90% + Efficacy 10% Only ##################################
# m = -mu2[drug_id_list]
# Varm =Varmatrix[22+drug_id_list,22+drug_id_list]
# Safety-dominated composite: 0.1*efficacy - 0.9*safety.
m = 0.1*mu1[drug_id_list] - 0.9*mu2[drug_id_list]
Varm = 0.1*0.1*Varmatrix[drug_id_list,drug_id_list]+
0.9*0.9*Varmatrix[drug_id_list+22,drug_id_list+22]-
0.1*0.9*Varmatrix[drug_id_list,drug_id_list+22]-
0.1*0.9*t(Varmatrix[drug_id_list,drug_id_list+22])
y = rmvnorm(Nsim,m,Varm)
R3 = apply(y,1,function(x){order(x,decreasing = T)})
get.count = function(x){
ct = rep(0,6)
t = table(x)
ct[as.numeric(rownames(t))] = t
return(ct)
}
C3 = apply(R3,1,get.count)
C3 = data.frame(C3)
colnames(C3) = as.character(1:6)
datm <- melt(cbind(C3, ind = rownames(C3)), id.vars = c('ind'))
colnames(datm) = c("Treatments", "rank","Probability")
datm$Treatments = as.character(datm$Treatments)
# l1 = c("citalopram","escitalopram", "fluoxetine","fluvoxamine", "paroxetine", "sertraline")
l1 = c("desvenlafaxine","duloxetine","escitalopram","fluoxetine", "levominalcipran","sertraline")
datm$Treatments =mapvalues(datm$Treatment, from =1:6, to=l1)
pdf("/Users/jiayito/Dropbox/000_UPenn_Research/000_project/000_with_Rui/summer_2019_with_Rui/0_NMA/1008_final_update_prob_ranking_6drugs_safety_50000.pdf",height=6,width=10)
ggplot(datm,aes(x = rank, y = Probability,fill = Treatments)) +
geom_bar(position = "fill",stat = "identity",color='black') +
xlab("Ranks") + ylab("% probability to rank at each place") +
scale_fill_manual("Drugs", values = cpb1) +
scale_y_continuous(labels = percent_format())+ggtitle("10%Efficacy+90%Safety")+theme(plot.title = element_text(hjust = 0.5,face = "bold"))+
theme_classic(base_size = 20)
dev.off()
##########################################################################
########################## done #########################################
##########################################################################
##########################################################################
########################## five drugs #####################################
##########################################################################
#SUCRA plot
# Same simulation-based ranking procedure as the six-drug section above,
# repeated for the five SNRI treatment ids.
Nsim = 50000
drug_id_list2 = c(6, 7, 11, 12, 19)
########################### Efficacy 90% + safety 10% ################################
# Composite 0.9*efficacy - 0.1*safety with matching variance of the
# linear combination (indices +22 address the second outcome block).
m = 0.9*mu1[drug_id_list2] - 0.1*mu2[drug_id_list2]
Varm = 0.9*0.9*Varmatrix[drug_id_list2,drug_id_list2]+
0.1*0.1*Varmatrix[drug_id_list2+22,drug_id_list2+22]-
0.1*0.9*Varmatrix[drug_id_list2,drug_id_list2+22]-
0.1*0.9*t(Varmatrix[drug_id_list2,drug_id_list2+22])
# m = mu1[drug_id_list2]
# Varm = Varmatrix[drug_id_list2,drug_id_list2]
y = rmvnorm(Nsim,m,Varm)
# Each column of R1 is one simulated ranking (drug indices, best first).
R1 = apply(y,1,function(x){order(x,decreasing = T)})
# Count how often each drug lands at each rank position.
get.count = function(x){
ct = rep(0,5)
t = table(x)
ct[as.numeric(rownames(t))] = t
return(ct)
}
C1 = apply(R1,1,get.count)
C1 = data.frame(C1)
colnames(C1) = as.character(1:5)
#Add an id variable for the filled regions
datm <- melt(cbind(C1, ind = rownames(C1)), id.vars = c('ind'))
colnames(datm) = c("Treatments", "rank","Probability")
datm$Treatments = as.character(datm$Treatments)
l2 = c("desvenlafaxine", "duloxetine", "levomilnacipran", "milnacipran", "venlafaxine")
# NOTE(review): datm$Treatment resolves to Treatments via $ partial matching.
datm$Treatments =mapvalues(datm$Treatment, from =1:5, to=l2)
cpb2 = c("#A5D9CD", "#DE8F7F", "#B29ED5", "#C259D1", "#B5DC6B")
# ggplot
pdf("/Users/jiayito/Dropbox/000_UPenn_Research/000_project/000_with_Rui/summer_2019_with_Rui/0_NMA/1001_update_prob_ranking_5drugs_efficacy_50000.pdf",height=6,width=10)
ggplot(datm,aes(x = rank, y = Probability,fill = Treatments)) +
geom_bar(position = "fill",stat = "identity",color="black") +
scale_fill_manual("Drugs", values = cpb2) +
scale_y_continuous(labels = percent_format())+
xlab("Ranks") + ylab("% probability to rank at each place") +
ggtitle("90%Efficacy+10%Safety")+theme(plot.title = element_text(hjust = 0.5,face = "bold")) +
theme_classic(base_size = 20)
# Hand-placed rank numbers beside the legend; npc coordinates are tuned
# to this specific 10x6-inch device size.
grid.text("1",
x = unit(0.790, "npc"), y = unit(0.565, "npc"),just = "left", gp=gpar(fontsize=11))
grid.text("2",
x = unit(0.790, "npc"), y = unit(0.565 -0.042, "npc"),just = "left", gp=gpar(fontsize=11))
grid.text("3",
x = unit(0.790, "npc"), y = unit(0.567- 2* 0.042, "npc"),just = "left", gp=gpar(fontsize=11))
grid.text("4",
x = unit(0.790, "npc"), y = unit(0.567-3*0.042, "npc"),just = "left", gp=gpar(fontsize=11))
grid.text("5",
x = unit(0.790, "npc"), y = unit(0.571-4*0.042, "npc"),just = "left", gp=gpar(fontsize=11))
dev.off()
########################### Efficacy 50% +Safety 50% ################################
# Equal-weight composite: efficacy minus safety.
m = mu1[drug_id_list2]-mu2[drug_id_list2]
Varm = Varmatrix[drug_id_list2,drug_id_list2]+Varmatrix[drug_id_list2+22,drug_id_list2+22]-
Varmatrix[drug_id_list2,drug_id_list2+22]-t(Varmatrix[drug_id_list2,drug_id_list2+22])
y = rmvnorm(Nsim,m,Varm)
R2 = apply(y,1,function(x){order(x,decreasing = T)})
get.count = function(x){
ct = rep(0,5)
t = table(x)
ct[as.numeric(rownames(t))] = t
return(ct)
}
C2 = apply(R2,1,get.count)
C2 = data.frame(C2)
colnames(C2) = as.character(1:5)
datm <- melt(cbind(C2, ind = rownames(C2)), id.vars = c('ind'))
colnames(datm) = c("Treatments", "rank","Probability")
datm$Treatments = as.character(datm$Treatments)
l2 = c("desvenlafaxine", "duloxetine", "levomilnacipran", "milnacipran", "venlafaxine")
datm$Treatments =mapvalues(datm$Treatment, from =1:5, to=l2)
# ggplot
pdf("/Users/jiayito/Dropbox/000_UPenn_Research/000_project/000_with_Rui/summer_2019_with_Rui/0_NMA/1001_update_prob_ranking_5drugs_50+50_50000.pdf",height=6,width=10)
ggplot(datm,aes(x = rank, y = Probability,fill = Treatments)) +
geom_bar(position = "fill",stat = "identity",color="black") +
scale_fill_manual("Drugs", values = cpb2) +
scale_y_continuous(labels = percent_format())+
xlab("Ranks") + ylab("% probability to rank at each place") +
ggtitle("50%Efficacy+50%Safety")+theme(plot.title = element_text(hjust = 0.5,face = "bold")) +
theme_classic(base_size = 20)
dev.off()
######################### Safety Only ##################################
# m = -mu2[drug_id_list2]
# Varm =Varmatrix[22+drug_id_list2,22+drug_id_list2]
# NOTE(review): despite the "Safety Only" banner, the code (and the plot
# title below) uses the 0.1*efficacy - 0.9*safety composite.
m = 0.1*mu1[drug_id_list2] - 0.9*mu2[drug_id_list2]
Varm = 0.1*0.1*Varmatrix[drug_id_list2,drug_id_list2]+
0.9*0.9*Varmatrix[drug_id_list2+22,drug_id_list2+22]-
0.1*0.9*Varmatrix[drug_id_list2,drug_id_list2+22]-
0.1*0.9*t(Varmatrix[drug_id_list2,drug_id_list2+22])
y = rmvnorm(Nsim,m,Varm)
R3 = apply(y,1,function(x){order(x,decreasing = T)})
get.count = function(x){
ct = rep(0,5)
t = table(x)
ct[as.numeric(rownames(t))] = t
return(ct)
}
C3 = apply(R3,1,get.count)
C3 = data.frame(C3)
colnames(C3) = as.character(1:5)
datm <- melt(cbind(C3, ind = rownames(C3)), id.vars = c('ind'))
colnames(datm) = c("Treatments", "rank","Probability")
datm$Treatments = as.character(datm$Treatments)
l2 = c("desvenlafaxine", "duloxetine", "levomilnacipran", "milnacipran", "venlafaxine")
datm$Treatments =mapvalues(datm$Treatment, from =1:5, to=l2)
# ggplot
pdf("/Users/jiayito/Dropbox/000_UPenn_Research/000_project/000_with_Rui/summer_2019_with_Rui/0_NMA/1001_update_prob_ranking_5drugs_safety_50000.pdf",height=6,width=10)
ggplot(datm,aes(x = rank, y = Probability,fill = Treatments)) +
geom_bar(position = "fill",stat = "identity",color="black") +
scale_fill_manual("Drugs", values = cpb2) +
scale_y_continuous(labels = percent_format())+
xlab("Ranks") + ylab("% probability to rank at each place") +
ggtitle("10%Efficacy+90%Safety")+theme(plot.title = element_text(hjust = 0.5,face = "bold")) +
theme_classic(base_size = 20)
dev.off()
##########################################################################
########################## done #########################################
##########################################################################
|
f8e2bb2cd7861e83fef3a20c7af8097fbc74f87a
|
a9d15a277c938a64baff2c8aa0237aa749e019ff
|
/DS501/ds50112/project/run.r
|
02cdccd7f7369edf51296964510613cb7dc34833
|
[] |
no_license
|
dgallup2020/College
|
5f71314063264284dec2ec012fe9e193a0135568
|
ab10fc050bea85053c0bd7d5b2315a1603c8a211
|
refs/heads/master
| 2022-12-10T23:50:25.106600
| 2020-09-11T05:03:34
| 2020-09-11T05:03:34
| 294,594,512
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,570
|
r
|
run.r
|
#
# Plot of 2019 CS login counts by course, from the login log in ./data.
# To run it login, cd to /u1/junk/cs459/feb04, then run R
# and finally source('run.r')
# check out the values in the variables u and t
options(stringsAsFactors = FALSE)
# read the list, add names for each item, convert to df
li <- scan('data', what = list('', '', '', '', '', '', '', ''))
names(li) <- c('user', 'ip', 'wday', 'mon', 'mday', 'in', 'out', 'rawtime')
df <- as.data.frame(li)
options(stringsAsFactors = TRUE)
# get login names that match the pattern for class accounts and
# save the first 5 chars of each (the course id, e.g. "cs459")
u <- substr(df$user[grep('^[dc]s[0-9][0-9][0-9]..$', df$user)], 1, 5)
# set background color
par(bg = 'wheat')
# set inner and outer margins -- mainly so there's enough room for vertical text (class names)
par(mar = c(3, 1, 1, 1))
par(oma = c(3, 3, 3, 3))
# make a table from the class name list (count how often each name appears)
# then sort the table by frequency
#
# table uses the low level functions 'tabulate' and (if needed) 'as.factor'
# to do it's job. run tabulate(as.factor(u)) and compare.
#
# BUG FIX: the argument was misspelled 'descreasing', so it was silently
# swallowed by '...' and the table stayed in ascending order.
t <- sort(table(u), decreasing = TRUE)
# xaxt = 'n' turns off default x axis labels
b <- barplot(t, xaxt = 'n')
# set x coordinate for text to b, which contains a list of x coordinates for the barplot bars
# set y < 0 so there is room for the names under the plot
# names(t) is the list of class names (keys for the table)
# xpd = TRUE means it's ok to print outside of the inner margins
# srt = 90 rotates the text 90 degrees so labels run vertically
text(x = b, y = -30, names(t), xpd = TRUE, srt = 90)
title('2019 CS login count by course')
|
0c03a8255faa02cbb7edbd5239ae1004c1a19f3f
|
c02b1b6252a59c992a0f3ebb542f08fb0cf261a4
|
/man/get_player_master_acs.Rd
|
690aaa6938b467523aa931c36366f5ba28006858
|
[] |
no_license
|
systats/lolR
|
d57b04d592b40906b70f0da1acc9a332b965aa23
|
f2b38453460cac1c9fe24861603e75bebf549669
|
refs/heads/master
| 2020-03-18T07:13:38.225502
| 2018-06-02T17:13:56
| 2018-06-02T17:13:56
| 134,439,850
| 0
| 2
| null | 2018-05-31T01:11:19
| 2018-05-22T15:58:05
|
HTML
|
UTF-8
|
R
| false
| true
| 314
|
rd
|
get_player_master_acs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_player_master_acs.R
\name{get_player_master_acs}
\alias{get_player_master_acs}
\title{get_player_master_acs}
\usage{
get_player_master_acs(x)
}
\arguments{
\item{x}{html_node}
}
\value{
data
}
\description{
get players from match
}
|
fa5dadae4dd5dd2c3300160be7ee0751e97762fa
|
7bc0759b1a3f1a8a4c48e02f1c5e393a13e94eff
|
/man/extractSequence.Rd
|
f2a1c432a8d4225a2be984bb76a9880fa46ff29c
|
[] |
no_license
|
kvittingseerup/IsoformSwitchAnalyzeR
|
b7e4a249572b3d487ffcdea62a409e800ac23fe3
|
5f471360da38101777d37d9eb91a99c3ac81eda4
|
refs/heads/master
| 2023-07-21T19:52:53.966098
| 2023-06-30T11:04:50
| 2023-06-30T11:04:50
| 88,636,530
| 66
| 20
| null | 2023-01-31T12:23:30
| 2017-04-18T14:46:47
|
R
|
UTF-8
|
R
| false
| false
| 9,847
|
rd
|
extractSequence.Rd
|
\name{extractSequence}
\alias{extractSequence}
\title{
Extract nucleotide (and amino acid) sequence of transcripts.
}
\description{
This function extracts the nucleotide (NT) sequence of transcripts by extracting and concatenating the sequences of a reference genome corresponding to the genomic coordinates of the isoforms. If ORF is annotated (e.g. via \code{analyzeORF}) this function can furthermore translate the ORF NT sequence to Amino Acid (AA) sequence (via the Biostrings::translate() function where if.fuzzy.codon='solve' is specified). The sequences (both NT and AA) can be outputted as fasta file(s) and/or added to the \code{switchAnalyzeRlist}.
}
\usage{
extractSequence(
switchAnalyzeRlist,
genomeObject = NULL,
onlySwitchingGenes = TRUE,
alpha = 0.05,
dIFcutoff = 0.1,
extractNTseq = TRUE,
extractAAseq = TRUE,
removeShortAAseq = TRUE,
removeLongAAseq = FALSE,
alsoSplitFastaFile = FALSE,
removeORFwithStop=TRUE,
addToSwitchAnalyzeRlist = TRUE,
writeToFile = TRUE,
pathToOutput = getwd(),
outputPrefix='isoformSwitchAnalyzeR_isoform',
forceReExtraction = FALSE,
quiet=FALSE
)
}
\arguments{
\item{switchAnalyzeRlist}{
A \code{switchAnalyzeRlist} object (where ORF info (predicted by \link{analyzeORF}) have been added if the amino acid sequence should be extracted).
}
\item{genomeObject}{
A \code{BSgenome} object uses as reference genome (for example Hsapiens for Homo sapiens, Mmusculus for mouse). Only necessary if sequences have not already been extracted.
}
\item{onlySwitchingGenes}{
A logic indicating whether the only sequences from transcripts in genes with significant switching isoforms (as indicated by the \code{alpha} and \code{dIFcutoff} cutoff) should be extracted. Default is TRUE.
}
\item{alpha}{
The cutoff which the FDR correct p-values must be smaller than for calling significant switches. Default is 0.05.
}
\item{dIFcutoff}{
The cutoff which the changes in (absolute) isoform usage must be larger than before an isoform is considered switching. This cutoff can remove cases where isoforms with (very) low dIF values are deemed significant and thereby included in the downstream analysis. This cutoff is analogous to having a cutoff on log2 fold change in a normal differential expression analysis of genes to ensure the genes have a certain effect size. Default is 0.1 (10\%).
}
\item{extractNTseq}{
A logical indicating whether the nucleotide sequence of the transcripts should be extracted (necessary for CPAT analysis). Default is TRUE.
}
\item{extractAAseq}{
A logical indicating whether the amino acid (AA) sequence of the annotated open reading frames (ORF) should be extracted (necessary for pfam and SignalP analysis). The ORF can be annotated with the \code{analyzeORF} function. Default is TRUE.
}
\item{removeShortAAseq}{
A logical indicating whether to remove sequences based on their length. This option exist to allows for easier usage of the Pfam and SignalP web servers which both currently have restrictions on allowed sequence lengths. If enabled AA sequences are filtered to be > 5 AA. This will only affect the sequences written to the fasta file (if \code{writeToFile=TRUE}) not the sequences added to the switchAnalyzeRlist (if \code{addToSwitchAnalyzeRlist=TRUE}). Default is TRUE.
}
\item{removeLongAAseq}{
A logical indicating whether to remove sequences based on their length. This option exist to allows for easier usage of the Pfam and SignalP web servers which both currently have restrictions on allowed sequence lengths. If enabled AA sequences are filtered to be < 1000 AA. This will only affect the sequences written to the fasta file (if \code{writeToFile=TRUE}) not the sequences added to the switchAnalyzeRlist (if \code{addToSwitchAnalyzeRlist=TRUE}). Default is FALSE.
}
\item{alsoSplitFastaFile}{
A subset of the web based analysis tools currently supported by IsoformSwitchAnalyzeR have restrictions on the number of sequences in each submission (currently PFAM and, to a lesser extent, SignalP). To enable easy use of those web tools this parameter was implemented. By setting this parameter to TRUE a number of amino acid FASTA files will ALSO be generated each only containing the number of sequences allow (currently max 500 for some tools) thereby enabling easy analysis of the data in multiple web-based submissions. Only considered (if \code{writeToFile=TRUE}).
}
\item{removeORFwithStop}{
A logical indicating whether ORFs containing stop codons, defined as * when the ORF nucleotide sequences is translated to the amino acid sequence, should be A) removed from the ORF annotation in the switchAnalyzeRlist and B) removed from the sequences added to the switchAnalyzeRlist and/or written to fasta files. This is only necessary if you are analyzing quantified known annotated data where you supplied a GTF file to the import function. If you have used \code{analyzeORF} to identify ORFs this should not have an effect. This option will have no effect if no ORFs are found. Default is TRUE.
}
\item{addToSwitchAnalyzeRlist}{
A logical indicating whether the extracted sequences should be added to the \code{switchAnalyzeRlist}. Default is TRUE.
}
\item{writeToFile}{
A logical indicating whether the extracted sequence(s) should be exported to (separate) fasta files (thereby enabling analysis with external software such as CPAT, Pfam and SignalP). Default is TRUE.
}
\item{pathToOutput}{
If \code{writeToFile} is TRUE, this argument controls the path to the directory where the fasta files are exported to. Default is working directory.
}
\item{outputPrefix}{
If \code{writeToFile=TRUE} this argument allows for a user specified prefix of the output files(s). The prefix provided here will get a suffix of '_nt.fasta' or '_AA.fasta' depending on the file type. Default is 'isoformSwitchAnalyzeR_isoform' (thereby creating the 'isoformSwitchAnalyzeR_isoform_nt.fasta' and 'isoformSwitchAnalyzeR_isoform_AA.fasta' files).
}
\item{forceReExtraction}{ A logic indicating whether to force re-extraction of the biological sequences - else sequences already stored in the switchAnalyzeRlist will be used instead if available (because this function had already been used once). Default is FALSE}
\item{quiet}{ A logic indicating whether to avoid printing progress messages. Default is FALSE}
}
\details{
Changes in isoform usage are measure as the difference in isoform fraction (dIF) values, where isoform fraction (IF) values are calculated as <isoform_exp> / <gene_exp>.\cr
The BSGenome object are loaded as separate packages. Use for example \code{library(BSgenome.Hsapiens.UCSC.hg19)} to load the human genome v19 - which is then loaded as the object Hsapiens (that should be supplied to the \code{genomeObject} argument). It is essential that the chromosome names of the annotation fit with the genome object. The \code{extractSequence} function will automatically take the most common ambiguity into account: whether to use 'chr' in front of the chromosome name (UCSC style, e.g.. 'chr1') or not (Ensembl style, e.g.. '1').
The two fasta files outputted by this function (if \code{writeToFile=TRUE}) can be used as input to among others:
\itemize{
\item{\code{CPAT} : The Coding-Potential Assessment Tool, which can be run either locally or via their webserver \url{http://lilab.research.bcm.edu/cpat/} }
\item{\code{Pfam} : Prediction of protein domains, which can be run either locally or via their webserver \url{http://pfam.xfam.org/search#tabview=tab1} }
\item{\code{SignalP} : Prediction of Signal Peptide, which can be run either locally or via their webserver \url{http://www.cbs.dtu.dk/services/SignalP/} }
}
See \code{?analyzeCPAT}, \code{?analyzePFAM} or \code{?analyzeSignalP} (under details) for suggested ways of running these tools.
}
\value{
If \code{writeToFile=TRUE} one fasta file per sequence type (controlled via \code{extractNTseq} and \code{extractAAseq}) are written to the folder indicated by \code{pathToOutput}. If \code{alsoSplitFastaFile=TRUE} both a fasta file containing all isoforms (denoted '_complete' in file name) as well as a number of fasta files containing subsets of the entire file will be created. The subset fasta files will have the following indication "subset_X_of_Y" in the file names.
If \code{addToSwitchAnalyzeRlist=TRUE} the sequences are added to the \code{switchAnalyzeRlist} as respectively \code{DNAStringSet} and \code{AAStringSet} objects under the names 'ntSequence' and 'aaSequence'. The names of these sequences match the 'isoform_id' entry in the 'isoformFeatures' entry of the switchAnalyzeRlist. The switchAnalyzeRlist is returned no matter whether it was modified or not.
}
\references{
For
\itemize{
\item{\code{This function} : Vitting-Seerup et al. The Landscape of Isoform Switches in Human Cancers. Mol. Cancer Res. (2017).}
}
}
\author{
Kristoffer Vitting-Seerup
}
\seealso{
\code{\link{switchAnalyzeRlist}}\cr
\code{\link{isoformSwitchTestDEXSeq}}\cr
\code{\link{isoformSwitchTestSatuRn}}\cr
\code{\link{analyzeORF}}
}
\examples{
### Prepare for sequence extraction
# Load example data and prefilter
data("exampleSwitchList")
exampleSwitchList <- preFilter(exampleSwitchList)
# Perform the isoform switch test
exampleSwitchListAnalyzed <- isoformSwitchTestDEXSeq(exampleSwitchList, dIFcutoff = 0.3) # high dIF cutoff for fast runtime
# analyzeORF
library(BSgenome.Hsapiens.UCSC.hg19)
exampleSwitchListAnalyzed <- analyzeORF(exampleSwitchListAnalyzed, genomeObject = Hsapiens)
### Extract sequences
exampleSwitchListAnalyzed <- extractSequence(
exampleSwitchListAnalyzed,
genomeObject = Hsapiens,
writeToFile=FALSE # to avoid output when running example data
)
### Explore result
head(exampleSwitchListAnalyzed$ntSequence,2)
head(exampleSwitchListAnalyzed$aaSequence,2)
}
|
525e4466edf584f927484682948c022c1cefcd40
|
d5e9909f7f6cc74db31298236d331392e8202a9f
|
/man/mRNA_matrix.Rd
|
20be4d70d23d782e89c8d1f8ab89f2971f9cea77
|
[
"Artistic-2.0"
] |
permissive
|
weiliu123/RLassoCox
|
fbfd97494b39f62ceadaf449737e1d32d24b2670
|
9c4148900f96f89125a224da6036edbf22d47b82
|
refs/heads/master
| 2023-01-14T00:28:22.782283
| 2020-11-20T07:26:05
| 2020-11-20T07:26:05
| 308,259,734
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 555
|
rd
|
mRNA_matrix.Rd
|
\name{mRNA_matrix}
\alias{mRNA_matrix}
\docType{data}
\title{
The expression data
}
\description{
An example of GBM expression data. We acknowledge the TCGA Research Network
for generating the GBM datasets.
}
\usage{data("mRNA_matrix")}
\format{
The format is:
num [1:314, 1:4853] 0.562167 0.022435 -0.000102 -0.719444 0.620269 ...
- attr(*, "dimnames")=List of 2
..$ : chr [1:314] "TCGA-02-0001" "TCGA-02-0003" "TCGA-02-0006"
..$ : chr [1:4853] "90993" "4313" "26248" "57680" ...
}
\examples{
data(mRNA_matrix)
}
\keyword{datasets}
|
9cb3244e9fc79821e9c69221a35ffb82b1404736
|
61c188bba8f228b0f14f4bae7c2fa3dcd1f7b3a2
|
/man/beta.summary.Rd
|
97898736c1dfdd51f092e490f948e2f2fd980cc6
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
covid19br/now_fcts
|
24cb3b7bcbf47b827e50fec43f0dd9647c89dde4
|
44479971618513ef23e82ac277c749b8384e12f8
|
refs/heads/master
| 2023-02-27T01:34:07.757658
| 2021-02-05T20:41:10
| 2021-02-05T20:41:10
| 273,057,036
| 0
| 1
|
CC0-1.0
| 2020-07-07T00:27:17
| 2020-06-17T19:04:42
|
R
|
UTF-8
|
R
| false
| true
| 1,108
|
rd
|
beta.summary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/beta.summary.R
\name{beta.summary}
\alias{beta.summary}
\title{Médias e ICs das probabilidades de notificação a cada dia}
\usage{
beta.summary(NobBS.output, NobBS.params.post)
}
\arguments{
\item{NobBS.output}{objeto retornado pela função NobBS do pacote de
mesmo nome Este argumento é ignorado se o argumento
NobBS.params.post é usado.}
\item{NobBS.params.post}{data frame com as distribuições
posteriores dos parâmetros estimados pela função NobBS. Está
contido na lista que é retornada pela função.}
}
\value{
data frame com média e quantis 2.5\% e 97.5\% das
distribuições a posteriori dos parâmetros de atraso de
notificação pelo método de nowcasting da função NobBS. Os
valores estão convertidos para escala de probabilidade, e
portanto podem ser interpretado como a probabilidade de um caso
ser notificado D dias após o dia do primeiro sintoma, sendo que
vai de zero ao máximo definido pelos argumentos do nowcasting
}
\description{
Médias e ICs das probabilidades de notificação a cada dia
}
|
845b71c37138a1e8d259fcd75b140a452f46c195
|
a74cf5656f928862307733948c604c16d1a8fa04
|
/R scripts/voles.R
|
8c56b5627e5d72b3279c27b37be5b126d7d6160c
|
[] |
no_license
|
bgreenwell/STT6300
|
82b08da731356d73b7bba4739c9a6c99f3d5646c
|
f254e2185daa437312b41d2c83e368f5ca681142
|
refs/heads/master
| 2021-01-20T06:36:06.998033
| 2017-12-05T21:24:08
| 2017-12-05T21:24:08
| 101,509,145
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,304
|
r
|
voles.R
|
################################################################################
# Voles data
################################################################################
# A study was conducted to differentiate between two different species of voles
# found in Europe. Several morphometric measurements were obtained from a sample
# of voles of each species (Airoldi, Flury, and Salvioni, 1996). For now, we
# shall look at the skull length measured in mmX100 of one of the species:
# microtus multiplex. In the R code below, we store the n = 43 measurements in
# the variable skull_length.
skull_length <- c(2145, 2237, 2250, 2270, 2300, 2300, 2305, 2305, 2305, 2330,
                  2330, 2340, 2345, 2345, 2345, 2350, 2350, 2352, 2355, 2355,
                  2370, 2370, 2370, 2385, 2385, 2388, 2390, 2396, 2410, 2435,
                  2445, 2452, 2457, 2465, 2470, 2470, 2475, 2500, 2500, 2525,
                  2535, 2590, 2600)

# Obtain a histogram of the skull length measurements
hist(skull_length,
     freq = FALSE,  # use probability density for the y-axis instead of counts
     main = "Histogram of vole skull lengths",  # histogram title
     xlab = "Length (mmX100)",  # x-axis label
     xlim = c(2000, 2700))

# Overlay the density of a normal distribution fitted by the sample mean/SD.
# Spell out 'length.out' -- the original 'length' relied on partial argument
# matching, which lintr flags and which can break if seq() gains arguments.
x <- seq(from = 2000, to = 2700, length.out = 500)
y <- dnorm(x, mean = mean(skull_length), sd = sd(skull_length))
lines(x, y, lwd = 2, col = "red")

# Estimate P(X > 2600) under the fitted normal model
1 - pnorm(2600, mean = mean(skull_length), sd = sd(skull_length))
abline(v = 2600, lty = 2, lwd = 2, col = "blue")

################################################################################
# Simulated blood pressure data
################################################################################
# Histograms for data with different sample sizes
set.seed(101)  # set random seed for reproducibility
old_par <- par(mfrow = c(1, 3))  # remember settings so they can be restored
hist(rnorm(10, mean = 120), freq = FALSE,
     main = "Coarse", xlab = "Blood pressure (mmHg)")
hist(rnorm(100, mean = 120), freq = FALSE,
     main = "Finer", xlab = "Blood pressure (mmHg)")
hist(rnorm(10000, mean = 120), breaks = 100, freq = FALSE,
     main = "Super fine with density curve", xlab = "Blood pressure (mmHg)")
x <- seq(from = 100, to = 130, length.out = 500)
y <- dnorm(x, mean = 120)
lines(x, y, lwd = 2, col = "red")
par(old_par)  # restore the original 1x1 plotting layout
|
5b0ad268c756f554dddf928b338adb24c29c4f40
|
9d3e3c3950c4101bc863a90e69606d7c7d03a4e9
|
/analogy/03_find_analogs/R/before_biofix/wareHouse/MatchIt/mathchit/matt_style/d_matt_style.R
|
60476c3de1091fdc86136b25d24efbd9fc71e1b5
|
[
"MIT"
] |
permissive
|
HNoorazar/Ag
|
ca6eb5a72ac7ea74e4fe982e70e148d5ad6c6fee
|
24fea71e9740de7eb01782fa102ad79491257b58
|
refs/heads/main
| 2023-09-03T18:14:12.241300
| 2023-08-23T00:03:40
| 2023-08-23T00:03:40
| 146,382,473
| 3
| 6
| null | 2019-09-23T16:45:37
| 2018-08-28T02:44:37
|
R
|
UTF-8
|
R
| false
| false
| 3,805
|
r
|
d_matt_style.R
|
.libPaths("/data/hydro/R_libs35")
.libPaths()

library(data.table)
library(dplyr)
library(MESS) # has the auc function in it.
library(geepack)
library(chron)
library(MatchIt)

source_path <- "/home/hnoorazar/analog_codes/core_analog.R"
source(source_path)

# Print numbers with 9 significant digits. The original script also called
# options(digit = 9): options() does NOT partially match option names, so that
# call merely created a meaningless "digit" option and has been removed.
options(digits = 9)

#######################################################################
#############################
#                           #
#      shell arguments      #
#                           #
#############################
# args = commandArgs(trailingOnly=TRUE)
# model_name <- args[1]
# carbon_type <- args[2]

#############################
#                           #
#    matching parameters    #
#                           #
#############################
# MatchIt settings: nearest-neighbour matching on Mahalanobis distance,
# up to 700 matches per base observation, including precipitation features.
m_method <- "nearest"
m_distance <- "mahalanobis"
m_ratio <- 700
precip <- TRUE

#######################################################################
##########################################################
#                                                        #
#                      directories                       #
#                                                        #
##########################################################
main_out <- "/data/hydro/users/Hossein/analog/z_R_results"

print("does this look right? This is where we are now")
getwd()
print ("_______________________________________________")
print ("main_out is ")
print (main_out)
print ("_______________________________________________")

curr_out <- file.path(main_out, m_method, m_distance)
print ("curr_out is (to be completed)")
print (curr_out)
print ("_______________________________________________")

print ("line 55, critical")
print (getwd())

# The working directory encodes the scenario subdirectory; strip the fixed
# prefix to recover the relative path used to mirror the output tree.
curr_sub <- gsub(x = getwd(),
                 pattern = "/data/hydro/users/Hossein/analog/local/ready_features/broken_down_location_level_coarse/rcp85/",
                 replacement = "")
print ("curr_sub is ")
print (curr_sub)
print ("_______________________________________________")

final_out <- file.path(curr_out, curr_sub)
print ("This is where the output will be going (final_out) ")
print (final_out)
#######################################################################
# Test the directory directly (the original compared dir.exists(...) == F,
# which both compares against the reassignable shorthand F and is redundant).
if (!dir.exists(final_out)){
  dir.create(path = final_out, recursive = TRUE)
}
#######################################################################
# Candidate pool: the all-USA feature set each base year/location is
# matched against.
ctrl_data <- data.table(readRDS("/data/hydro/users/Hossein/analog/usa/ready_features/all_data_usa.rds"))

# current one single file corresponding to a given
# location and year to be read as base file.
dir_con <- dir()
print ("_________________*************_________________")
print (getwd())
print ("_________________*************________________")

# remove filenames that aren't data
dir_con <- dir_con[grep(pattern = "feat_",
                        x = dir_con)]
print ("There should be one file here, is it?")
print (dir_con)
print ("_______________________________________________")

counter <- 0
start_time <- Sys.time()
# 'feat_file' avoids shadowing base::file (the original looped over 'file'),
# and 'base_df' avoids shadowing the base package name.
for (feat_file in dir_con){
  all_loc_data <- data.table(readRDS(feat_file))
  years <- unique(all_loc_data$year)
  for (yr in years){
    print (yr)
    start_time_one_run <- Sys.time()
    counter <- counter + 1
    base_df <- all_loc_data %>% filter(year == yr)
    print (dim(base_df))
    # sort_matchit_out() comes from the sourced core_analog.R script.
    match_out <- sort_matchit_out(base = base_df, usa = ctrl_data,
                                  m_method = m_method,
                                  m_distance = m_distance,
                                  m_ratio = m_ratio,
                                  precip = precip)
    # NOTE(review): base_df$location / base_df$year are whole columns; this
    # relies on every row of the filtered table sharing one location/year.
    saveRDS(match_out, file.path(final_out, paste0(base_df$location, "_", base_df$year, ".rds")))
    # Report the duration of the first iteration as a runtime estimate.
    if (counter == 1){ print (Sys.time() - start_time_one_run) }
  }
}
end_time <- Sys.time()
print(end_time - start_time)
|
aacfc34a1108b7cef50bc35565efdeeffcf7d9aa
|
d6eeab02f0268cec50488b03b796da5c1ec42292
|
/man/regression_cph.Rd
|
548a67dea5fdaf105ec7a1cba8728f9d2aa77b21
|
[] |
no_license
|
Ifitrains/AirBnBCopenhagen
|
7d857ab10691e9bad17c7a1f58fabaca3c972d64
|
8b046566753c9b6760130ef3caffd8fd067313c3
|
refs/heads/master
| 2020-04-20T03:25:08.524591
| 2019-01-31T22:54:31
| 2019-01-31T22:54:31
| 168,597,761
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 363
|
rd
|
regression_cph.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regression_cph.R
\name{regression_cph}
\alias{regression_cph}
\title{Estimate models with robust standard errors}
\usage{
regression_cph(Data)
}
\arguments{
\item{Data}{data frame}
}
\value{
4 regressions and 2 LaTeX outputs
}
\description{
Estimate models with robust standard errors
}
|
b37c2b10e7db5ebe87be82a3eb4d23ae93360e34
|
184180d341d2928ab7c5a626d94f2a9863726c65
|
/issuestests/AGread/inst/testfiles/get_VM_C/get_VM_C_output/log_a008f34ddf7b2c13c4131860f85d5ba0ca8bdd9b/get_VM_C-test.R
|
ab3d60929fc6a1545f0f6b2825ead642deb6de22
|
[
"MIT"
] |
permissive
|
akhikolla/RcppDeepStateTest
|
f102ddf03a22b0fc05e02239d53405c8977cbc2b
|
97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5
|
refs/heads/master
| 2023-03-03T12:19:31.725234
| 2021-02-12T21:50:12
| 2021-02-12T21:50:12
| 254,214,504
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,961
|
r
|
get_VM_C-test.R
|
testlist <- list(x = c(-9.55582196017401e+29, -4.90459010763407e-29, 4.90985848662137e-132, 1.34087923799627e-99, -2.83758126022851e-214, -3.86582708286114e+118, 1.56442933387274e-40, 7.80171305823656e-14, -1.83590112917186e-143, -2.12225393135038e-27, -2.39035558481144e+94, 1.39381080421907e+78, 8.09020071287152e+217, 3.20154172554715e+166, -1.07544081057137e+142, -1.22716245834473e+249, 0.0045576245392274, 4.110270279077e-119, -2.13090432437324e-232, -2.00173720606795e-54, 29540115.857733, 1.14560222054498e+104, -1.51179353689993e+219, 1.85583157125209e-183, 5.34002291753896e+279, 3.163119668952e+220, -2.84339292934663e+164, 5.13557333556931e-82, -4.11812482604814e-235, 9.56040251934328e-42, -2.59207149164433e-228, 1.2158458081832e-222, -1.74238018159446e+182, 7.41245245658227e-295, -4.00558303551056e+236, 3.47151817076085e-119, 4.10403783345669e-88, -7.40647194992956e+299, -2.19378089629216e+123, -1.54830595119915e-41, -2.54764432636554e+33, 2.48050399286568e+82, 3.22488547429564e-46, -7.02949111491804e+254, -7.61353710150679e+209, -17873921569877248000, -4.78466694563666e+177, 9.36219676678338e-189, 4.39158645590191e+53, -2.88703981054995e-216, 2.78090680147068e-174, 1.48246692773206e-213, -4.99040512819108e-30, 2.24505481320356e+72, -Inf, -3.81316040338393e+279, -2.59826575494024e+116, 9.66176246763658e+303, -8.7275455542068e-49, 2.49022343960829e+238, 9.23934088471241e+265, -9.44640342943815e+145, -7.78813268347435e-290, -6.86657422733089e+196, 1.2158458081832e-222, 4.38927944252164e-14, 1.2158458081832e-222, -2.79793587635494e-150, 1.21474661935272e-71, 5.11142779337243e-26, 3.61605547103826e+106, -1.29008433506166e+98, -4.71290430009938e+192, 2.16276193556995e-78, Inf, -73702024288693120, 3.67680763978728e+303, 6.79444019345624e+289, 1.57080783318714e-223, 2.44989196103371e-214, 2.14683789541018e+191, -1.56271793377671e+242, -8.12565116309796e-10, -2.15359476910305e-49, 6.45169663269538e+246, -8.81729169866251e-223, -1.72994822845411e-121, 
-1.62164229453157e+62, -7.11519061213384e-291, -5.96039842159441e+297, 1.78946373645738e-58, -2477302959913879552, 5.3208487330171e+104, 0), y = c(-3.26554114015345e+152, 3.18206849979581e-100, -1.56175745908205e-27, -1.53699520139895e-168, -3.45365770774053e-208, 1.64987860536309e+306, -8.4875906377816e+297, -2.24827554179635e+194, 3.33229472935506e-79, -1.74384352802101e+258, -1.48447741823703e+292, 1.13714330989583e-117, NaN, 2.26233751114504e-222, -1.01722477369572e+190, -Inf, 3.76448348749029e+191, NaN, -8.35948760513993e+225, -1.70730166016845e-156, 1.27082426005613e+293, -4.58747899892024e-154, 2.0190979016347e+305, -9.03381870805068e+126, 9.95505452219636e+88, -8.94698245383292e+169, 9.07030335339282e-230, -2.60502401687064e+201, 1.64945789439185e-87, 4.99710237875113e-12, -7.17761868684456e-304, -4.3658043201413e+77, -1.03383180363943e+79, -1.06914531395343e+103, -5.30285459292229e+216, -3.0565209504357e-130, -1.71729511302952e-159, -2.17924555299383e-269, 4.28642965121133e-95, -2.15390099076119e-268, -1.72195489251578e-249, 6.58367713694062e+280, 2.46336940755175e+76, -5.20477740765405e-262, 1.39905108202245e-99, 9.88192228952185e-25, -3.61551136030365e-45, -1.74619475327206e-101, 3.6666299164515e-120, -Inf, 7.17066122458692e-250, -2.378202461375e-231, -2.91706999191851e-31, 2.91754200415233e-52, -8.15309037183729e+138, -2.56543721632249e-216, 6.42400059829704e-260, -1.16133602395052e+62, 2.67059509197991e+205, 4.65822589964347e-299, 9.1666663674939e+232, 1.65110215729462e+101, Inf, 1.15230597511972e+138, 1.97267429248055e+163, 3.97553287750579e-113, -6.57481557461354e-05, 1.35715728528751e+180, -2.96150696524813e+235, -4.24956568308809e+21, 2.41521697023484e-123, -1.22597501534853e-99, 1.19023625716e+88, -1.19015768274565e-116, 1.29054759607899e+35, 2.64241368191397e-72, 4.58920593280658e+173, 1.62627920428666e-289, -1.47803007853383e+144, -6.15463639791464e-89, -1.05364750681707e+169, -2.45565549861933e-224, -1.98728021213713e-262, 
-2.92127556842489e-21, 6918497.86511318, 25063312106601372, -7.19944401535096e-57, -1.91849726575268e-287, 7.21116286537801e-227, -3.60207037565642e+110, -7.26637752801978e+235, 1.07246462937891e+118, 0), z = c(-4.84932478603115e+123, NaN, Inf, -5.7275290747398e+192, 5.58421313925681e+204, -2.28660386920664e+173, -1.97697637114179e-69, 3.38075822495626e+149, 4.32081599986107e-209, -1.29313617432814e-14, 4.71547010058052e-43, -250.127205510875, 1.46149092470503e+123, 9.40731273350158e-07, -2.12675472790339e-194, -1.65103679993611e-37, -1.86379765044409e-281, NaN, 6.08867901867669e+206, 5.98780222943024e-177, 5.3714752216163e+289, -3.45950447436657e+183, 1.49124941875444e+123, 1.5271822412807e+69, -1.11744397039332e+248, 2.54518372279385e+39, -1.89122208320009e+202, 1.32649719153595e-258, -4.98888109635576e-11, 2.78713015973591e-92, -2.19763919263402e-262, 1.0259819645017e-63, -6.65300857906418e+44, -7.06992781198399e-163, 1.97837196945601e+50, 8.66847551530699e+52, NA, 5.05828908606344e+82, -6.61537374830274e+258, -5.53117588312793e-242, -7.44546802785227e+176, 5.89018085455203e+172, 3.11179473946186e-269, 4.27273222011121e-258, 1.81205902881488e+292, 3.63659296687523e+150, -1.76238047328413e-172, 3.21076982012694e+97, -1.97697562608513e-32, -7.48931142066971e+103, -1.6935534783091e-223, -2.23568438386655e-290, 4.60738393919573e-112, -1.67433275761641, -1.45002600215092e-210, 4.589781857875e+224, -7.72209849174466e-23, 5.90208266243812e+79, -3.83061650675902e-208, 1.86512759599204e+231, 6.41943730702922e+301, 9.46104617841303e-28, 1.29593850784906e+277, -3.01659138117281e-306, -3.10960825603861e-93, -1.66248253051722e+24, 2.57961067707722e-78, 4.73689044138891e-82, -4.0456986884011e+22, -0.019009825744731, -1.52390505473001e-231, -1.14984339579174e+177, 1.66207346607985e+119, -1.56225281476421e-174, -3.19832453218957e-155, -Inf))
result <- do.call(AGread:::get_VM_C,testlist)
str(result)
|
37b166c69fb5c625105fbeeb71e9c9f57b169dce
|
164d2fe8bea6f6bc6e9f8397ae0a63c43b2a0768
|
/indicators/emergency_admissions.R
|
20bb772e10a3a037859c6dd8a4453775b8b29ea0
|
[] |
no_license
|
Public-Health-Scotland/covid-vulnerability
|
fab5753c64efa92cc50909da54a2c0437726af65
|
9385de13200613aea5baf5cd61053c21fe18bc95
|
refs/heads/master
| 2023-04-27T03:47:24.105962
| 2021-05-13T14:13:57
| 2021-05-13T14:13:57
| 249,380,562
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,148
|
r
|
emergency_admissions.R
|
# Emergency admissions and multiple emergency admissions(+65).
# Parts 1 and 2 take about ~20 minutes to run
# Part 1 - Extract data from SMRA
# Part 2 - Create the different geographies basefiles
# Part 3 - Run analysis functions
###############################################.
## Packages/Filepaths/Functions ----
###############################################.
source("1.indicator_analysis.R") #Normal indicator functions
# Build the emergency-admission and multiple-admission basefiles for one
# geography column, storing the results by position in the module-level
# lists data_ea_list / data_ma_list (via <<-, as the mapply driver expects).
create_geo_levels <- function(geography, list_pos) {
  # Collapse to one row per patient/year/area: if a patient has several
  # admissions in a year, keep the demographics of their first record and
  # record how many admissions they had.
  per_patient <- data_adm %>%
    rename(code = {{geography}}) %>%
    group_by(link_no, year, code) %>%
    summarise(sex_grp = first(sex_grp),
              age_grp = first(age_grp),
              admissions = n()) %>%
    ungroup()

  # Tally distinct patients by year/area/sex/age band.
  tally_patients <- function(df) {
    df %>%
      group_by(year, code, sex_grp, age_grp) %>%
      count() %>%
      ungroup() %>%
      rename(numerator = n)
  }

  # Emergency admissions indicator: every admitted patient counts once.
  data_ea_list[[list_pos]] <<- tally_patients(per_patient)

  # Multiple-admissions indicator: patients aged 65+ (age_grp >= 14)
  # with two or more admissions in the year.
  data_ma_list[[list_pos]] <<- tally_patients(
    per_patient %>% filter(age_grp >= 14 & admissions >= 2)
  )
}
###############################################.
## Part 2 - Create the different geographies basefiles ----
###############################################.
# Source extract of emergency admissions prepared upstream, read from the
# shared ScotPHO data area.
data_adm <- readRDS('/PHI_conf/ScotPHO/Profiles/Data/Prepared Data/smr01_emergency_basefile.rds')
# creating file for emergency admissions and multiple admissions
data_ea_list <- list() # creating empty lists for placing data created by function
data_ma_list <- list()
# Run create_geo_levels once per geography column and for both indicators.
# The function fills data_ea_list/data_ma_list by position via <<-, so
# list_pos must stay aligned with the geography vector.
mapply(create_geo_levels, geography = c("ca2019", "intzone2011", "datazone2011"),
       list_pos = 1:3)
data_ea <- do.call("rbind", data_ea_list) # converting from list into dataframe
data_ma <- do.call("rbind", data_ma_list) # converting from list into dataframe
# data_folder is defined in the sourced 1.indicator_analysis.R script.
saveRDS(data_ea, paste0(data_folder, 'Prepared Data/ea_raw.rds'))
saveRDS(data_ma, paste0(data_folder, 'Prepared Data/ma_raw.rds'))
###############################################.
## Part 3 - Run analysis functions ----
###############################################.
# The function call uses a different geography to datazone11 or council as this way,
# it skips the parts of the function that bring the geographical info.
# analyze_first/analyze_second come from the sourced ScotPHO functions script.
# NOTE(review): hscp = T uses the reassignable shorthand; TRUE would be safer.
mapply(analyze_first, filename = c("ea", "ma"), geography = "all", measure = "stdrate",
       pop = c("DZ11_pop_allages", "DZ11_pop_65+"), yearstart = 2014, yearend = 2018,
       time_agg = 5, epop_age = "normal", hscp = T)
# Emergency admissions
analyze_second(filename = "ea", measure = "stdrate", time_agg = 5,
               epop_total = 200000, ind_id = 20305, year_type = "calendar")
# Multiple emergency admissions for 65+
analyze_second(filename = "ma", measure = "stdrate", time_agg = 5,
               epop_total = 39000, ind_id = 20306, year_type = "calendar")
##END
|
4ccb591d4e735e4fa9016c1acb2951710212ff66
|
9698bc9a642e549505c211078a5066e3d6a945cf
|
/Relectoral/man/Agregado_Prov_MIR.Rd
|
0fcc4666c48fdf90ed499c2c6715eb480b72b6f2
|
[] |
no_license
|
Miguelro/Electoral
|
9924eb77325feb6840db06ca0b8a2978fba3e0ad
|
b8c502bfcdd55d9042447d10379f096eb6b844e3
|
refs/heads/master
| 2020-12-07T22:36:18.297678
| 2020-06-24T08:24:54
| 2020-06-24T08:24:54
| 232,817,651
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,211
|
rd
|
Agregado_Prov_MIR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Leer_Datos.R
\name{Agregado_Prov_MIR}
\alias{Agregado_Prov_MIR}
\title{Download. Datos Agregado a nivel Provincial obtenidos del MIR (Ministerio Interior)}
\usage{
Agregado_Prov_MIR(Ano, Mes, Tipo, Ruta, Borrar = T)
}
\arguments{
\item{Ano}{El año de la elección con cuatro dígitos (YYYY). Puede ser numérico o texto}
\item{Mes}{El mes de la elección. Tiene que ser como texto. (Por ejemplo "06", correspondiente a junio)}
\item{Tipo}{El tipo de fichero a descargar: "Congreso" o "Europeas"}
\item{Ruta}{Es la ruta donde se descargarán los ficheros tipo zip del MIR}
\item{Borrar}{Es de tipo Boolean e indica si se borran o no los ficheros después de obtener el data.frame}
}
\value{
Objeto de tipo tbl_df con los datos del voto a nivel Provincial
}
\description{
Esta función sirve para descargar los datos en excel a nivel Provincial y
carga un data frame con esa información.¡¡¡Observación!!!: Los campos devueltos son todos
de tipo character, por lo que si es necesario hacer operaciones, habrá que convertir
los campos necesarios a numéricos
}
\examples{
c<-Agregado_Prov_MIR(2019,"05",Tipo = "Europeas","D:/")
}
|
88d5fd436d3c853a36e2b646480418bc981d4246
|
1233bd68fa715c898ea416f1945235bd1ee341ac
|
/scripts/cell_agg_fxn.R
|
d5ed2843067e694495d1d2cc90b87cb83550d09c
|
[] |
no_license
|
grapp1/mb_sensitivity
|
98a3ef97e989b99f945e452b2859efb77c0a05fe
|
783531044cd8877a21e32803543a0eb8bd4d8453
|
refs/heads/master
| 2021-06-14T06:36:09.843400
| 2020-09-01T00:56:39
| 2020-09-01T00:56:39
| 254,479,809
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,530
|
r
|
cell_agg_fxn.R
|
### 20191107 cell_agg_fxn
# Aggregate particle data onto the 90 m model grid (from XY coordinates).
# Returns a data frame with X_cell, Y_cell and the requested statistic of
# agg_colname per cell; cells with no data are flagged -2 (inside the
# watershed) or -1 (outside the watershed).
#
# Args:
#   input_df    : data frame of particles with coordinate + value columns
#   x_colname   : name of the x-coordinate column (default "init_X")
#   y_colname   : name of the y-coordinate column (default "init_Y")
#   agg_colname : name of the column to aggregate
#   funct       : aggregation function applied per cell (default mean)
#   nx, ny      : grid extent in cells (defaults match the 91 x 70 domain)
cell_agg_fxn <- function(input_df, x_colname = "init_X", y_colname = "init_Y",
                         agg_colname, funct = mean, nx = 91, ny = 70){
  # Keep only the coordinate columns and the column to be aggregated.
  df_agg <- input_df[c(x_colname, y_colname, agg_colname)]

  # Map coordinates to 1-based 90 m cell indices.
  # BUGFIX: the original hard-coded df_agg$init_X / df_agg$init_Y here, which
  # broke any call that supplied non-default x_colname/y_colname.
  df_agg$X_cell <- as.integer(ceiling(df_agg[[x_colname]]/90))
  df_agg$Y_cell <- as.integer(ceiling(df_agg[[y_colname]]/90))

  # Aggregate the statistic per cell.
  df_agg <- aggregate(x = df_agg[c(agg_colname)],
                      by = df_agg[c("X_cell",
                                    "Y_cell")],
                      FUN = funct)

  # Attach the watershed mask so that every grid cell is represented.
  # NOTE(review): absolute path -- assumes the .Rda defines `watershed_mask`
  # with X_cell/Y_cell/flowpath columns; confirm before reusing elsewhere.
  load("~/research/domain/watershed_mask.Rda")
  df_agg <- full_join(df_agg, watershed_mask, by = c("X_cell", "Y_cell"))

  # Flag cells with no particle data. This replaces the original nested
  # nx-by-ny loop, which re-scanned the whole frame with logical subsetting
  # on every iteration; the vectorised form is equivalent and O(rows).
  #   flowpath == 1 (inside the domain)  -> -2
  #   flowpath == 0 (outside the domain) -> -1
  in_grid <- df_agg$X_cell >= 1 & df_agg$X_cell <= nx &
    df_agg$Y_cell >= 1 & df_agg$Y_cell <= ny
  no_data <- is.na(df_agg[[agg_colname]]) & in_grid
  df_agg[[agg_colname]][which(no_data & df_agg$flowpath == 1)] <- -2
  df_agg[[agg_colname]][which(no_data & df_agg$flowpath == 0)] <- -1

  return(df_agg)
}
|
0ceb0aec84814c507955c66ec8c4e121f5c47165
|
012c3d8e5a9bb435b0273687481f8c7f5b3e5175
|
/scripts/R/pres_abs_dendo.R
|
21558c2af388ffd4539381e88c60a6422940ee4b
|
[] |
no_license
|
BjornWouters/internship_uu
|
084ec1ee967790f20a9f78fea74c7f7dc6fcc676
|
2aa33fb64c172639de4e0f410833ba306ac4f55b
|
refs/heads/master
| 2020-03-23T00:08:57.847298
| 2018-12-10T14:05:12
| 2018-12-10T14:05:12
| 140,847,123
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,216
|
r
|
pres_abs_dendo.R
|
# Exploratory clustering of gene-coverage data: bootstrap hierarchical
# clustering (pvclust) on merged presence/absence coverage tables, with
# ComplexHeatmap visualisations combined with the bootstrapped dendrograms.
# load libraries
library(pvclust)
# biocLite("ComplexHeatmap")
# NOTE(review): Heatmap() below needs library(ComplexHeatmap), which is only
# referenced in the commented install line above -- confirm it is attached.
# Set the working directory.
setwd('Documents/school/internship/repo/internship_uu/data/')
# Coverage tables for the PFS and non-PFS sample sets, keyed by mRNA id.
cov_file_pfs <- read.table('prepared_dataset.csv', sep = ',', row.names = 'mrna_id', header = 1)
cov_file_non_pfs <- read.table('prepared_test_dataset.csv', sep = ',', row.names = 'mrna_id', header = 1)
# Outer-join the two sets on mRNA id, restore row names, and drop the
# sample column ES1513 (reason not recorded here).
merged_file <- merge(cov_file_pfs, cov_file_non_pfs, by = "row.names", all = TRUE)
rownames(merged_file) <- merged_file[,'Row.names']
merged_file[,'Row.names'] <- NULL
merged_file[,'ES1513'] <- NULL
#full data range!!
# bootstraps the analysis to determine significance of observed cluster.
genecov.pv <- pvclust(merged_file, nboot=100, method.dist =
"euclidean", method.hclust = "ward.D2")
#boston.pv <- pvclust(Boston, nboot=100) # bootstraps the analysis to determine significance of observed cluster.
plot(genecov.pv)
# NOTE(review): `cov_file` is never defined in this script (only
# cov_file_pfs / cov_file_non_pfs) -- presumably `merged_file` was meant.
genecov_scaled = apply(cov_file, 2, scale) # columns are scaled to allow trends to be seen.
Heatmap(genecov_scaled, cluster_columns = genecov.pv$hclust,
heatmap_legend_param = list(title = "Scaled Coverage")) ## this code combines heatmap and pvclust() generated dendrogram.
# NOTE(review): `Genecov_final` is not defined in this file -- presumably
# created interactively in an earlier session; verify before rerunning.
rownames(Genecov_final) <- c()
Heatmap(Genecov_final, cluster_columns = genecov.pv$hclust,
heatmap_legend_param = list(title = "Coverage")) ## this code combines heatmap and pvclust() generated dendrogram.
#full binary range!!
# NOTE(review): `binary` and `binary_scaled` are also undefined in this file.
binary.pv <- pvclust(binary, nboot=100) # bootstraps the analysis to determine significance of observed cluster.
#boston.pv <- pvclust(Boston, nboot=100) # bootstraps the analysis to determine significance of observed cluster.
plot(binary.pv)
#binary_scaled = apply(binary, 2, scale) # columns are scaled to allow trends to be seen.
Heatmap(binary_scaled, cluster_columns = binary.pv$hclust,
heatmap_legend_param = list(title = "Scaled Binary Coverage")) ## this code combines heatmap and pvclust() generated dendrogram.
rownames(binary) <- c()
Heatmap(binary, cluster_columns = binary.pv$hclust,
heatmap_legend_param = list(title = "Binary Coverage")) ## this code combines heatmap and pvclust() generated dendrogram.
#See which contigs reside with missing genes!!!!!
|
ad93563b79010497107ac9540c764ddf64b2234d
|
28c3f73a6d70c2fed4b2d2011bd1d9416a293b0e
|
/R/sequester.R
|
dbeab77718e158388d809ea05e442d1abad4fa6e
|
[] |
no_license
|
cdeterman/OmicsMarkeR
|
a8cbe69bc26f98db69b89c02949a3a4d0ab2d8a1
|
f9a0f3dfd067c0a0beb9ad421982ad86e63914cf
|
refs/heads/master
| 2021-01-10T20:58:11.111636
| 2017-01-10T15:14:14
| 2017-01-10T15:14:14
| 13,659,839
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,056
|
r
|
sequester.R
|
#' @title Sequester Additional Parameters
#' @description When the user provides additional arguments to either
#' \code{fs.stability} or \code{fs.ensembl.stability} this function will
#' extract the parameters to be fit if optimization is not used i.e.
#' \code{optimize = FALSE}.
#' @param theDots List of additional arguments (named with a leading dot,
#' e.g. \code{.ncomp}, \code{.mtry})
#' @param method Vector of strings listing models to be fit
#' @return Returns a list of the following elements
#' @return \item{parameters}{The parameters that will be fit to models}
#' @return \item{pnames}{The names of the specific parameters}
sequester <- function(theDots, method){
    # One slot per model; each slot receives the subset of 'theDots'
    # that holds the tuning parameter(s) for that model.
    Value <- vector("list", length(method))
    names(Value) <- method
    # Accumulates the (dot-free) names of all sequestered parameters.
    pnames <- character()

    # seq_along() is safe for a zero-length 'method'; the original
    # seq(along = method) relied on partial argument matching.
    for(m in seq_along(method)){
        # Each branch assigns into Value/pnames directly; the original
        # captured the branch value in an unused 'vals' variable (removed).
        switch(tolower(method[m]),
               plsda =
               {
                   x <- which(names(theDots) == ".ncomp")
                   if(length(x) < 1){
                       stop("Error: If not autotuning or providing a grid,
             PLSDA requires you to specify 'ncomp'")
                   }
                   Value[[m]] <- theDots[x]
                   pnames <- c(pnames, "ncomp")
               },
               gbm =
               {
                   x <- which(names(theDots)
                              %in% c(".n.trees",
                                     ".interaction.depth",
                                     ".shrinkage"))
                   if(length(x) < 3){
                       stop("Error: If not autotuning or providing a grid,
             GBM requires you to specify 'n.trees',
             'interaction.depth', and 'shrinkage' values")
                   }
                   Value[[m]] <- theDots[x]
                   pnames <- c(pnames,
                               c("n.trees", "interaction.depth", "shrinkage"))
               },
               rf =
               {
                   x <- which(names(theDots) == ".mtry")
                   if(length(x) < 1){
                       stop("Error: If not autotuning or providing a grid,
             RandomForest requires you to specify 'mtry'")
                   }
                   Value[[m]] <- theDots[x]
                   pnames <- c(pnames, "mtry")
               },
               svm =
               {
                   x <- which(names(theDots) == ".C")
                   if(length(x) < 1){
                       stop("Error: If not autotuning or providing a grid,
             SVM requires you to specify 'C'")
                   }
                   Value[[m]] <- theDots[x]
                   pnames <- c(pnames, "C")
               },
               glmnet =
               {
                   x <- which(names(theDots) %in% c(".lambda", ".alpha"))
                   # BUGFIX: the original tested length(x) < 1, letting calls
                   # through with only one of the two parameters its own
                   # error message declares mandatory; both are required.
                   if(length(x) < 2){
                       stop("Error: If not autotuning or providing a grid,
             glmnet requires you to specify 'lambda' and 'alpha'")
                   }
                   Value[[m]] <- theDots[x]
                   pnames <- c(pnames, c("lambda", "alpha"))
               },
               pam =
               {
                   x <- which(names(theDots) == ".threshold")
                   if(length(x) < 1){
                       stop("Error: If not autotuning or providing a grid,
             PAM requires you to specify 'threshold'")
                   }
                   Value[[m]] <- theDots[x]
                   pnames <- c(pnames, "threshold")
               }
        )
    }

    list(parameters = Value,
         pnames = pnames)
}
|
c012f72a8d39fcc623415c8429a6ddcdf6269fc0
|
7d632db47db7c86fe8b94d9808d3454ad9fe8b8e
|
/assignment_3/assignment_gruber.R
|
925a844c9a14d59cdbf7a0a21e03034bc27de25c
|
[] |
no_license
|
corneliagru/comp_stat
|
83aca222491bcf0be04513e8370fdd219bbb89f6
|
71faa124f368df871218ab95ca379696842f8671
|
refs/heads/main
| 2023-02-10T14:53:15.934679
| 2021-01-12T12:56:57
| 2021-01-12T12:56:57
| 326,626,096
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,913
|
r
|
assignment_gruber.R
|
# Mixture-model assignment: fit a mixture by EM, sample from the fitted
# density with Metropolis-Hastings, and bootstrap confidence intervals for
# the delta parameter. EM(), MCMC(), MCMC_iid(), dens_plot() and bs_param()
# are defined in the sourced preparation.R -- not visible in this file.
# NOTE: statement order matters throughout; every stochastic call advances
# the RNG stream, so lines must not be reordered.
# import ------------------------------------------------------------------
data <- read.table("data.txt")
x <- data$V1
source("preparation.R")
# description -------------------------------------------------------------
#overview data
round(summary(x), digits = 2 )
# find best parameter settings. apparently there is only one group, all deltas
# are basically the same 2.37
param_k5 <- EM(x, k = 5)
param_k5$delta
# only one class
param <- EM(x, k = 1)
delta <- param$delta
p <- param$p
# plot data and density ---------------------------------------------------
xx = seq(0,8, by = 0.1)
yy <- sapply(xx, dens_plot, delta = delta, p = p)
plot(density(x), ylim = c(0,0.4), main = "")
lines(xx, yy, col = "red")
legend("topright", legend = c("observed data", "delta = 2.37"),
       col = c("black", "red"),lty = 1, cex = 1.1)
# Metropolis hastings -----------------------------------------------------
#find settings for metropolis hastings algorithm
res <- MCMC(theta_0 = 1, delta = delta, p = p, n = 7000, sd = 4)
# acceptance rate is around 30% when sd = 4
res$acceptance
# diagnostics: autocorrelation and trace plot of the chain
acf(res$mc, lag.max = 50)
plot(ts(res$mc))
# -> n = 7000, burn in 1000, lag 20
# thin the chain to approximately iid draws
samp <- MCMC_iid(res$mc, n = 300, lag = 20, burn_in = 1000)
# looks very similar
plot(density(samp))
lines(density(x), col = "red")
# confidence intervals ----------------------------------------------------
# check if non parallel works
res_bs <- bs_param(B = 10, delta = delta, p = p, parallel = FALSE)
apply(res_bs, 2, quantile, probs = c(0.025, 0.975))
#prepare
param <- EM(x, k = 1)
delta <- param$delta
p <- param$p
#parallelize
# detectCores()/registerDoParallel() come from parallel/doParallel --
# presumably attached by preparation.R; confirm before running standalone.
numCores <- detectCores()
registerDoParallel(numCores)
source("preparation.R")
start <- Sys.time()
# NOTE(review): 'lag' and 'burn_in' in the n = 300*lag + burn_in expression
# are not defined in this script -- presumably globals from preparation.R;
# TODO confirm (the later lag=/burn_in= arguments do not help here).
res_bs <- bs_param(B = 1000, delta = delta, p = p, theta_0 = 1,
                   n = 300*lag + burn_in, sd = 4, lag = 20,
                   burn_in = 1000, parallel = TRUE)
end <- Sys.time()
print(end - start)
stopImplicitCluster()
# 95% bootstrap percentile interval for each parameter column
ci <- apply(res_bs, 2, quantile, probs = c(0.025, 0.975))
ci
d_lower <- ci[1,1]
d_upper <- ci[2,1]
# plot CI ----------------------------------------------------------------
# densities under the point estimate and both CI endpoints for delta
xx = seq(0,8, by = 0.1)
y_estim <- sapply(xx, dens_plot, delta = delta, p = p)
yy <- sapply(xx, dens_plot, delta = d_lower, p = p)
yyy <- sapply(xx, dens_plot, delta = d_upper, p = p)
plot(density(x), ylim = c(0,0.45), main = "", lwd = 2)
lines(xx, y_estim, col = "red", lty = "solid", lwd = 2)
lines(xx, yy, col = "gray20", lty = "dotted", lwd = 2)
lines(xx, yyy, col = "gray20", lty = "longdash", lwd = 2)
legend("topright", legend =
         c(paste0("delta = ", round(d_lower, digits = 2)),
           paste0("delta = ", round(delta, digits = 2)),
           paste0("delta = ", round(d_upper, digits = 2)),
           "true density"),
       col = c("gray20","red", "gray20", "black"),
       lty = c("dotted","solid", "longdash", "solid"), cex = 1.1, lwd = 2)
|
a8adf7778baf721ef4fbdae00e4a42b10e7b3403
|
fc7c8dc1c418f4ef10370e64d4d0fa957283885c
|
/man/embed_var.Rd
|
8e7a91a5532d2120f591200300c175962bee6427
|
[] |
no_license
|
cran/i2dash
|
195727ba7eeeb763a8e89558cd05991752597689
|
5848c079c871c384b02f2ef524f0b2af7f83f26f
|
refs/heads/master
| 2023-03-28T12:33:52.674409
| 2021-03-29T14:20:02
| 2021-03-29T14:20:02
| 267,008,053
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 538
|
rd
|
embed_var.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/components.R
\name{embed_var}
\alias{embed_var}
\title{A method to embed tabular data into an HTML link for download.}
\usage{
embed_var(x, ...)
}
\arguments{
\item{x}{Data, which will be written to the embedded file.}
\item{...}{Additional parameters.}
}
\value{
HTML code of a hyperlink containing the base64 encoded data.
}
\description{
A method to embed tabular data into an HTML link for download.
}
\examples{
embed_var(mtcars)
}
|
6c3b0dd5de6202602cbc2e03e2930cc5b71b676b
|
c3e6148a7a86fa46cc6aa3a80efadcf42a56cb06
|
/Old/gamlss_no_bccg_v3_cole.R
|
96bde37ea58b9e81b3aa4122c8da630c40e6f1bb
|
[] |
no_license
|
ljwright/gamlss-tutorial
|
3a90468cf3c433b4829eaa0deffb2a8a29a1cfc2
|
c8a3054c9ccac38d0cceb99c0f2ebdacf209e028
|
refs/heads/master
| 2023-08-27T11:11:23.217067
| 2021-11-12T17:38:04
| 2021-11-12T17:38:04
| 427,446,287
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,075
|
r
|
gamlss_no_bccg_v3_cole.R
|
#load the libaries
library(gamlss)
library(sitar)
library(tidyverse)
library(haven)
library(psych) #for the tabstat equivilent describeby to check sub group means, sd
#set wd
setwd("C:/Users/DAB/OneDrive - University College London/ideas/quantile 2/analysis/output")
#load all data
dat <- read_dta('cleaned_datafor_r.dta')
#bmi
bmi46<- subset(dat, select = c("bmi46", "sex", "fscb" , "pa42b"))
#bmi
describeBy(bmi46, group="sex" ,digits=2)
describeBy(bmi46, group="fscb" ,digits=2)
describeBy(bmi46, group="pa42b" ,digits=2)
#output estimates
bmi46sex_no <- gamlss(bmi46 ~ sex ,
sigma.formula = ~sex ,
family = NO (mu.link = log), data = na.omit(bmi46) )
bmi46fscb_no <- gamlss(bmi46 ~ fscb ,
sigma.formula = ~fscb ,
family = NO (mu.link = log), data = na.omit(bmi46) )
bmi46pa42b_no <- gamlss(bmi46 ~ pa42b ,
sigma.formula = ~pa42b ,
family = NO (mu.link = log), data = na.omit(bmi46) )
#mutually adjusted
bmi46adj_no <- gamlss(bmi46 ~ sex + fscb + pa42b ,
sigma.formula = ~sex + fscb + pa42b ,
family = NO (mu.link = log), data = na.omit(bmi46) )
##wemwebs
wem46<- subset(dat, select = c("wem46", "sex", "fscb" , "pa42b"))
wem46sex_no <- gamlss(wem46 ~ sex ,
sigma.formula = ~sex ,
family = NO (mu.link = log), data = na.omit(wem46) )
wem46fscb_no <- gamlss(wem46 ~ fscb ,
sigma.formula = ~fscb ,
family = NO (mu.link = log), data = na.omit(wem46) )
wem46pa42b_no <- gamlss(wem46 ~ pa42b ,
sigma.formula = ~pa42b ,
family = NO (mu.link = log), data = na.omit(wem46) )
#mutually adjusted
wem46adj_no <- gamlss(wem46 ~ sex + fscb + pa42b ,
sigma.formula = ~sex + fscb + pa42b ,
family = NO (mu.link = log), data = na.omit(wem46) )
##other syntax continued
bmi46fscb_bccg <- gamlss(bmi46 ~ fscb ,
sigma.formula = ~fscb ,
nu.formula = ~fscb ,
family = BCCG (mu.link = log), data = na.omit(bmi46) )
bmi46pa42b_bccg <- gamlss(bmi46 ~ pa42b ,
sigma.formula = ~pa42b ,
nu.formula = ~pa42b ,
family = BCCG (mu.link = log), data = na.omit(bmi46) )
#mutually adjusted
bmi46adj_bccg <- gamlss(bmi46 ~ sex + fscb + pa42b ,
sigma.formula = ~sex + fscb + pa42b ,
nu.formula = ~sex + fscb + pa42b ,
family = BCCG (mu.link = log), data = na.omit(bmi46) )
#wemwebs
wem46<- subset(dat, select = c("wem46", "sex", "fscb" , "pa42b"))
wem46sex_bccg <- gamlss(wem46 ~ sex ,
sigma.formula = ~sex ,
nu.formula = ~sex ,
family = BCCG (mu.link = log), data = na.omit(wem46) )
wem46fscb_bccg <- gamlss(wem46 ~ fscb ,
sigma.formula = ~fscb ,
nu.formula = ~fscb ,
family = BCCG (mu.link = log), data = na.omit(wem46) )
wem46pa42b_bccg <- gamlss(wem46 ~ pa42b ,
sigma.formula = ~pa42b ,
nu.formula = ~pa42b ,
family = BCCG (mu.link = log), data = na.omit(wem46) )
#mutually adjusted
wem46adj_bccg <- gamlss(wem46 ~ sex + fscb + pa42b ,
sigma.formula = ~sex + fscb + pa42b ,
nu.formula = ~sex + fscb + pa42b ,
family = BCCG (mu.link = log), data = na.omit(wem46) )
#output to file
sink(file = "table1_bmi_no_gamlss.txt")
#bmi
summary(bmi46sex_no)
summary(bmi46sex_bccg)
summary(bmi46fscb_no)
summary(bmi46fscb_bccg)
summary(bmi46pa42b_no)
summary(bmi46pa42b_bccg)
summary(bmi46adj_no)
summary(bmi46adj_bccg)
#stop output to file?
sink(file = NULL)
#output to file
sink(file = "table2_wemwebs_no_gamlss.txt")
#wemwebs
#wem
summary(wem46sex_no)
summary(wem46sex_bccg)
summary(wem46fscb_no)
summary(wem46fscb_bccg)
summary(wem46pa42b_no)
summary(wem46pa42b_bccg)
summary(wem46adj_no)
summary(wem46adj_bccg)
#stop output to file?
sink(file = NULL)
##Supplmentary table 1
##results using all categories of PA and FSC
#mutually adjusted
#set wd
setwd("C:/Users/DAB/OneDrive - University College London/ideas/quantile 2/analysis/output")
#load all data
dat <- read_dta('cleaned_datafor_r.dta')
#bmi
bmi46<- subset(dat, select = c("bmi46", "sex", "fsc" , "pa42"))
bmi46adj_no <- gamlss(bmi46 ~ sex + factor(fsc) + factor(pa42) ,
sigma.formula = ~sex + factor(fsc) + factor(pa42) ,
family = NO (mu.link = log), data = na.omit(bmi46) )
bmi46adj_bccg <- gamlss(bmi46 ~ sex + factor(fsc) + factor(pa42) ,
sigma.formula = ~sex + factor(fsc) + factor(pa42) ,
nu.formula = ~sex + factor(fsc) + factor(pa42) ,
family = BCCG (mu.link = log), data = na.omit(bmi46) )
#wemwebs
wem46<- subset(dat, select = c("wem46", "sex", "fsc" , "pa42"))
wem46adj_no <- gamlss(wem46 ~ sex + factor(fsc) + factor(pa42) ,
sigma.formula = ~sex + factor(fsc) + factor(pa42) ,
family = NO (mu.link = log), data = na.omit(wem46) )
wem46adj_bccg <- gamlss(wem46 ~ sex + factor(fsc) + factor(pa42) ,
sigma.formula = ~sex + factor(fsc) + factor(pa42) ,
nu.formula = ~sex + factor(fsc) + factor(pa42) ,
family = BCCG (mu.link = log), data = na.omit(wem46) )
#output to file
sink(file = "supple_all_categories_gamlss.txt")
#wemwebs
#wem
summary(bmi46adj_no)
summary(bmi46adj_bccg)
summary(wem46adj_no)
summary(wem46adj_bccg)
#stop output to file?
sink(file = NULL)
###same for simulation results [not included for now - results largely as expected in stata markdown]
|
6191a555b442ec80dcf5e3a8f5e1a94ba651e020
|
d434ec91242aad694c4e2d78580b60a9da3ce29a
|
/man/get_function_names.Rd
|
1217c4d1309acd339213757145fc3ebfc1ae4f7b
|
[
"MIT"
] |
permissive
|
rmsharp/rmsutilityr
|
01abcdbc77cb82eb4f07f6f5d8a340809625a1c5
|
d5a95e44663e2e51e6d8b0b62a984c269629f76c
|
refs/heads/master
| 2021-11-20T08:45:23.483242
| 2021-09-07T17:28:22
| 2021-09-07T17:28:22
| 97,284,042
| 0
| 2
|
MIT
| 2021-09-07T17:28:22
| 2017-07-15T01:17:14
|
R
|
UTF-8
|
R
| false
| true
| 586
|
rd
|
get_function_names.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_function_names.R
\name{get_function_names}
\alias{get_function_names}
\title{Get functions defined in a vector of lines}
\usage{
get_function_names(lines)
}
\arguments{
\item{lines}{character vector with text from file}
}
\value{
A character vector of function names or an empty character vector
}
\description{
Ignores all comment lines and defines a function name to be
the set of contiguous (non-blank characters) immediately prior to
`function\ *(` and must be the first non-blank token on a line.
}
|
c7af591e1bdc843ca60943650cdf2cf543ffbb2e
|
2eebaf7f9e3246d2453df9289574fba9e7f8151a
|
/tests/testthat/test_flowfinder_functions.R
|
51ab3b97c1271e21ed6b25d030b9aa535b6c2cee
|
[
"MIT"
] |
permissive
|
mikejohnson51/FlowFinder
|
b47f2e726c452770bfa755c0da90ff7d13a1da92
|
617610cb3d53229de23a43775892223f8f854162
|
refs/heads/master
| 2021-06-07T20:26:34.622266
| 2021-03-15T00:05:05
| 2021-03-15T00:05:05
| 136,057,097
| 6
| 1
|
MIT
| 2021-03-15T00:05:05
| 2018-06-04T17:00:17
|
R
|
UTF-8
|
R
| false
| false
| 279
|
r
|
test_flowfinder_functions.R
|
context("server functions")
test_that("check get_nomads_filelist routings",{
fileList = try(get_nomads_filelist(num = 40))
# Make sure 3 vals
expect_equal(length(fileList), 3)
# Right number of urls are returned
expect_equal(length(fileList$urls), 40)
})
|
b722653b5a25143084a118913520bbbc9c904aef
|
ad1f387f3030032f5b2c38c0bc70e80e5f2baa0a
|
/R/zzz.R
|
7fe2be5f94a2516faf8b39ad45fd4e91bfc49aee
|
[] |
no_license
|
YeLibrarian/RMassBank
|
89fde716ed662d872ff7b73d0e848bed196a28cb
|
c579455cff45d1971c029a9af9189a9e65429e16
|
refs/heads/master
| 2020-07-11T10:56:53.391534
| 2019-06-25T12:21:49
| 2019-06-25T12:21:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 433
|
r
|
zzz.R
|
.onLoad <- function(libname, pkgname) {
RMassBank.env <<- new.env()
RMassBank.env$ReadAnnotation <- FALSE
RMassBank.env$testnumber <- 1
## new variables
RMassBank.env$verbose.output <- FALSE
RMassBank.env$export.invalid <- FALSE
RMassBank.env$export.molfiles <- TRUE
RMassBank.env$strictMsMsSpectraSelection <- FALSE
mb <- list()
attach(RMassBank.env)
}
utils::globalVariables(c("cpdID", "isotopes","mzCalc"))
|
2bd461d8d25613a9cc5cde56a50265bde9490513
|
37efc02db536821a09cd3592009ae89993bfe2fa
|
/R/dummy_cols_rows.R
|
4617b54775bcababbb4b3b51c98bd95d6c1bb520
|
[] |
no_license
|
ChandlerLutz/CLmisc
|
7466ed60a13a6bf6b5971fc19753859497492da6
|
29a636ee291012da035c143378231dc2977f877d
|
refs/heads/master
| 2022-12-13T09:52:56.782097
| 2022-11-29T18:57:51
| 2022-11-29T18:57:51
| 121,792,858
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,580
|
r
|
dummy_cols_rows.R
|
## c:/Dropbox/Rpackages/CLmisc/R/dummy_cols_rows.R
## Chandler Lutz
## Questions/comments: cl.eco@cbs.dk
## $Revisions: 1.0.0 $Date: 2019-11-03
## Shamelessly stolen from
## https://github.com/jacobkap/fastDummies
check_type <- function(.data) {
if (data.table::is.data.table(.data)) {
data_type <- "is_data_table"
} else if (tibble::is_tibble(.data)) {
data_type <- "is_tibble"
} else {
data_type <- "is_data_frame"
}
return(data_type)
}
fix_data_type <- function(.data, data_type) {
if (data_type == "is_data_frame") {
.data <- as.data.frame(.data)
} else if (data_type == "is_tibble") {
.data <- tibble::as_tibble(.data)
}
return(.data)
}
#' Fast creation of dummy variables
#'
#' dummy_cols() quickly creates dummy (binary) columns from character and
#' factor type columns in the inputted data (and numeric columns if specified.)
#' This function is useful for statistical analysis when you want binary
#' columns rather than character columns.
#'
#' @family dummy functions
#' @seealso \code{\link{dummy_rows}} For creating dummy rows
#'
#' @param .data
#' An object with the data set you want to make dummy columns from.
#' @param select_columns
#' Vector of column names that you want to create dummy variables from.
#' If NULL (default), uses all character and factor columns.
#' @param remove_first_dummy
#' Removes the first dummy of every variable such that only n-1 dummies remain.
#' This avoids multicollinearity issues in models.
#' @param remove_most_frequent_dummy
#' Removes the most frequently observed category such that only n-1 dummies
#' remain. If there is a tie for most frequent, will remove the first
#' (by alphabetical order) category that is tied for most frequent.
#' @param ignore_na
#' If TRUE, ignores any NA values in the column. If FALSE (default), then it
#' will make a dummy column for value_NA and give a 1 in any row which has a
#' NA value.
#' @param split
#' A string to split a column when multiple categories are in the cell. For
#' example, if a variable is Pets and the rows are "cat", "dog", and "turtle",
#' each of these pets would become its own dummy column. If one row is "cat, dog",
#' then a split value of "," this row would have a value of 1 for both the cat
#' and dog dummy columns.
#'
#' @return
#' A data.frame (or tibble or data.table, depending on input data type) with
#' same number of rows as inputted data and original columns plus the newly
#' created dummy columns.
#' @export
#' @examples
#' crime <- data.frame(city = c("SF", "SF", "NYC"),
#' year = c(1990, 2000, 1990),
#' crime = 1:3)
#' dummy_cols(crime)
#' # Include year column
#' dummy_cols(crime, select_columns = c("city", "year"))
#' # Remove first dummy for each pair of dummy columns made
#' dummy_cols(crime, select_columns = c("city", "year"),
#' remove_first_dummy = TRUE)
dummy_cols <- function(.data,
select_columns = NULL,
remove_first_dummy = FALSE,
remove_most_frequent_dummy = FALSE,
ignore_na = FALSE,
split = NULL) {
stopifnot(is.null(select_columns) || is.character(select_columns),
select_columns != "",
is.logical(remove_first_dummy), length(remove_first_dummy) == 1)
if (remove_first_dummy == TRUE & remove_most_frequent_dummy == TRUE) {
stop("Select either 'remove_first_dummy' or 'remove_most_frequent_dummy'
to proceed.")
}
data_type <- check_type(.data)
if (!data.table::is.data.table(.data)) {
.data <- data.table::as.data.table(.data)
}
# Grabs column names that are character or factor class -------------------
if (!is.null(select_columns)) {
char_cols <- select_columns
cols_not_in_data <- char_cols[!char_cols %in% names(.data)]
char_cols <- char_cols[!char_cols %in% cols_not_in_data]
if (length(char_cols) == 0) {
stop("select_columns is/are not in data. Please check data and spelling.")
}
} else if (ncol(.data) == 1) {
char_cols <- names(.data)
} else {
char_cols <- sapply(.data, class)
char_cols <- char_cols[char_cols %in% c("factor", "character")]
char_cols <- names(char_cols)
}
if (length(char_cols) == 0 && is.null(select_columns)) {
stop(paste0("No character or factor columns found. ",
"Please use select_columns to choose columns."))
}
if (!is.null(select_columns) && length(cols_not_in_data) > 0) {
warning(paste0("NOTE: The following select_columns input(s) ",
"is not a column in data.\n"),
paste0(names(cols_not_in_data), "\t"))
}
for (col_name in char_cols) {
# If factor type, order by assigned levels
if (is.factor(.data[[col_name]])) {
unique_vals <- levels(.data[[col_name]])
if (any(is.na(.data[[col_name]]))) {
unique_vals <- c(unique_vals, NA)
}
# Else by order values appear.
} else {
unique_vals <- unique(.data[[col_name]])
}
unique_vals <- as.character(unique_vals)
# If there is a split value, splits up the unique_vals by that value
# and keeps only the unique ones.
if (!is.null(split)) {
unique_vals <- unique(trimws(unlist(strsplit(unique_vals, split = split))))
}
if (ignore_na) {
unique_vals <- unique_vals[!is.na(unique_vals)]
}
if (remove_most_frequent_dummy) {
vals <- as.character(.data[[col_name]])
vals <- data.frame(sort(table(vals), decreasing = TRUE),
stringsAsFactors = FALSE)
if (vals$Freq[1] > vals$Freq[2]) {
vals <- as.character(vals$vals[2:nrow(vals)])
unique_vals <- unique_vals[which(unique_vals %in% vals)]
unique_vals <- vals[order(match(vals, unique_vals))]
} else {
remove_first_dummy <- TRUE
}
}
if (remove_first_dummy) {
unique_vals <- unique_vals[-1]
}
data.table::alloc.col(.data, ncol(.data) + length(unique_vals))
data.table::set(.data, j = paste0(col_name, "_", unique_vals), value = 0L)
for (unique_value in unique_vals) {
data.table::set(.data, i =
which(data.table::chmatch(
as.character(.data[[col_name]]),
unique_value, nomatch = 0) == 1L),
j = paste0(col_name, "_", unique_value), value = 1L)
# Sets NA values to NA, only for columns that are not the NA columns
if (!is.na(unique_value)) {
data.table::set(.data, i =
which(is.na(.data[[col_name]])),
j = paste0(col_name, "_", unique_value), value = NA)
}
if (!is.null(split)) {
max_split_length <- max(sapply(strsplit(as.character(.data[[col_name]]),
split = split), length))
for (split_length in 1:max_split_length) {
data.table::set(.data, i =
which(data.table::chmatch(
as.character(trimws(sapply(strsplit(as.character(.data[[col_name]]),
split = split),
`[`, split_length))),
unique_value, nomatch = 0) == 1L),
j = paste0(col_name, "_", unique_value), value = 1L)
}
if (is.na(unique_value)) {
.data[[paste0(col_name, "_", unique_value)]][which(!is.na(.data[[col_name]]))] <- 0
}
}
}
}
.data <- fix_data_type(.data, data_type)
return(.data)
}
#' Fast creation of dummy rows
#'
#' dummy_rows() quickly creates dummy rows to fill in missing rows
#' based on all combinations of available character, factor, and
#' date columns (if not otherwise specified). This is useful for
#' creating balanced panel data. Columns that are not character,
#' factor, or dates are filled in with NA (or whatever value you
#' specify).
#'
#' @family dummy functions
#' @seealso \code{\link{dummy_cols}} For creating dummy columns
#'
#' @param .data
#' An object with the data set you want to make dummy columns from.
#' @param select_columns
#' If NULL (default), uses all character, factor, and Date columns to produce categories
#' to make the dummy rows by. If not NULL, you manually enter a string or vector of strings of columns name(s).
#' @param dummy_value
#' Value of the row for columns that are not selected.
#' Default is a value of NA.
#' @param dummy_indicator
#' Adds binary column to say if row is dummy or not (i.e. included in
#' original data or not)
#'
#' @return
#' A data.frame (or tibble or data.table, depending on input data type) with
#' same number of columns as inputted data and original rows plus the newly
#' created dummy rows
#' @export
#' @examples
#' crime <- data.frame(city = c("SF", "SF", "NYC"),
#' year = c(1990, 2000, 1990),
#' crime = 1:3)
#'
#' dummy_rows(crime)
#' # Include year column
#' dummy_rows(crime, select_columns = c("city", "year"))
#' # m=Make dummy value 0
#' dummy_rows(crime, select_columns = c("city", "year"),
#' dummy_value = 0)
#' # Add a dummy indicator
#' dummy_rows(crime, select_columns = c("city", "year"),
#' dummy_indicator = TRUE)
dummy_rows <- function(.data,
select_columns = NULL,
dummy_value = NA,
dummy_indicator = FALSE) {
stopifnot(is.null(select_columns) || is.character(select_columns),
select_columns != "",
is.logical(dummy_indicator), length(dummy_indicator) == 1,
length(dummy_value) == 1)
if (is.atomic(.data) || ncol(.data) == 1) {
stop("Cannot make dummy rows of a vector of one column data.frame/table.")
}
data_type <- check_type(.data)
if (!data.table::is.data.table(.data)) {
.data <- data.table::as.data.table(.data)
}
# Finds class of every column and keeps character, factor, and Date --------
if (is.null(select_columns)) {
char_cols <- sapply(.data, class)
char_cols <- names(.data)[char_cols %in%
c("character", "factor", "Date")]
if (length(char_cols) == 0) {
stop("No character, factor, or Date columns found. Please use select_columns")
}
} else {
char_cols <- select_columns
}
other_cols <- names(.data)[!names(.data) %in% char_cols]
# Finds how many possible combinations of the variables there are.
# This will be the number of rows in the new data
total_length <- prod(sapply(.data[, char_cols, with = FALSE, drop = FALSE],
data.table::uniqueN))
# Makes an empty data.table with right # of rows and columns. -------------
temp_table <- data.table::data.table(matrix(nrow = total_length,
ncol = ncol(.data)))
names(temp_table) <- names(.data)
# Fills in all possible combination rows ----------------------------------
for (i in char_cols) {
data.table::set(temp_table, j = i,
value = rep(unique(.data[[i]]), times =
total_length /
data.table::uniqueN(.data[[i]])))
temp_table <- data.table::setorderv(temp_table, i)
}
# Adds the dummy variable columns (and indicator) -------------------------
for (i in other_cols) {
data.table::set(temp_table, j = other_cols,
value = rep(dummy_value, nrow(temp_table)))
}
if (dummy_indicator) {
# Adding extra column
data.table::alloc.col(temp_table, ncol(temp_table) + 1)
data.table::alloc.col(.data, ncol(.data) + 1)
data.table::set(.data, j = "dummy_indicator", value = 0L)
data.table::set(temp_table, j = "dummy_indicator",
value = rep(1L, nrow(temp_table)))
}
# Removes rows that were in original data. --------------------------------
data_temp_pasting <- do.call(paste0, .data[, char_cols, with = FALSE,
drop = FALSE])
temp_temp_pasting <- do.call(paste0, temp_table[, char_cols, with = FALSE,
drop = FALSE])
temp_table <- subset(temp_table, !temp_temp_pasting %in% data_temp_pasting)
# Stacks new data on old data
if (nrow(temp_table) > 0) {
.data <- data.table::rbindlist(list(.data, temp_table), use.names = TRUE,
fill = TRUE)
}
.data <- fix_data_type(.data, data_type)
return(.data)
}
|
9bbf60b5e6fb3972e299857d6d83f518745c8e5d
|
667812a66716b979b7208a0e033e54e1558b8f59
|
/man/gpw4_deu2010.Rd
|
6e4443ede47858a0de2790ad6b923819fe5dbe3d
|
[] |
no_license
|
zhukovyuri/SUNGEO
|
686e4afb398257e3bc183ba1a695f6d253f406dc
|
7780b6e19e971d2a2f17a52c6c2906e65749dc23
|
refs/heads/master
| 2023-04-01T11:55:00.849451
| 2023-03-20T21:45:06
| 2023-03-20T21:45:06
| 254,211,865
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 966
|
rd
|
gpw4_deu2010.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{gpw4_deu2010}
\alias{gpw4_deu2010}
\title{Population count raster for Germany, 2010.}
\format{
class : SpatRaster
dimensions : 186, 220, 1 (nrow, ncol, nlyr)
resolution : 0.04166667, 0.04166667 (x, y)
extent : 5.875, 15.04167, 47.29167, 55.04167 (xmin, xmax, ymin, ymax)
coord. ref. : lon/lat WGS 84 (EPSG:4326)
source(s) : memory
name : gpw_v4_population_count_rev11_2010_2pt5_min
min value : 0.00
max value : 92915.66
}
\source{
Gridded Population of the World (GPW) v4: Population Count, v4.11 <doi:10.7927/H4JW8BX5>.
}
\usage{
gpw4_deu2010
}
\description{
2.5 arc-minute resolution raster of estimates of human population (number of persons per pixel),
consistent with national censuses and population registers, for the year 2010.
}
\keyword{datasets}
|
5441b0a2c831ee6a3cb4334dd330981c6b9d9be4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/maptools/examples/gcDestination.Rd.R
|
fa81c763222bb1cda9a54e0f77bfa14a045cf8de
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 634
|
r
|
gcDestination.Rd.R
|
library(maptools)
### Name: gcDestination
### Title: Find destination in geographical coordinates
### Aliases: gcDestination
### Keywords: spatial
### ** Examples
data(state)
res <- gcDestination(state.center$x, state.center$y, 45, 250, "km")
plot(state.center$x, state.center$y, asp=1, pch=16)
arrows(state.center$x, state.center$y, res[,1], res[,2], length=0.05)
llist <- vector(mode="list", length=length(state.center$x))
for (i in seq(along=llist)) llist[[i]] <- gcDestination(state.center$x[i],
state.center$y[i], seq(0, 360, 5), 250, "km")
plot(state.center$x, state.center$y, asp=1, pch=3)
nll <- lapply(llist, lines)
|
6da1fb407d60687c45dfa8328505c55c375e9647
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googleappenginev1alpha.auto/man/StaticFilesHandler.httpHeaders.Rd
|
590c50a42fc22badb93a534a8d4ea97be7993271
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 582
|
rd
|
StaticFilesHandler.httpHeaders.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appengine_objects.R
\name{StaticFilesHandler.httpHeaders}
\alias{StaticFilesHandler.httpHeaders}
\title{StaticFilesHandler.httpHeaders Object}
\usage{
StaticFilesHandler.httpHeaders()
}
\value{
StaticFilesHandler.httpHeaders object
}
\description{
StaticFilesHandler.httpHeaders Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
HTTP headers to use for all responses from these URLs.
}
\seealso{
Other StaticFilesHandler functions: \code{\link{StaticFilesHandler}}
}
|
1011d314d8c9e766b6b6b8592744609fcaac59de
|
e6c64291c457aec015f548a67572554c8f69269a
|
/man/parse_pattern.Rd
|
9f9d6f6b52d13051ece92145f26e9cd1b28733e6
|
[] |
no_license
|
MatteoLacki/LFQBench2
|
ba0d32b6d13f032df9fc528f5674fab466ce021e
|
ed5e175716b3ccfd3979466a569c2150dd7b9e52
|
refs/heads/master
| 2020-04-21T12:50:04.356863
| 2020-03-30T16:20:24
| 2020-03-30T16:20:24
| 169,577,083
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 669
|
rd
|
parse_pattern.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wide2long.R
\name{parse_pattern}
\alias{parse_pattern}
\title{Parse column pattern.}
\usage{
parse_pattern(s)
}
\arguments{
\item{s}{Teh pattern to match}
}
\value{
a list with the pattern without group names, and the found names.
}
\description{
The parser uses a slighttly modified version of 'stringr' library.
The modification lets you name the groups.
A group is a regular expression in brackets, like '(...)'.
You can name the group by supplying it between two columns, for instance
(:condition:...) will match a group of 3 symbols (the three dots), and call
that group 'condition'.
}
|
0477f5890a991ae0fe271c6df4e7bfbe086a822a
|
82e6137d5d2a0e9114f76c7e427514bba62aaaf3
|
/shinylego-master1/R/mod_display_lego_2d.R
|
6f206314ab33e7a4f4619550cd5ecb57c98f47ee
|
[
"MIT"
] |
permissive
|
lukuiR/Rpublic
|
3a0c25519d10457bc08d6d3a8510865212943a37
|
daa067ca078ddce54bb4d822666d4e9f9335c6a5
|
refs/heads/master
| 2022-06-28T16:10:50.713523
| 2022-06-21T14:48:48
| 2022-06-21T14:48:48
| 119,683,495
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,092
|
r
|
mod_display_lego_2d.R
|
# Module UI
#' @title mod_display_lego_2dui and mod_display_lego_2d
#' @description A shiny Module that ...
#'
#' @param id shiny id
#'
#' @export
#' @importFrom shiny NS tagList
#' @examples
mod_display_lego_2dui <- function(id, height_window = 500){
ns <- NS(id)
tagList(
withLoader(
plotOutput(
ns("mosaic_2d"),
height = paste0(height_window, 'px')
),
type = "image",
loader = "www/lego_loader.gif")
)
}
# Module server
#' mod_display_lego_2d server function
#'
#' @param input internal
#' @param output internal
#' @param session internal
#'
#' @export
#' @rdname mod_display_lego_2dui
mod_display_lego_2d <- function(input, output, session, image_lego_obj){
ns <- session$ns
# reactive for plot object
image_obj <- reactive({
display_set(image_lego_obj(), title = NULL)
})
# display 2d lego plot
output$mosaic_2d <- renderPlot({
print(image_obj())
})
}
## To be copied in the UI
# mod_display_lego_2dui("m1")
## To be copied in the server
# callModule(mod_display_lego_2d, "m1")
|
0723eb8061996ae421ef119a18dbff754af6a834
|
40845868e6803507d19d8b7ae63dea0ec0a109dd
|
/lab6/fibonacci_bonus.r
|
74ce2b7c3b54b858d12f5cf36844867b2c60c912
|
[] |
no_license
|
srravula1/NCTU-R-Programming-2018
|
42f3ddcc24fdabc78bbb0029791e35e497f446fd
|
0cbf426132db8848c97ab5fa4811e4d35d97e82b
|
refs/heads/master
| 2021-03-14T03:34:45.708922
| 2019-03-04T18:13:46
| 2019-03-04T18:13:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 401
|
r
|
fibonacci_bonus.r
|
data.file = "hw6_input.csv"
ans.file = "hw6_answer.csv"
data<-read.csv(data.file, header=FALSE, sep=",")
ans <- read.csv(ans.file, header=FALSE, sep=",")
len<-30
f<-numeric(len)
r_fibonacci <- function(n){
if(n<=1){
return(n)
}
else{
return(r_fibonacci(n-1)+r_fibonacci(n-2))
}
}
for(n in data){
my_ans <- r_fibonacci(n)
if(my_ans!=ans[n]){print(c("error", n, my_ans, ans[n]))}
}
|
a7b080d81b1251c9cc877b9fba2d3e141163a159
|
2cc56a6341f179923977128ad90bb31419e033d0
|
/R/get_deviance.R
|
5df0380c157fcd0d217096c2771086b3e8ccd7c8
|
[] |
no_license
|
cran/insight
|
5e1d2d1c46478c603b491f53aa80de57bc8f54b4
|
247206683ad374a1ba179356410d095f6861aede
|
refs/heads/master
| 2023-07-19T11:33:37.490704
| 2023-06-29T13:30:02
| 2023-06-29T13:30:02
| 174,554,249
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,441
|
r
|
get_deviance.R
|
#' Model Deviance
#'
#' Returns model deviance (see `stats::deviance()`).
#'
#' @param ... Not used.
#' @inheritParams get_residuals
#'
#' @return The model deviance.
#'
#' @details For GLMMs of class `glmerMod`, `glmmTMB` or `MixMod`,
#' the *absolute unconditional* deviance is returned (see 'Details' in
#' `?lme4::merMod-class`), i.e. minus twice the log-likelihood. To get
#' the *relative conditional* deviance (relative to a saturated model,
#' conditioned on the conditional modes of random effects), use `deviance()`.
#' The value returned `get_deviance()` usually equals the deviance-value
#' from the `summary()`.
#'
#' @examples
#' data(mtcars)
#' x <- lm(mpg ~ cyl, data = mtcars)
#' get_deviance(x)
#' @export
get_deviance <- function(x, ...) {
UseMethod("get_deviance")
}
#' @rdname get_deviance
#' @export
get_deviance.default <- function(x, verbose = TRUE, ...) {
dev <- .safe(stats::deviance(x, ...))
if (is.null(dev)) {
dev <- .safe(x$deviance)
}
if (is.null(dev)) {
dev <- .safe(sum(get_residuals(x, weighted = TRUE, verbose = verbose)^2, na.rm = TRUE))
}
dev
}
#' @export
get_deviance.stanreg <- function(x, verbose = TRUE, ...) {
info <- model_info(x)
if (info$is_linear) {
res <- get_residuals(x, weighted = TRUE, verbose = verbose)
dev <- sum(res^2, na.rm = TRUE)
} else if (info$is_binomial) {
w <- get_weights(x, null_as_ones = TRUE, verbose = verbose)
n <- n_obs(x)
y <- get_response(x, as_proportion = TRUE, verbose = FALSE)
mu <- stats::fitted(x)
dev_resids_fun <- x$family$dev.resids
dev <- sum(dev_resids_fun(y, mu, w))
} else {
format_error("Could not compute deviance for this type of model.")
}
# Not sure if it generalizes to other models though since deviance.glm
# extracts it via x@deviance
dev
}
#' @export
get_deviance.lmerMod <- function(x, REML = FALSE, ...) {
stats::deviance(x, REML = REML, ...)
}
#' @export
get_deviance.lrm <- function(x, ...) {
d <- stats::deviance(x, ...)
d[length(d)]
}
#' @export
get_deviance.glmmTMB <- function(x, ...) {
.safe(-2 * as.numeric(get_loglikelihood(x, ...)))
}
#' @export
get_deviance.glmerMod <- get_deviance.glmmTMB
#' @export
get_deviance.MixMod <- get_deviance.glmmTMB
#' @export
get_deviance.model_fit <- function(x, ...) {
get_deviance(x$fit, ...)
}
|
9c55e4ea39f34c343f9fde76f4d115455d0e1544
|
600fe48edbaaaa9e66baba760cd3a79bec023fc9
|
/project.r
|
595d50728414427162cf45f9bd4f36a7632ff23c
|
[] |
no_license
|
justrite/bumblebees
|
a2bdf599af96cec55b32323bebd8dd5f4dd74cb1
|
2dd05be242d821050a15ac22e575ac92b7b49b8b
|
refs/heads/master
| 2022-11-17T18:03:50.790945
| 2020-07-21T17:40:03
| 2020-07-21T17:40:03
| 281,457,070
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,022
|
r
|
project.r
|
# Redirect all printed output for this script to a log file.
sink("output.txt")
data = read.csv("DryadSubmission/data_bombclimate.csv")
# Map the period start years onto named period labels.
data$period = ''
data[which(data$periodFrom == 1901),]$period = "historic"
data[which(data$periodFrom == 1975),]$period = "early"
data[which(data$periodFrom == 1987),]$period = "middle"
data[which(data$periodFrom == 1999),]$period = "late"
data$period = as.factor(data$period)
# Drop rows with any missing values before subsetting.
data = na.omit(data)
# European records only, split by period.
historic_EU = data[which(data$period == "historic" & data$continent == "EUR"),]
early_EU = data[which(data$period == "early" & data$continent == "EUR"),]
middle_EU = data[which(data$period == "middle" & data$continent == "EUR"),]
late_EU = data[which(data$period == "late" & data$continent == "EUR"),]
# Per-period summary means for the four traits of interest.
elevs = c(mean(historic_EU$elevation), mean(early_EU$elevation),mean(middle_EU$elevation), mean(late_EU$elevation))
lats = c(mean(historic_EU$kmNorthEquator), mean(early_EU$kmNorthEquator), mean(middle_EU$kmNorthEquator), mean(late_EU$kmNorthEquator))
minTemps = c(mean(historic_EU$minPeriodAnnualMeanT), mean(early_EU$minPeriodAnnualMeanT), mean(middle_EU$minPeriodAnnualMeanT), mean(late_EU$minPeriodAnnualMeanT))
maxTemps = c(mean(historic_EU$maxPeriodAnnualMeanT), mean(early_EU$maxPeriodAnnualMeanT), mean(middle_EU$maxPeriodAnnualMeanT), mean(late_EU$maxPeriodAnnualMeanT))
### Simulations
# Compare the actual per-period latitude (kmNorthEquator) distributions with
# normal simulations matching each period's mean and sd (10000 draws each).
# NOTE(review): no set.seed() is visible in this script, so the rnorm() draws
# (and every downstream p-value computed on simulated data) are not
# reproducible run-to-run -- confirm whether a seed is set elsewhere.
par(mfrow=c(4, 2))
historic_dist_sim = rnorm(10000, mean(historic_EU$kmNorthEquator), sd(historic_EU$kmNorthEquator))
plot(density(historic_EU$kmNorthEquator), main = "Historic EU Actual Distribution")
plot(density(historic_dist_sim), main = "Historic EU Simulated Distribution")
early_dist_sim = rnorm(10000, mean(early_EU$kmNorthEquator), sd(early_EU$kmNorthEquator))
plot(density(early_EU$kmNorthEquator), main = "Early EU Actual Distribution")
plot(density(early_dist_sim), main = "Early EU Simulated Distribution")
middle_dist_sim = rnorm(10000, mean(middle_EU$kmNorthEquator), sd(middle_EU$kmNorthEquator))
plot(density(middle_EU$kmNorthEquator), main = "Middle EU Actual Distribution")
plot(density(middle_dist_sim), main = "Middle EU Simulated Distribution")
late_dist_sim = rnorm(10000, mean(late_EU$kmNorthEquator), sd(late_EU$kmNorthEquator))
plot(density(late_EU$kmNorthEquator), main = "Late EU Actual Distribution")
plot(density(late_dist_sim), main = "Late EU Simulated distribution")
# Two-sample KS tests between consecutive periods, on both the actual and
# the simulated distributions.
cat("K-S tests: \n")
print(ks.test(historic_EU$kmNorthEquator, early_EU$kmNorthEquator)$p.value)
print(ks.test(historic_dist_sim, early_dist_sim)$p.value)
print(ks.test(early_EU$kmNorthEquator, middle_EU$kmNorthEquator)$p.value)
print(ks.test(middle_dist_sim, early_dist_sim)$p.value)
print(ks.test(middle_EU$kmNorthEquator, late_EU$kmNorthEquator)$p.value)
print(ks.test(late_dist_sim, middle_dist_sim)$p.value)
# Plot the period-to-period differences of the simulated draws against a
# zero-mean normal with the same sd (red) as a visual shift check.
par(mfrow = c(3, 1))
plot(density((early_dist_sim - historic_dist_sim)), main = "Difference Between Early and Historic")
lines(density(rnorm(10000, 0, sd(early_dist_sim - historic_dist_sim))), col = 'red')
plot(density((middle_dist_sim - early_dist_sim)), main = "Difference Between Middle and Early")
lines(density(rnorm(10000, 0, sd(middle_dist_sim - early_dist_sim))), col = 'red')
plot(density((late_dist_sim - middle_dist_sim)), main = "Difference Between Late and Middle")
lines(density(rnorm(10000, 0, sd(late_dist_sim - middle_dist_sim))), col = 'red')
# Bootstrap statistic for boot(): mean of the resampled values.
stat_sim = function(data, INDEX){
  mean(data[INDEX])
}
# Bootstrap the mean of each period-to-period difference (R = 1000), then test
# whether the later shift is larger than the earlier one (one-sided).
# NOTE(review): boot() requires the 'boot' package, but no library(boot) call
# is visible in this script -- confirm it is attached elsewhere (e.g. via a
# sourced setup file), otherwise these calls fail.
cat("Tests with simulations: \n")
boot_sim_late = boot((late_dist_sim - middle_dist_sim), stat = stat_sim, R = 1000)
boot_sim_middle = boot((middle_dist_sim - early_dist_sim), stat = stat_sim, R = 1000)
boot_sim_early = boot((early_dist_sim - historic_dist_sim), stat = stat_sim, R = 1000)
cat(t.test(boot_sim_late$t, boot_sim_middle$t, alternative = "greater")$p.value, '\n')
cat(wilcox.test(boot_sim_late$t, boot_sim_middle$t, alternative = "greater")$p.value, '\n')
cat(t.test(boot_sim_middle$t, boot_sim_early$t, alternative = "greater")$p.value, '\n')
cat(wilcox.test(boot_sim_middle$t, boot_sim_early$t, alternative = "greater")$p.value, '\n')
### Analysis
### Compute Proportion of Differences
###################################################################################################
# For each trait (elevation, latitude, min/max annual mean temperature), run a
# per-species two-sample KS test between consecutive periods, then report the
# proportion of species whose adjusted p-value falls below 0.05.
# p.adjust() with no method argument uses the Holm correction.
# NOTE(review): p-value vectors are grown with c() inside the loops;
# preallocating (or vapply over species) would be cleaner, but behavior is
# identical for these data sizes.
elev_pvals_hist = c()
elev_pvals_early = c()
elev_pvals_mid = c()
cat("Elevation: \n")
for(species in unique(historic_EU$species)){
  elev_pvals_hist = c(elev_pvals_hist, ks.test(historic_EU[which(historic_EU$species == species),]$elevation, early_EU[which(early_EU$species == species),]$elevation)$p.value)
  elev_pvals_early = c(elev_pvals_early, ks.test(early_EU[which(early_EU$species == species),]$elevation, middle_EU[which(middle_EU$species == species),]$elevation)$p.value)
  elev_pvals_mid = c(elev_pvals_mid, ks.test(late_EU[which(late_EU$species == species),]$elevation, middle_EU[which(middle_EU$species == species),]$elevation)$p.value)
}
cat(sum(p.adjust(elev_pvals_hist, n = length(elev_pvals_hist)) < 0.05)/length(elev_pvals_hist), '\n')
cat(sum(p.adjust(elev_pvals_early, n = length(elev_pvals_early)) < 0.05)/length(elev_pvals_early), '\n')
cat(sum(p.adjust(elev_pvals_mid, n = length(elev_pvals_mid)) < 0.05)/length(elev_pvals_mid), '\n')
# Same per-species comparisons for distance north of the equator.
cat("Distance: \n")
dist_pvals_hist = c()
dist_pvals_early = c()
dist_pvals_mid = c()
for(species in unique(historic_EU$species)){
  dist_pvals_hist = c(dist_pvals_hist, ks.test(historic_EU[which(historic_EU$species == species),]$kmNorthEquator, early_EU[which(early_EU$species == species),]$kmNorthEquator)$p.value)
  dist_pvals_early = c(dist_pvals_early, ks.test(early_EU[which(early_EU$species == species),]$kmNorthEquator, middle_EU[which(middle_EU$species == species),]$kmNorthEquator)$p.value)
  dist_pvals_mid = c(dist_pvals_mid, ks.test(late_EU[which(late_EU$species == species),]$kmNorthEquator, middle_EU[which(middle_EU$species == species),]$kmNorthEquator)$p.value)
}
cat(sum(p.adjust(dist_pvals_hist, n = length(dist_pvals_hist)) < 0.05)/length(dist_pvals_hist), '\n')
cat(sum(p.adjust(dist_pvals_early, n = length(dist_pvals_early)) < 0.05)/length(dist_pvals_early), '\n')
cat(sum(p.adjust(dist_pvals_mid, n = length(dist_pvals_mid)) < 0.05)/length(dist_pvals_mid), '\n')
# Same per-species comparisons for the minimum annual mean temperature.
cat("Min: \n")
min_pvals_hist = c()
min_pvals_early = c()
min_pvals_mid = c()
for(species in unique(historic_EU$species)){
  min_pvals_hist = c(min_pvals_hist, ks.test(historic_EU[which(historic_EU$species == species),]$minPeriodAnnualMeanT, early_EU[which(early_EU$species == species),]$minPeriodAnnualMeanT)$p.value)
  min_pvals_early = c(min_pvals_early, ks.test(early_EU[which(early_EU$species == species),]$minPeriodAnnualMeanT, middle_EU[which(middle_EU$species == species),]$minPeriodAnnualMeanT)$p.value)
  min_pvals_mid = c(min_pvals_mid, ks.test(late_EU[which(late_EU$species == species),]$minPeriodAnnualMeanT, middle_EU[which(middle_EU$species == species),]$minPeriodAnnualMeanT)$p.value)
}
cat(sum(p.adjust(min_pvals_hist, n = length(min_pvals_hist)) < 0.05)/length(min_pvals_hist), '\n')
cat(sum(p.adjust(min_pvals_early, n = length(min_pvals_early)) < 0.05)/length(min_pvals_early), '\n')
cat(sum(p.adjust(min_pvals_mid, n = length(min_pvals_mid)) < 0.05)/length(min_pvals_mid), '\n')
# Same per-species comparisons for the maximum annual mean temperature.
cat("Max: \n")
max_pvals_hist = c()
max_pvals_early = c()
max_pvals_mid = c()
for(species in unique(historic_EU$species)){
  max_pvals_hist = c(max_pvals_hist, ks.test(historic_EU[which(historic_EU$species == species),]$maxPeriodAnnualMeanT, early_EU[which(early_EU$species == species),]$maxPeriodAnnualMeanT)$p.value)
  max_pvals_early = c(max_pvals_early, ks.test(early_EU[which(early_EU$species == species),]$maxPeriodAnnualMeanT, middle_EU[which(middle_EU$species == species),]$maxPeriodAnnualMeanT)$p.value)
  max_pvals_mid = c(max_pvals_mid, ks.test(late_EU[which(late_EU$species == species),]$maxPeriodAnnualMeanT, middle_EU[which(middle_EU$species == species),]$maxPeriodAnnualMeanT)$p.value)
}
cat(sum(p.adjust(max_pvals_hist, n = length(max_pvals_hist)) < 0.05)/length(max_pvals_hist), '\n')
cat(sum(p.adjust(max_pvals_early, n = length(max_pvals_early)) < 0.05)/length(max_pvals_early), '\n')
cat(sum(p.adjust(max_pvals_mid, n = length(max_pvals_mid)) < 0.05)/length(max_pvals_mid), '\n')
### Compute Differences for Distance from Equator
###################################################################################################
# Bootstrap statistic for boot(): mean distance north of the equator over
# the resampled rows.
stat_dist = function(data, INDEX){
  mean(data[INDEX, ]$kmNorthEquator)
}
# Per-species bootstrap (R = 1000) of mean latitude in each period. Each
# boots_* data frame starts with a placeholder column of 1s and gains one
# column of bootstrap replicates per species.
boots_historic = data.frame(rep(1, 1000))
boots_early = data.frame(rep(1, 1000))
boots_middle = data.frame(rep(1, 1000))
boots_late = data.frame(rep(1, 1000))
for(species in unique(historic_EU$species)){
  bt_dist = boot(historic_EU[which(historic_EU$species == species), ], stat = stat_dist, R = 1000)
  boots_historic[species] = bt_dist$t
}
####################################################################################################
for(species in unique(early_EU$species)){
  bt_dist = boot(early_EU[which(early_EU$species == species), ], stat = stat_dist, R = 1000)
  boots_early[species] = bt_dist$t
}
####################################################################################################
for(species in unique(middle_EU$species)){
  bt_dist = boot(middle_EU[which(middle_EU$species == species), ], stat = stat_dist, R = 1000)
  boots_middle[species] = bt_dist$t
}
####################################################################################################
for(species in unique(late_EU$species)){
  bt_dist = boot(late_EU[which(late_EU$species == species), ], stat = stat_dist, R = 1000)
  boots_late[species] = bt_dist$t
}
# One-sided tests per species: is the absolute latitude shift in the later
# interval greater than in the earlier interval? Reports the fraction of
# species significant after Holm adjustment.
cat("Distance from equator: \n")
cat("Late - Middle vs Middle - Early\n")
pvals = c()
wilcox_p = c()
for(species in unique(historic_EU$species)){
  pvals = c(pvals, t.test(abs(boots_late[species] - boots_middle[species]), abs(boots_middle[species] - boots_early[species]), alternative = "greater")$p.value)
  wilcox_p = c(wilcox_p, wilcox.test(abs(boots_late[,species] - boots_middle[,species]), abs(boots_middle[,species] - boots_early[,species]), alternative = "greater")$p.value)
}
cat(sum(p.adjust(pvals, n = length(pvals)) < 0.05)/length(pvals), '\n')
cat(sum(p.adjust(wilcox_p, n = length(wilcox_p)) < 0.05)/length(wilcox_p), '\n')
cat("Middle - Early vs Early - Historic\n")
pvals = c()
wilcox_p = c()
for(species in unique(historic_EU$species)){
  pvals = c(pvals, t.test(abs(boots_middle[species] - boots_early[species]), abs(boots_early[species] - boots_historic[species]), alternative = "greater")$p.value)
  wilcox_p = c(wilcox_p, wilcox.test(abs(boots_middle[,species] - boots_early[,species]), abs(boots_early[,species] - boots_historic[,species]), alternative = "greater")$p.value)
}
cat(sum(p.adjust(pvals, n = length(pvals)) < 0.05)/length(pvals), '\n')
cat(sum(p.adjust(wilcox_p, n = length(wilcox_p)) < 0.05)/length(wilcox_p), '\n')
### Compute Differences for Elevation
###################################################################################################
# Bootstrap statistic for boot(): mean elevation over the resampled rows.
stat_elev = function(data, INDEX){
  mean(data[INDEX, ]$elevation)
}
# Per-species bootstrap (R = 1000) of mean elevation in each period; the
# boots_* frames are rebuilt here, overwriting the latitude versions above.
boots_historic = data.frame(rep(1, 1000))
boots_early = data.frame(rep(1, 1000))
boots_middle = data.frame(rep(1, 1000))
boots_late = data.frame(rep(1, 1000))
for(species in unique(historic_EU$species)){
  bt_dist = boot(historic_EU[which(historic_EU$species == species), ], stat = stat_elev, R = 1000)
  boots_historic[species] = bt_dist$t
}
####################################################################################################
for(species in unique(early_EU$species)){
  bt_dist = boot(early_EU[which(early_EU$species == species), ], stat = stat_elev, R = 1000)
  boots_early[species] = bt_dist$t
}
####################################################################################################
for(species in unique(middle_EU$species)){
  bt_dist = boot(middle_EU[which(middle_EU$species == species), ], stat = stat_elev, R = 1000)
  boots_middle[species] = bt_dist$t
}
####################################################################################################
for(species in unique(late_EU$species)){
  bt_dist = boot(late_EU[which(late_EU$species == species), ], stat = stat_elev, R = 1000)
  boots_late[species] = bt_dist$t
}
# Same one-sided shift comparisons as the distance section, but on elevation;
# note p.adjust() is called here without an explicit n (same result, since n
# defaults to length(p)).
cat("Elevation: \n")
cat("Late - Middle vs Middle - Early\n")
pvals = c()
wilcox_p = c()
for(species in unique(historic_EU$species)){
  pvals = c(pvals, t.test(abs(boots_late[species] - boots_middle[species]), abs(boots_middle[species] - boots_early[species]), alternative = "greater")$p.value)
  wilcox_p = c(wilcox_p, wilcox.test(abs(boots_late[,species] - boots_middle[,species]), abs(boots_middle[,species] - boots_early[,species]), alternative = "greater")$p.value)
}
cat(sum(p.adjust(pvals) < 0.05)/length(pvals), '\n')
cat(sum(p.adjust(wilcox_p) < 0.05)/length(wilcox_p), '\n')
cat("Middle - Early vs Early - Historic\n")
pvals = c()
wilcox_p = c()
for(species in unique(historic_EU$species)){
  pvals = c(pvals, t.test(abs(boots_middle[species] - boots_early[species]), abs(boots_early[species] - boots_historic[species]), alternative = "greater")$p.value)
  wilcox_p = c(wilcox_p, wilcox.test(abs(boots_middle[,species] - boots_early[,species]), abs(boots_early[,species] - boots_historic[,species]), alternative = "greater")$p.value)
}
cat(sum(p.adjust(pvals) < 0.05)/length(pvals), '\n')
cat(sum(p.adjust(wilcox_p) < 0.05)/length(wilcox_p), '\n')
### Compute Differences for Min Annual Mean Temp
##########################################################################################################################################################
# Bootstrap statistic for boot(): mean minimum annual mean temperature over
# the resampled rows.
stat_min = function(data, INDEX){
  mean(data[INDEX, ]$minPeriodAnnualMeanT)
}
# Per-species bootstrap (R = 1000) of the mean minimum annual mean
# temperature in each period; boots_* frames are again overwritten.
boots_historic = data.frame(rep(1, 1000))
boots_early = data.frame(rep(1, 1000))
boots_middle = data.frame(rep(1, 1000))
boots_late = data.frame(rep(1, 1000))
for(species in unique(historic_EU$species)){
  bt_dist = boot(historic_EU[which(historic_EU$species == species), ], stat = stat_min, R = 1000)
  boots_historic[species] = bt_dist$t
}
####################################################################################################
for(species in unique(early_EU$species)){
  bt_dist = boot(early_EU[which(early_EU$species == species), ], stat = stat_min, R = 1000)
  boots_early[species] = bt_dist$t
}
####################################################################################################
for(species in unique(middle_EU$species)){
  bt_dist = boot(middle_EU[which(middle_EU$species == species), ], stat = stat_min, R = 1000)
  boots_middle[species] = bt_dist$t
}
####################################################################################################
for(species in unique(late_EU$species)){
  bt_dist = boot(late_EU[which(late_EU$species == species), ], stat = stat_min, R = 1000)
  boots_late[species] = bt_dist$t
}
# One-sided shift comparisons as before, on minimum temperature.
cat("Min: \n")
cat("Late - Middle vs Middle - Early\n")
pvals = c()
wilcox_p = c()
for(species in unique(historic_EU$species)){
  pvals = c(pvals, t.test(abs(boots_late[species] - boots_middle[species]), abs(boots_middle[species] - boots_early[species]), alternative = "greater")$p.value)
  wilcox_p = c(wilcox_p, wilcox.test(abs(boots_late[,species] - boots_middle[,species]), abs(boots_middle[,species] - boots_early[,species]), alternative = "greater")$p.value)
}
cat(sum(p.adjust(pvals) < 0.05)/length(pvals), '\n')
cat(sum(p.adjust(wilcox_p) < 0.05)/length(wilcox_p), '\n')
cat("Middle - Early vs Early - Historic\n")
pvals = c()
wilcox_p = c()
for(species in unique(historic_EU$species)){
  pvals = c(pvals, t.test(abs(boots_middle[species] - boots_early[species]), abs(boots_early[species] - boots_historic[species]), alternative = "greater")$p.value)
  wilcox_p = c(wilcox_p, wilcox.test(abs(boots_middle[,species] - boots_early[,species]), abs(boots_early[,species] - boots_historic[,species]), alternative = "greater")$p.value)
}
cat(sum(p.adjust(pvals) < 0.05)/length(pvals), '\n')
cat(sum(p.adjust(wilcox_p) < 0.05)/length(wilcox_p), '\n')
### Compute Differences for Max Annual Mean Temp
################################################################################################################################################
# Bootstrap statistic for boot(): mean maximum annual mean temperature over
# the resampled rows.
stat_max = function(data, INDEX){
  mean(data[INDEX, ]$maxPeriodAnnualMeanT)
}
# Per-species bootstrap (R = 1000) of the mean maximum annual mean
# temperature in each period; boots_* frames are overwritten one last time.
boots_historic = data.frame(rep(1, 1000))
boots_early = data.frame(rep(1, 1000))
boots_middle = data.frame(rep(1, 1000))
boots_late = data.frame(rep(1, 1000))
for(species in unique(historic_EU$species)){
  bt_dist = boot(historic_EU[which(historic_EU$species == species), ], stat = stat_max, R = 1000)
  boots_historic[species] = bt_dist$t
}
####################################################################################################
for(species in unique(early_EU$species)){
  bt_dist = boot(early_EU[which(early_EU$species == species), ], stat = stat_max, R = 1000)
  boots_early[species] = bt_dist$t
}
####################################################################################################
for(species in unique(middle_EU$species)){
  bt_dist = boot(middle_EU[which(middle_EU$species == species), ], stat = stat_max, R = 1000)
  boots_middle[species] = bt_dist$t
}
####################################################################################################
for(species in unique(late_EU$species)){
  bt_dist = boot(late_EU[which(late_EU$species == species), ], stat = stat_max, R = 1000)
  boots_late[species] = bt_dist$t
}
# One-sided shift comparisons as before, on maximum temperature.
cat("Max: \n")
cat("Late - Middle vs Middle - Early\n")
pvals = c()
wilcox_p = c()
for(species in unique(historic_EU$species)){
  pvals = c(pvals, t.test(abs(boots_late[species] - boots_middle[species]), abs(boots_middle[species] - boots_early[species]), alternative = "greater")$p.value)
  wilcox_p = c(wilcox_p, wilcox.test(abs(boots_late[,species] - boots_middle[,species]), abs(boots_middle[,species] - boots_early[,species]), alternative = "greater")$p.value)
}
cat(sum(p.adjust(pvals) < 0.05)/length(pvals), '\n')
cat(sum(p.adjust(wilcox_p) < 0.05)/length(wilcox_p), '\n')
cat("Middle - Early vs Early - Historic\n")
pvals = c()
wilcox_p = c()
for(species in unique(historic_EU$species)){
  pvals = c(pvals, t.test(abs(boots_middle[species] - boots_early[species]), abs(boots_early[species] - boots_historic[species]), alternative = "greater")$p.value)
  wilcox_p = c(wilcox_p, wilcox.test(abs(boots_middle[,species] - boots_early[,species]), abs(boots_early[,species] - boots_historic[,species]), alternative = "greater")$p.value)
}
cat(sum(p.adjust(pvals) < 0.05)/length(pvals), '\n')
cat(sum(p.adjust(wilcox_p) < 0.05)/length(wilcox_p), '\n')
# Restore console output.
sink()
|
9d616272865eed8f5976c8db257a14324f36c6c4
|
38878da75f684d6a701d910e747cba37d93bf36c
|
/run_analysis.R
|
27428cf6acf31ea3458c63ba55a383845ac1da96
|
[] |
no_license
|
joshuaburkhow/PersonalRepository
|
867d49e42a6b763eecee479abca05fc14859b523
|
bd6b95c28124b68add7cd2d3b9cdebc0a45e13ba
|
refs/heads/master
| 2021-05-27T21:57:57.455618
| 2014-05-25T20:05:36
| 2014-05-25T20:05:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,090
|
r
|
run_analysis.R
|
# You should create one R script called run_analysis.R that does the following.
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each
# measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive activity names.
# 5. Creates a second, independent tidy data set with the average of each
# variable for each activity and each subject.
# Note: The data set is a zip file that was downloaded manually so Before this
# script is run, you need to set your working directory to the appropriate
# location (setwd()) to where the extracted zip file resides
# NOTE(review): hard-coded, machine-specific Windows path -- this script only
# runs as-is on the original author's machine.
setwd("L:\\Files\\Coursera\\Getting and Cleaning Data\\Project\\UCI HAR Dataset\\")
# Read in training and test data
# create the data directory that will hold the merged files
if(!file.exists("./data")){dir.create("./data")}
#First we need to read the features list (headers)
library(reshape2)
features <- read.table("features.txt")
# Append names for the two extra columns (subject id and activity code "y")
# that get cbind-ed onto the measurement data further down.
features <- rbind(features, data.frame(V1 = "562",V2 = "subject"))
features <- rbind(features, data.frame(V1 = "563",V2 = "y"))
# Transpose the feature-name column into a character row so it can be used as
# the header for the merged dataset.
features <- t(features[2])
# FIX: removed the original `features$V1 <- NULL`. t() returns a matrix, and
# `$<-` on a matrix errors with "$ operator is invalid for atomic vectors",
# so that statement could never execute successfully.
#read files into environment
testData <- read.table(file="./test/X_test.txt",header=FALSE)
trainData <- read.table(file="./train/X_train.txt",header=FALSE)
subject_test <- read.table(file="./test/subject_test.txt",header=FALSE)
y_test <- read.table(file="./test/y_test.txt",header=FALSE)
subject_train <- read.table(file="./train/subject_train.txt",header=FALSE)
y_train <- read.table(file="./train/y_train.txt",header=FALSE)
activity_labels <- read.table(file="./activity_labels.txt",header=FALSE)
# Merge datasets
#set the column names
names(subject_test) <- "subject"
names(subject_train) <- "subject"
names(y_test) <- "y"
names(y_train) <- "y"
# NOTE(review): activity_labels has two columns but only one name is supplied
# here, so only its first column is renamed to "y" -- presumably intentional
# for the merge below, but worth confirming.
names(activity_labels) <- "y"
# Attach subject id and activity code as extra columns on each split.
testData <- cbind(testData,subject_test,y_test)
trainData <- cbind(trainData,subject_train,y_train)
# NOTE(review): merge(..., all=TRUE) is used to stack the two splits; rbind()
# would be the conventional choice and, unlike merge, cannot collapse
# duplicate rows -- confirm this is the intended behavior.
mergedData <- merge(testData,trainData,all=TRUE)
#set the column names
names(mergedData) <- features
# Extract only the mean and Std dev for each measurement (and subject and y)
mergedData <- mergedData[ , grepl( "mean|std" , names( mergedData ) ) | names(mergedData) =="y" |
                            names(mergedData) == "subject" ]
mergedData <- mergedData[ , !grepl( "Freq" , names( mergedData ) ) ]
# Assign the descriptive activity names to the data set
mergedData <- merge(x = mergedData, y = activity_labels, by = "y", all.x = TRUE)
# Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
mergedDataTidy <- melt(mergedData,id.vars = c("subject","y"))
# FIX: the script loads reshape2, whose casting function is dcast() with a
# `value.var` argument; `cast(..., value = 'value')` belongs to the older
# 'reshape' package and fails here with "could not find function \"cast\"".
mergedDataTidy <- dcast(data=mergedDataTidy, subject ~ y, mean, value.var = 'value')
|
4c064bb2b7757b461b2ddb78bd12745b726c31d6
|
5787f475dc6fedd99056018a2221ad546bef576a
|
/calcJMI.R
|
081a95c1fd8e543693ae00dd47aa8c676932aed3
|
[] |
no_license
|
mmaitenat/ideafix-behind
|
e4c5d1faa3acf72465f19691d81035a58fe2207c
|
361694544437c2f366ba641bfa954dfeb8bf334e
|
refs/heads/master
| 2023-08-01T04:21:35.549217
| 2021-09-13T11:33:31
| 2021-09-13T11:33:31
| 311,494,235
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,851
|
r
|
calcJMI.R
|
#!/usr/bin/env Rscript
# Computes symmetric-uncertainty-style normalized mutual information between
# each feature in X and the deamination label, over deaminations plus real
# FFPE mutations.
library(tidyverse)
library(infotheo)
DIR <- "~/data/ENA_SRP044740/tidydata/"
# Labels (Y) for deaminations: one .rds per sample, keyed by the sample name
# (file name with the last two "_"-separated tokens stripped).
Y.deam.filenames <- list.files(path = DIR, pattern = "_deaminations_Y.rds", full.names = TRUE)
Y.deam <- Y.deam.filenames %>%
  set_names(sub("_[^_]+$", "", sub("_[^_]+$", "", basename(.)))) %>%
  map_dfr(readRDS, .id = "sample") %>%
  mutate(complete_id = paste(sample, id, sep = ":")) %>%
  filter(isDeam == "1")
# Labels (Y) for real mutations (somatic or SNP).
Y.mut.filenames <- list.files(path = DIR, pattern = "_real-mutations_FFPE_Y.rds", full.names = TRUE)
Y.mut <- Y.mut.filenames %>%
  set_names(sub("_[^_]+$", "", sub("_[^_]+$", "", sub("_[^_]+$", "", basename(.))))) %>%
  map_dfr(readRDS, .id = "sample") %>%
  mutate(complete_id = paste(sample, id, sep = ":")) %>%
  filter(isSomatic == "1" | isSNP == "1")
# Feature matrices (X), restricted to the variants retained in Y above.
X.deam.filenames <- list.files(path = DIR, pattern = "_deaminations_X.rds", full.names = TRUE)
X.deam <- X.deam.filenames %>%
  set_names(sub("_[^_]+$", "", sub("_[^_]+$", "", basename(.)))) %>%
  map_dfr(readRDS, .id = "sample") %>%
  mutate(complete_id = paste(sample, id, sep = ":")) %>%
  filter(complete_id %in% Y.deam$complete_id)
X.mut.filenames <- list.files(path = DIR, pattern = "_real-mutations_FFPE_X.rds", full.names = TRUE)
X.mut <- X.mut.filenames %>%
  set_names(sub("_[^_]+$", "", sub("_[^_]+$", "", sub("_[^_]+$", "", basename(.))))) %>%
  map_dfr(readRDS, .id = "sample") %>%
  mutate(complete_id = paste(sample, id, sep = ":")) %>%
  filter(complete_id %in% Y.mut$complete_id)
X <- bind_rows(list(deam = X.deam, mut = X.mut), .id = "source") %>% mutate_if(is.character, as.factor)
Y <- bind_rows(list(deam = Y.deam, mut = Y.mut), .id = "source")
# Tidy X and Y matrices
# Keep only C>T/G>A. This is temporary, because only those changes have FDeamC for now.
# NOTE(review): if no row contains an NA, nonCT.idx is empty and the negative
# subset below (`X[-integer(0), ]`) drops ALL rows -- confirm NA rows are
# always present, or guard this with length(nonCT.idx) > 0.
nonCT.idx <- which(is.na(X), arr.ind = TRUE)[,1]
X <- X[-nonCT.idx,] %>%
  droplevels()
Y <- Y[-nonCT.idx,]
# Check if a mutation is repeated i.e. considered both as somatic mutation and deamination. If it is, remove it from deamination list
X %>%
  group_by(sample) %>%
  filter(duplicated(id)) %>%
  pull(complete_id) -> dup.ids
X %>%
  filter(!(complete_id %in% dup.ids) | source != "deam") -> X
Y %>%
  filter(!(complete_id %in% dup.ids) | source != "deam") -> Y
# Frequency filter
AF.cutoff <- 0.3
X %>%
  filter(allele.freq <= AF.cutoff) -> X
Y %>%
  filter(complete_id %in% X$complete_id) -> Y
# Binary deamination label aligned with the filtered X rows.
miniY <- Y %>%
  pull(isDeam) %>%
  factor(levels = c("0", "1"))
# Factors
# Normalized MI: 2*I(x;Y) / (H(x) + H(Y)), i.e. symmetric uncertainty.
X %>%
  select_if(is.factor) %>%
  select(-contains('id'), -contains('sample'), -contains('source')) -> X_factor
X_factor %>%
  map_dbl(function(x) 2*mutinformation(x, miniY)/(entropy(x) + entropy(miniY))) -> mutInfo_values
# Discrete-valued variables
X %>%
  select(is.repeat.region, hp.length) %>%
  map_dbl(function(x) 2*mutinformation(x, miniY)/(entropy(x) + entropy(miniY))) -> mutInfo_values_discrete
|
c718bbc17467803c9503df7e51a05f390c6571e9
|
4dfa999678e9faff7957ef086d7ab0567dfca216
|
/data_frame.R
|
b0c5481abfaccad0709ad3aba4ae188db412281a
|
[] |
no_license
|
F-Cruz/R
|
506796ca7ec7605e59649e59408e4b1bb13eae67
|
7a85688693c84eac1ea1e0810c73f7652f8341eb
|
refs/heads/master
| 2020-05-23T19:25:27.426977
| 2019-05-15T22:48:08
| 2019-05-15T22:48:08
| 186,912,068
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 156
|
r
|
data_frame.R
|
# Data frame basics: build a small two-column data frame and inspect it.
y_vals <- c(51, 54, 61, 67, 68, 75, 77, 75, 80, 82)
d1 <- data.frame(x = seq_len(10), y = y_vals)
d1          # print the full data frame
names(d1)   # column names: "x" "y"
class(d1)   # "data.frame"
d1$x        # x column as a vector
d1$y        # y column as a vector
plot(d1)    # scatterplot of y against x
|
638bf86e462c13a51d4082ce328aafc1162c301a
|
470c71add81cdf8e9076059dc471c72932bd5aef
|
/functions/getDataFunctions.R
|
9e0f7a6f39c0f8d26d99738e669f9a1a9e3749d8
|
[] |
no_license
|
erichhuang/CRC-RASness
|
26fdc06704807e91f9b22f077fecf34414fca980
|
cb584147b245c9be45244c2b221db63c4bdc5648
|
refs/heads/master
| 2021-01-18T06:24:43.752181
| 2013-02-13T17:09:02
| 2013-02-13T17:09:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,494
|
r
|
getDataFunctions.R
|
## FUNCTIONS TO EXTRACT RAW DATA OBJECTS FROM SYNAPSE
#####
## ANALYST: BRIAN M. BOT
#####
#####
## KFSYSCC DATA
#####
# Download the KFSYSCC CEL archive and clinical table from Synapse, read the
# CEL files into an AffyBatch, and attach the clinical data as pData.
# Returns: an AffyBatch with clinical annotations.
getKFSYSCCdata <- function(){
  require(synapseClient)
  require(affy)
  ## GRAB THE ARCHIVE FILE FROM KOO - UNTAR IN TEMP DIRECTORY
  kfEnt <- downloadEntity("syn1528362")
  kfDir <- tempfile(pattern="kfDir")
  dir.create(kfDir)
  untar(file.path(kfEnt$cacheDir, kfEnt$files), exdir = kfDir)
  theseFiles <- list.celfiles(kfDir, recursive=T, full.names=T)
  ## READ IN CEL FILES
  kooExpr <- ReadAffy(filenames=theseFiles)
  ## READ IN THE CLINICAL DATA
  clinEnt <- downloadEntity("syn1588162")
  kooClin <- read.csv(file.path(clinEnt$cacheDir, clinEnt$files), as.is=T)
  # Sanity check: clinical rows must line up with CEL sample names
  # (sample name before the first "." must equal the clinical SN column).
  stopifnot(all(sapply(strsplit(sampleNames(kooExpr), ".", fixed=T), "[[", 1) == kooClin$SN))
  rownames(kooClin) <- sampleNames(kooExpr)
  pData(kooExpr) <- kooClin
  return(kooExpr)
}
#####
## TCGA DATA -- COAD AND READ
#####
## Download a tab-delimited TCGA file from Synapse and return it as a data
## frame whose first row supplies the column names and whose first column
## supplies the row names (both are stripped from the body).
loadTCGAFileFromEntity <- function(synId){
  require(synapseClient)
  entity <- downloadEntity(synId)
  raw <- read.delim(file.path(entity$cacheDir, entity$files), header=F, as.is=T)
  # Promote the first row to column names, then drop it.
  header <- as.character(raw[1, ])
  body <- raw[-1, ]
  colnames(body) <- header
  # Promote the first column to row names, then drop it.
  rownames(body) <- as.character(body[, 1])
  body[, -1]
}
#####
## AGILENT
# Combine TCGA COAD and READ Agilent expression tables into one numeric
# matrix (features x patients), keyed by the 3-part patient barcode.
# Stops if feature rownames disagree or a patient barcode is duplicated.
getTCGAcrcAgilent <- function(){
  coadAgilent <- loadTCGAFileFromEntity("syn417828")
  readAgilent <- loadTCGAFileFromEntity("syn418082")
  if( all(rownames(coadAgilent) == rownames(readAgilent)) ){
    theseFeatures <- rownames(coadAgilent)
    crcAgilent <- cbind(coadAgilent, readAgilent)
  } else{
    stop("rownames do not match")
  }
  # Shorten full sample barcodes to the patient-level barcode
  # (first three "-"-separated fields).
  thesePatients <- sapply(strsplit(colnames(crcAgilent), "-", fixed=T), function(x){
    paste(x[1:3], collapse="-")
  })
  if( all(duplicated(thesePatients) == FALSE) ){
    colnames(crcAgilent) <- thesePatients
  } else{
    stop("duplicated patients")
  }
  ## CONVERT TO NUMERIC MATRIX
  crcAgilent <- apply(crcAgilent, 2, as.numeric)
  rownames(crcAgilent) <- theseFeatures
  return(crcAgilent)
}
#####
## RNAseq
# Same combine-and-clean pipeline as getTCGAcrcAgilent, but for the TCGA
# COAD/READ RNAseq tables. Returns a numeric matrix (features x patients).
getTCGAcrcRNAseq <- function(){
  coadRNAseq <- loadTCGAFileFromEntity("syn1446197")
  readRNAseq <- loadTCGAFileFromEntity("syn1446276")
  if( all(rownames(coadRNAseq) == rownames(readRNAseq)) ){
    theseFeatures <- rownames(coadRNAseq)
    crcRNAseq <- cbind(coadRNAseq, readRNAseq)
  } else{
    stop("rownames do not match")
  }
  # Shorten full sample barcodes to the patient-level barcode.
  thesePatients <- sapply(strsplit(colnames(crcRNAseq), "-", fixed=T), function(x){
    paste(x[1:3], collapse="-")
  })
  if( all(duplicated(thesePatients) == FALSE) ){
    colnames(crcRNAseq) <- thesePatients
  } else{
    stop("duplicated patients")
  }
  ## CONVERT TO NUMERIC MATRIX
  crcRNAseq <- apply(crcRNAseq, 2, as.numeric)
  rownames(crcRNAseq) <- theseFeatures
  return(crcRNAseq)
}
#####
## CLINICAL DATA AT THE PATIENT LEVEL
## Pull the COAD and READ clinical tables and merge them into one data frame,
## carrying the row names through the merge via a temporary "rns" column.
getTCGAcrcClinical <- function(){
  clinTables <- lapply(c(coad = "syn1446080", read = "syn1446153"), loadTCGAFileFromEntity)
  for (i in seq_along(clinTables)) {
    clinTables[[i]]$rns <- rownames(clinTables[[i]])
  }
  crcClin <- merge(x = clinTables$coad, y = clinTables$read, all = T)
  rownames(crcClin) <- crcClin$rns
  crcClin$rns <- NULL
  crcClin
}
#####
## GAEDCKE DATASET FROM GEO
## Agilent-014850 Whole Human Genome Microarray 4x44K G4112F
#####
# Fetch GSE20842 from GEO and return its ExpressionSet, subset to tumor
# samples only (characteristics_ch1.4 == "tissue: tumor").
getGaedckeFromGEO <- function(){
  require(GEOquery)
  geoFiles <- getGEO("GSE20842", GSEMatrix=T, AnnotGPL=T)
  gaedckeEset <- geoFiles$GSE20842_series_matrix.txt.gz
  # NOTE: KRAS STATUS - pData(gaedckeEset)$characteristics_ch1.5 IN pData
  ## SUBSET TO ONLY TUMORS
  clin <- pData(gaedckeEset)
  gaedckeEset <- gaedckeEset[, clin$characteristics_ch1.4 == "tissue: tumor" ]
  return(gaedckeEset)
}
#####
## KHAMBATA-FORD DATASET FROM GEO
##
#####
# Fetch GSE5851 from GEO and return its ExpressionSet unmodified.
getKhambataFromGEO <- function(){
  require(GEOquery)
  geoFiles <- getGEO("GSE5851", GSEMatrix=T, AnnotGPL=T)
  khambataEset <- geoFiles$GSE5851_series_matrix.txt.gz
  return(khambataEset)
}
#####
## EMEXP3557
#####
## NEED INFORMATION ON THIS CHIP -- ADXCRCG2a520319
## NEED CLINICAL INFORMATION
#####
# Download the E-MEXP-3557 CEL files from Synapse and read them into an
# AffyBatch. FIX: the original function ended on an assignment, so its value
# was only returned invisibly; return the AffyBatch explicitly.
getEMEXP3557 <- function(){
  require(synapseClient)
  require(affy)
  ent <- downloadEntity("syn372544")
  exprSet <- ReadAffy(celfile.path=ent$cacheDir)
  exprSet
}
#####
## EMEXP991
#####
## NEED CLINICAL INFORMATION
#####
# Download the E-MEXP-991 CEL files from Synapse and read them into an
# AffyBatch. FIX: added require(affy) (ReadAffy comes from affy; the sibling
# functions load it, this one relied on it already being attached) and return
# the AffyBatch explicitly instead of only invisibly via the assignment.
getEMEXP991 <- function(){
  require(synapseClient)
  require(affy)
  ent <- downloadEntity("syn202357")
  exprSet <- ReadAffy(celfile.path=ent$cacheDir)
  ## exprSet@protocolData@data$ScanDate
  ## ALL SCANED ON SAME DAY
  exprSet
}
|
8dd74cc077576970912728f371c506ad54b14a15
|
e8bd1221d5edf301183e222ae215afa7f3a4c166
|
/man/sample.lgcp.Rd
|
27656d2510a2a2794014425318109fcd46a77025
|
[] |
no_license
|
dill/inlabru
|
1b9a581ae5b56246fcd748db8df051ae4ff8bfa8
|
e2c38a34d591f712b57cbe430c24bb0a82f03ae4
|
refs/heads/master
| 2021-01-22T22:53:21.963501
| 2017-03-18T09:30:08
| 2017-03-18T09:30:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,297
|
rd
|
sample.lgcp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sampling.R
\name{sample.lgcp}
\alias{sample.lgcp}
\title{Sample from a log Gaussian Cox process (LGCP)}
\usage{
sample.lgcp(mesh, loglambda, strategy = "rectangle", R = 6371,
samplers = NULL)
}
\arguments{
\item{mesh}{An \link{inla.mesh} object}
\item{loglambda}{A vector of log intensities at the mesh vertices (for higher order basis functions, e.g. for \code{inla.mesh.1d} meshes, \code{loglambda} should be given as \code{mesh$m} basis function weights rather than the values at the \code{mesh$n} vertices)}
\item{strategy}{Only applicable to 2D meshes. Use "rectangle" for flat 2D meshes and "spherical" for "sliced-spherical" for spherical meshes.}
\item{R}{Only for spherical meshes. This sets the radius of the sphere approximated by the mesh.}
\item{samplers}{A SpatialPolygonsDataFrame. Simulated points that fall outside these polygons are discarded.}
}
\value{
point samples on the mesh
}
\description{
Sample from a log Gaussian Cox process (LGCP)
}
\examples{
library(inlabru)
vertices = seq(0, 3, by = 0.1)
mesh = inla.mesh.1d(vertices)
loglambda = 5 - 0.5 * vertices
pts = sample.lgcp(mesh, loglambda)
pts$y = 0
plot(vertices, exp(loglambda), type = "l", ylim = c(0,150))
points(pts, pch = "|" )
}
|
37ca87f0d8a35fda90e9121c5ec92682b91bfc63
|
836b133c87bce43ae20673cc1842ab20e9decc87
|
/paper_code/Figure_codes/SFigures/FigureS9_distinct_module/FigureS9C_mutability_score_environment_bin.R
|
e63d92ac44f5e45652d36c8b8165e68c7686d512
|
[
"MIT"
] |
permissive
|
sashaflevy/PPiSeq
|
b98836b99e1e78ccb54e02be46792c700cb7fa32
|
646dbe151e7b6044e762fff1cf36b185dffe3bdc
|
refs/heads/master
| 2021-07-01T14:47:49.241826
| 2020-10-05T15:29:52
| 2020-10-05T15:29:52
| 177,872,906
| 0
| 4
| null | 2020-10-05T15:29:53
| 2019-03-26T21:41:35
|
HTML
|
UTF-8
|
R
| false
| false
| 3,737
|
r
|
FigureS9C_mutability_score_environment_bin.R
|
# Working directory and shared helpers (csvReader_T, split_string_vector, ...).
setwd("~/Desktop/PPiSeq_additional_data/")
source("function.R") # load commonly used functions
#Commonly used colors
# FIX: the last palette entry was "007AFF" (missing the leading "#"), which is
# not a valid R color specification and errors the moment it is used.
apple_colors = c("#5AC8FA", "#FFCC00", "#FF9500", "#FF2D55", "#007AFF", "#4CD964", "#FF3B30",
                 "#8E8E93", "#EFEFF4", "#CECED2", "#000000", "#007AFF")
# Per-PPI variability scores: column 1 = "prot1:prot2" id, column 3 = score
# (assumed from the indexing below -- confirm against the CSV header).
vScore_PPI = csvReader_T("Datasets_generated_by_preprocessing/Variation_score_PPI_environment_neg_zero_SD_merge_filter.csv")
PPI_pair = split_string_vector(vScore_PPI[,1])
protein_unique = unique(as.vector(PPI_pair)) # 2082
vScore_protein = cbind(protein_unique, rep(0, length(protein_unique)))
# Mean variability score per protein, averaged over every PPI it appears in.
# seq_along() is safe when protein_unique is empty (1:length(x) is not).
for(i in seq_along(protein_unique)){
  index_protein = unique(c(which(PPI_pair[,1] == protein_unique[i]),
                           which(PPI_pair[,2] == protein_unique[i])))
  vScore_protein[i,2] = mean(as.numeric(vScore_PPI[index_protein,3]))
}
#### I put all the protein degree data into each bin
vScore_bin = function(vScore_PPI, bin, vScore_protein){
stability_1_PPI = vScore_PPI[which(as.numeric(vScore_PPI[,2]) == bin), 1]
PPI_pair = split_string_vector(stability_1_PPI)
protein_unique = unique(as.vector(PPI_pair))
all_vScore = as.numeric(vScore_protein[match(protein_unique, as.character(vScore_protein[,1])), 2])
return(all_vScore)
}
stability_1 = data.frame(vScore = vScore_bin(vScore_PPI, 1, vScore_protein))
stability_2 = data.frame(vScore = vScore_bin(vScore_PPI, 2, vScore_protein))
stability_3 = data.frame(vScore = vScore_bin(vScore_PPI, 3, vScore_protein))
stability_4 = data.frame(vScore = vScore_bin(vScore_PPI, 4, vScore_protein))
stability_5 = data.frame(vScore = vScore_bin(vScore_PPI, 5, vScore_protein))
stability_6 = data.frame(vScore = vScore_bin(vScore_PPI, 6, vScore_protein))
stability_7 = data.frame(vScore = vScore_bin(vScore_PPI, 7, vScore_protein))
stability_8 = data.frame(vScore = vScore_bin(vScore_PPI, 8, vScore_protein))
stability_9 = data.frame(vScore = vScore_bin(vScore_PPI, 9, vScore_protein))
stability_1$label = "1"
stability_2$label = "2"
stability_3$label = "3"
stability_4$label = "4"
stability_5$label = "5"
stability_6$label = "6"
stability_7$label = "7"
stability_8$label = "8"
stability_9$label = "9"
stability_bin_vScore = rbind(stability_1, stability_2, stability_3,
stability_4, stability_5, stability_6,
stability_7, stability_8, stability_9)
min(stability_bin_vScore$vScore)
col_chosen = c("#4575b4","#74add1","#abd9e9","#e0f3f8","#ffffbf","#fee090", "#fdae61","#f46d43","#d73027")
library(ggplot2)
ggplot(stability_bin_vScore, aes(x = vScore, fill = label, col = label))+
geom_density(alpha = 0.05)+
scale_color_manual(name = "Number of positive environments", values = col_chosen)+
scale_fill_manual(name = "Number of positive environments", values = col_chosen)+
scale_x_continuous(name = "Variability score",
limits=c(0, 3),
breaks = seq(0,3, by =0.5),
labels = seq(0,3, by= 0.5)) +
ylab("Density") +
guides(fill=guide_legend(ncol=3), col = guide_legend(ncol= 3))+
theme(legend.key = element_blank(), legend.position = c(0.6,0.8),
legend.text=element_text(size=10),legend.title=element_text(size=10),
legend.key.size = unit(0.5, "cm"))+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black")) +
theme(axis.text.x = element_text(size = 10, color = "black"),
axis.text.y.left = element_text(size = 10, color = "black"),
axis.title.y=element_text(size=10)) +
theme(text = element_text(size=10))
ggsave("Figures/SFigures/SFigure9/FigureS9C_mutability_score_environment_bin.pdf", width =4, height =4)
|
819592c3b3a59440c3e56443d0aad9bed1459c80
|
63bab589b6d666684a490d11320621b3c41e66cd
|
/scrape_parler.R
|
f95edaee2d689b13fa21a46b8d163b1406f1d272
|
[] |
no_license
|
inh2102/scrape-parler-R
|
626fac07045692a404746555ba2767b96f8c8ad0
|
150562c79d09312d5e43d9364c3171c998d494ed
|
refs/heads/main
| 2023-02-10T09:04:47.348211
| 2021-01-05T20:29:44
| 2021-01-05T20:29:44
| 316,870,699
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 124
|
r
|
scrape_parler.R
|
## Scrape the Parler front page and split the result into posts and
## trending hashtags. All scraping logic lives in functions.R.
source("functions.R")
packages()  # helper from functions.R; presumably loads/installs required packages -- confirm against functions.R
df <- scrape_parler(scrolls=10)  # scrolls=10: how many page scrolls to perform while collecting content
posts <- df$posts                      # data frame / list of scraped posts
trending_hashtags <- df$trending_hashtags  # scraped trending-hashtag table
|
43710dccc76cfdf0b034f4a5ac09905571ae1e85
|
bb7cc2b21d4337ca8f0889e29b01a4fdfd301d8c
|
/data-raw/umcsent_example.R
|
ed45ddb9767f06a84a82d88464b63749114fefa8
|
[] |
no_license
|
cbergmeir/Rlgt
|
2d8ecb948c3c26cb28b1c4108ed82b3a9b4e0f22
|
652ba908205ca49a92e166000fea24843a33f02c
|
refs/heads/master
| 2023-08-31T13:17:37.877023
| 2023-08-31T07:46:53
| 2023-08-31T07:46:53
| 154,053,696
| 21
| 13
| null | 2023-09-11T05:46:22
| 2018-10-21T21:25:08
|
R
|
UTF-8
|
R
| false
| false
| 2,319
|
r
|
umcsent_example.R
|
## Build the 'umcsent.example' dataset: University of Michigan consumer
## sentiment (UMCSENT) joined on month with US Google Trends interest for five
## finance-related categories; all series log-transformed before saving.
library(gtrendsR)
library(dplyr)
library(lubridate)

umscent <- read.csv('../data-raw/UMCSENT.csv', stringsAsFactors = FALSE)
umscent$DATE <- as.Date(umscent$DATE)

## Google Trends category ids; names become the output column names.
trend_categories <- c(search.engine      = 485,
                      financial.planning = 903,
                      bus.news           = 784,
                      investing          = 107,
                      energy.utilities   = 233)

## Download one category's US interest-over-time series and return a
## two-column data frame: date + the hits column renamed to 'col_name'.
## Replaces five copy-pasted fetch/convert/select/rename sequences.
fetch_trend <- function(category, col_name) {
  trend <- gtrends(keyword = NA, geo = "US",
                   category = category, time = "all")$interest_over_time
  trend$date <- as.Date(trend$date)
  trend <- trend %>% dplyr::select(date, hits)
  names(trend)[names(trend) == "hits"] <- col_name
  trend
}

trend_list <- Map(fetch_trend, trend_categories, names(trend_categories))

## Inner-join sentiment with every trends series on 'date' (same join order
## as the category vector above, matching the original column order).
umcsent.example.prestransform <- Reduce(
  function(acc, trend) inner_join(acc, trend, by = 'date'),
  trend_list,
  init = umscent %>% rename(consumer.sent = UMCSENT, date = DATE)
)

curr_series <- "consumer.sent"

## Log-transform all series.
umcsent.example <- umcsent.example.prestransform %>%
  mutate_at(.vars = c("consumer.sent", "search.engine",
                      "financial.planning", "bus.news",
                      "investing", "energy.utilities"),
            .funs = log)

save(umcsent.example, file = './data/umcsent.example.rda')
|
782448ccc07a4cba6576a80e632705bb8a47b44a
|
43b6f5fce442d4eb963ee6cfc74f33662602ed61
|
/man/tds_list_datasets.Rd
|
05c99e420d11a7f2e9615a641415d28e2fd44cd3
|
[] |
no_license
|
mt-climate-office/thredds
|
92e6cb1ebe8c8b4d7d794c664d5e4c91cfe9437c
|
2d95d0a6d66213d0877db25697e6e4fd79288299
|
refs/heads/master
| 2021-09-13T17:26:46.345417
| 2018-05-02T14:59:25
| 2018-05-02T14:59:25
| 94,267,113
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 806
|
rd
|
tds_list_datasets.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tds_list_datasets.R
\name{tds_list_datasets}
\alias{tds_list_datasets}
\title{Get a list of available datasets on a THREDDS data server.}
\usage{
tds_list_datasets(thredds_url, recursive = FALSE)
}
\arguments{
\item{thredds_url}{A string providing the URL of a THREDDS server,
usually ending with '/thredds/'.}
\item{recursive}{Should the function recurse into nested THREDDS catalogs.
Defaults to 'FALSE'.}
}
\value{
A data_frame containing dataset names and paths.
}
\description{
Get a list of available datasets on a THREDDS data server.
}
\examples{
library(thredds)
tds_list_datasets(thredds_url = "https://cida.usgs.gov/thredds/")
tds_list_datasets(thredds_url = "http://thredds.northwestknowledge.net:8080/thredds/")
}
|
44392d6f2a9d2771890ad6018c10e6d8c0eb108e
|
725a33f27fce430ee481a3542aae5bb81a94dfc0
|
/R/qcMetric_EVD.R
|
0d86173fa67ddb8b2e986c04bcbc0634f49a17be
|
[
"BSD-3-Clause"
] |
permissive
|
cbielow/PTXQC
|
fac47ecfa381737fa0cc36d5ffe7c772400fb24e
|
f4dc4627e199088c83fdc91a1f4c5d91f381da6c
|
refs/heads/master
| 2023-07-20T00:39:45.918617
| 2023-05-17T14:23:03
| 2023-05-17T14:23:03
| 20,481,452
| 41
| 30
|
NOASSERTION
| 2023-05-17T14:23:04
| 2014-06-04T11:53:49
|
HTML
|
UTF-8
|
R
| false
| false
| 71,991
|
r
|
qcMetric_EVD.R
|
#####################################################################
qcMetric_EVD_UserContaminant = setRefClass(
  "qcMetric_EVD_UserContaminant",
  contains = "qcMetric",
  methods = list(initialize=function() {  callSuper(
    helpTextTemplate =
      "User defined contaminant plot based on peptide intensities and counts.
Usually used for Mycoplasma detection, but can be used for an arbitrary (set of) proteins.
All proteins (and their peptides) which contain the search string from the YAML file are considered contaminants.
The contaminant's search string is searched in the full FASTA header in proteinGroups.txt.
If proteinGroups.txt is not available/found,
only protein identifiers can be considered. The search realm used is given in the plot subtitle.
You should choose the contaminant name to be distinctive.
Only peptides belonging to a single protein group are considered when computing the fractions (contaminant vs. all),
since peptides shared across multiple groups are potentially false positives.
Two abundance measures are computed per Raw file:
 - fraction of contaminant intensity (used for scoring of the metric)
 - fraction of contaminant spectral counts (as comparison; both should be similar)
If the intensity fraction exceeds the threshold (indicated by the dashed horizontal line) a contamination is assumed.
For each Raw file exceeding the threshold an additional plot giving cumulative Andromeda peptide
score distributions is shown.
This allows to decide if the contamination is true. Contaminant scores
should be equally high (or higher), i.e. to the right, compared to the sample scores.
Each graph's subtitle is augmented with a p-value of the Kologorov-Smirnoff test of this data
(Andromeda scores of contaminant peptides vs. sample peptides).
If the p-value is high, there is no score difference between the two peptide populations.
In particular, the contaminant peptides are not bad-scoring, random hits.
These p-values are also shown in the first figure for each Raw file. Note that the p-value is purely based
on Andromeda scores and is independent of intensity or spectral counts.
Heatmap score [EVD: Contaminant <name>]: boolean score, i.e. 0% (fail) if the intensity threshold was exceeded; otherwise 100% (pass).
",
    ## Compute per-Raw-file contaminant fractions (intensity + spectral counts)
    ## for each user-defined contaminant and produce plots + heatmap scores.
    ## df_evd: evidence table; df_pg: proteinGroups table (may be NULL);
    ## lst_contaminants: list of c(search_string, threshold) entries from YAML.
    workerFcn = function(.self, df_evd, df_pg, lst_contaminants)
    {
      #lst_contaminants = yaml_contaminants
      ## completeness check
      ## PG is either missing, or has the correct data
      ## BUGFIX: was '|' which aborted whenever df_pg was present (regardless of
      ## its columns); '&&' only aborts when df_pg exists but lacks required columns.
      if (!is.null(df_pg) && !checkInput(c("id", "fasta.headers"), df_pg)) return()
      ## "score" might not be present (e.g. missing in MQ 1.0.13.13)
      if (!checkInput(c("protein.group.ids", "type", "intensity", "fc.raw.file"),df_evd)) return()

      local_qcScores = data.frame()
      lpl = list()
      ca_entry = lst_contaminants[[1]]
      for (ca_entry in lst_contaminants)
      {
        ca = ca_entry[1]
        ##
        if (ca == FALSE) {
          cat("No special contaminants requested!\n")
          break;
        }
        ca_thresh = as.numeric(ca_entry[2])

        not_found = TRUE
        if (is.null(df_pg)) {
          ## only search in protein IDs
          pg_id = df_evd$protein.group.ids[grep(ca, df_evd$proteins)]
          ## this could be multiple PGs ("PG1; PG2") per cell, but we require unique peptides below, so its not a problem
          search_realm = "protein name only"
        } else {
          ## search in FASTA headers and protein IDs
          pg_id = df_pg$id[c(grep(ca, df_pg$fasta.headers, ignore.case = TRUE),
                             grep(ca, df_pg$protein.ids, ignore.case = TRUE))]
          search_realm = "full FASTA header"
        }
        if (length(pg_id) > 0)
        {
          ## we might or might not have found something... we plot it anyways, so the user can be sure that we searched for it
          not_found = FALSE
          ## find peptides which only have one group (ignoring razor peptides where we cannot be sure)
          evd_uniqueGroup = !grepl(";", df_evd$protein.group.ids)
          ## do not trust MBR here. We want real evidence!
          evd_realMS = !grepl("MATCH", df_evd$type)
          ## for each Raw file: find unique peptides of our contaminant
          cont_data.l = plyr::dlply(df_evd[evd_uniqueGroup & evd_realMS, ], "fc.raw.file",
                                    function(x) {
                                      if (length(grep(";", x$protein.group.ids))) stop("more than one proteinGroup for supposedly unique peptide...")
                                      x$idx_cont = x$protein.group.ids %in% pg_id
                                      ## fraction of contaminant spectral counts and intensity (in %)
                                      sc = sum(x$idx_cont) / nrow(x) * 100
                                      int = sum(as.numeric(x$intensity[x$idx_cont]), na.rm = TRUE) / sum(as.numeric(x$intensity), na.rm = TRUE) * 100
                                      above.thresh = (sc > ca_thresh) | (int > ca_thresh)
                                      ## cumulative Andromeda score distributions (sample vs. contaminant),
                                      ## only when the 'score' column exists
                                      cont_scoreECDF = NULL;
                                      if ("score" %in% colnames(x)) {
                                        cont_scoreECDF = plyr::ddply(x, "idx_cont", function(xx) {
                                          if (length(unique(xx$score)) < 2) return(NULL) ## not enough data for ECDF
                                          r = getECDF(xx$score)
                                          r$condition = c("sample", "contaminant")[xx$idx_cont[1]+1]
                                          return(r)
                                        })
                                      }
                                      ## KS test: are contaminant scores stochastically >= sample scores?
                                      if (!any(x$idx_cont)){
                                        ks_p = NA
                                      } else { ## no contaminant peptide
                                        ks_p = suppressWarnings(  ## brags about '-value will be approximate in the presence of ties'
                                          ks.test(x$score[x$idx_cont], x$score[!x$idx_cont], alternative = "greater")$p.value
                                        )
                                      }
                                      return (list(cont_data = data.frame(spectralCount = sc, intensity = int,
                                                                          above.thresh = above.thresh, fc.raw.file = x$fc.raw.file[1],
                                                                          score_KS = ks_p),
                                                   cont_scoreECDF = cont_scoreECDF))
                                    })
          head(cont_data.l)
          ## melt
          cont_data = plyr::ldply(cont_data.l, function(l) { l$cont_data })
          cont_data.long = reshape2::melt(cont_data, id.vars="fc.raw.file")
          #
          # old: not_found = all(cont_data.long$value[cont_data.long$variable == "above.thresh"] == FALSE)
        }

        if (not_found)
        { ## identifier was not found in any sample
          pl_cont = ggText("EVD: Contaminants",
                           paste0("Contaminant '", ca, "' was not found in any sample.\n\nDid you use the correct database?"),
                           "red")
          lpl = append(lpl, list(pl_cont))
        } else {
          ## plot User-Contaminants
          lpl_i = byXflex(data = cont_data.long, indices = cont_data.long$fc.raw.file, subset_size = 120,
                          FUN = plot_ContUser, sort_indices = TRUE,
                          name_contaminant = ca, extra_limit = ca_thresh, subtitle = paste("search realm:", search_realm))
          lpl = append(lpl, lpl_i)
          ## plot Andromeda score distribution of contaminant vs. sample
          pl_andr = plyr::llply(cont_data.l, function(l)
          {
            if (l$cont_data$above.thresh == FALSE ||
                is.null(l$cont_scoreECDF))
            {
              return(NULL) ## some entries might be skipped (not significant)
            }
            p = plot_ContUserScore(l$cont_scoreECDF, l$cont_data$fc.raw.file, l$cont_data$score_KS)
            #print(p)
            return(p)
          })
          pl_andr_nonNull = plyr::compact(pl_andr) ## remove 'NULL' entries from plot list
          lpl = append(lpl, pl_andr_nonNull)

          ## add heatmap column: pass (1) unless the contaminant threshold was exceeded (0)
          cname = sprintf(.self$qcName, ca)
          cont_data[,cname] = as.numeric(!cont_data$above.thresh) ## inverse (0 is 'bad')
          qcScore = cont_data[, c("fc.raw.file", cname)]
          if (ncol(local_qcScores) == 0){
            local_qcScores = qcScore
          } else {
            local_qcScores = merge(local_qcScores, qcScore)
          }
        }
      } ## contaminant loop

      return(list(plots = lpl, qcScores = local_qcScores))
    },
    qcCat = "Prep",
    qcName = "EVD:User~Contaminant~(%s)",
    orderNr = 0020
  )
  return(.self)
  })
)
#####################################################################
qcMetric_EVD_PeptideInt = setRefClass(
  "qcMetric_EVD_PeptideInt",
  contains = "qcMetric",
  methods = list(initialize=function() {  callSuper(
    helpTextTemplate =
      "Peptide precursor intensity per Raw file from evidence.txt WITHOUT match-between-runs evidence.
Low peptide intensity usually goes hand in hand with low MS/MS identifcation rates and unfavourable signal/noise ratios,
which makes signal detection harder. Also instrument acquisition time increases for trapping instruments.
Failing to reach the intensity threshold is usually due to unfavorable column conditions, inadequate
column loading or ionization issues. If the study is not a dilution series or pulsed SILAC experiment, we
would expect every condition to have about the same median log-intensity (of 2<sup>%1.1f</sup>).
The relative standard deviation (RSD) gives an indication about reproducibility across files and should be below 5%%.
Depending on your setup, your target thresholds might vary from PTXQC's defaults.
Change the threshold using the YAML configuration file.
Heatmap score [EVD: Pep Intensity (>%1.1f)]:
Linear scale of the median intensity reaching the threshold, i.e. reaching 2<sup>21</sup> of 2<sup>23</sup> gives score 0.25.
",
    ## Plot per-Raw-file peptide precursor intensity distributions and score
    ## each file by how close its median intensity comes to the target threshold.
    ## df_evd: evidence table; thresh_intensity: target median log2 intensity.
    workerFcn = function(.self, df_evd, thresh_intensity)
    {
      ## abort early unless the required evidence columns are present
      if (!checkInput(c("fc.raw.file", "intensity", "contaminant"), df_evd)) return()

      ## substitute the two intensity placeholders in the help text
      .self$helpText = sprintf(.self$helpTextTemplate, thresh_intensity, thresh_intensity)

      ## median log2 intensity per Raw file
      med_by_file = plyr::ddply(df_evd[ , c("fc.raw.file", "intensity")], "fc.raw.file",
                                function(chunk) data.frame(med = log2(quantile(chunk$intensity, probs=0.5, na.rm = TRUE))))
      ## relative standard deviation of the per-file medians (reproducibility)
      rsd_medians = RSD((med_by_file$med))
      rsd_label = pastet("INT RSD [%]", round(rsd_medians, 3))

      lpl = boxplotCompare(data = df_evd[, c("fc.raw.file", "intensity", "contaminant")],
                           log2 = TRUE,
                           mainlab = "EVD: peptide intensity distribution",
                           ylab = expression(log[2]*" intensity"),
                           sublab = paste0("RSD ", round(rsd_medians, 1),"% (expected < 5%)\n"),
                           abline = thresh_intensity)
      #for (pl in lpl) print(pl)

      ## heatmap score: linear fraction of the non-log threshold that was reached
      score_tab = med_by_file
      cname = sprintf(.self$qcName, thresh_intensity)
      score_tab[,cname] = qualLinThresh(2^score_tab$med, 2^thresh_intensity) ## use non-log space
      qcScore = score_tab[, c("fc.raw.file", cname)]

      return(list(plots = lpl, qcScores = qcScore))
    },
    qcCat = "prep",
    qcName = "EVD:~Peptide~Intensity~(\">%1.1f\")",
    orderNr = 0030
  )
  return(.self)
  })
)
#####################################################################
qcMetric_EVD_ReporterInt = setRefClass(
  "qcMetric_EVD_ReporterInt",
  contains = "qcMetric",
  methods = list(initialize=function() {  callSuper(
    helpText =
      "ITRAQ/TMT reporter intensity violin plots of all PSMs for each channel and Raw file.
The second subplot shows labeling efficiency (LE), i.e the fraction of PSMs with non-zero abundance (100% = full labeling of all PSMs; 0% = no reporter ions at all). This is used for heatmap scoring. See below.
There is a similar 'Experimental Group' based metric/plot based on proteins.txt.
PTXQC uses isotope-corrected intensities (eliminating channel carry-over) to allow for detection of empty channels, e.g. due to mis-labeling.
If MaxQuant did no isotope correction (i.e. corrected and uncorrected channels are equal),
the plot title will show a warning. The scores are too optimistic in this case (since carry-over will be mistaken for actual signal).
Note: global labelling efficiency can only be judged indirectly with this metric, since isobaric reporters where set as
fixed modification. Thus, MaxQuant. will only identify labeled peptides in the first place.
Observing only very few peptides (see peptide count metric), is a good indicator.
However, if only the labeling of a few channels failed, this will be noticable here!
Labeling can still be poor, even though identification was successful.
A labeling efficiency (LE) is computed per Raw file AND channel as: the percentage of PSMs which have non-zero reporter intensity.
Ideally LE reaches 100 percent (all peptides have an intensity in the channel; biological missingness ignored).
Heatmap score: minimum labeling efficiency per Raw file across all channels.
I.e. for 4-plex ITRAQ and two Raw files, there will be 8 labeling efficiency (LE) values.
Each Raw file is now scored by the minimum LE of all its 4 channels.
",
    ## Build reporter-intensity violin plots + labeling-efficiency bars per
    ## Raw file and channel; heatmap score = min labeling efficiency per file.
    ## df_evd: evidence table with reporter.intensity[.corrected].N columns.
    workerFcn=function(.self, df_evd)
    {
      ## completeness check
      if (!checkInput(c("fc.raw.file"), df_evd)) return()

      ## check if reporter.intensity.0... is present
      cols_reporter = grepv("^reporter.intensity.corrected.[0-9]", colnames(df_evd));
      cols_reporter.nc = grepv("^reporter.intensity.[0-9]", colnames(df_evd));
      if(length(cols_reporter) <= 1 || length(cols_reporter.nc) <= 1) {
        message("Info: Two reporter.intensity and two reporter.intensity.corrected columns are needed for metric ReporterIntensity.")
        return()}

      ## check if correction was done at all: compare corrected vs. uncorrected
      ## on (up to) the first 1000 rows. NOTE(review): if df_evd has fewer than
      ## 1000 rows, the out-of-range rows become NA and are dropped by na.rm --
      ## so this still works, but only compares the existing rows.
      if (all(df_evd[1:1000, cols_reporter] == df_evd[1:1000, cols_reporter.nc], na.rm = TRUE))
      {
        title_subtext = "Warning: MaxQuant did NO isotope correction";
        title_color = "red"
      } else {
        title_subtext = "";
        title_color = "black"
      }
      g_title = "EVD: Reporter Intensities"

      ## use data.table for aggregation, its MUCH faster than ddply() and uses almost no extra memory
      df_reps = reshape2::melt(df_evd[, c("fc.raw.file", cols_reporter)],
                               id.vars ="fc.raw.file",
                               value.name = "intensity",
                               variable.name = "channel")
      head(df_reps)
      dt_reps = data.table::data.table(df_reps)
      ## do NOT remove -inf and NA's and 0's -- we need them to count labeling-efficiency (#entries with intensity > 0 vs. ALL)
      ## rename 'reporter.intensity.corrected.0' to '0'
      dt_reps$channel = substring(dt_reps$channel, nchar('reporter.intensity.corrected.') + 1)
      ## invert the channel order (so that channel 0 is highest, i.e. appears on top in plot)
      dt_reps$channel = factor(dt_reps$channel, levels = sort(unique(dt_reps$channel), decreasing = TRUE))
      head(dt_reps)

      ## common y-limits across all plot pages (zeros excluded)
      ylims_minmax = range(dt_reps$intensity[dt_reps$intensity>0])
      if (is.na(ylims_minmax[1])) {ylims_minmax = range(1)} ## data is all 0! Make range at least 1, so log-range does not crash

      ## Render one page: violin plot of non-zero intensities (left) +
      ## labeling-efficiency bar chart (right), combined via grid.arrange.
      ## NOTE(review): the defaults 'title_subtext = title_subtext' etc. are
      ## self-referential and only safe because byXflex always passes both
      ## arguments explicitly below.
      fcn_boxplot_internal = function(data, title_subtext = title_subtext, title_color = title_color)
      {
        ### first subplot (distribution of intensities)
        data_noZero = data[data$intensity!=0,]
        pl = ggplot(data=data_noZero) +
          geom_violin(aes_string(x = "fc.raw.file",
                                 y = "intensity",
                                 color = "channel",
                                 fill = "channel"
          )) +
          xlab("") +
          ylab("reporter intensity (zeros removed)") +
          guides(#alpha = guide_legend(title="Label Eff"),
            fill = guide_legend(reverse = TRUE), ## inverse label order, so that channel 0 is on top
            color = guide_none()) +
          theme(axis.text.x = element_text(angle = 45, vjust = 0.5),
                legend.position = "right",
                plot.title = element_text(color = title_color)) +
          ggtitle(g_title, title_subtext) +
          PTXQC:::scale_x_discrete_reverse(unique(data$fc.raw.file)) +
          scale_y_log10(limits = ylims_minmax) +
          coord_flip()
        #pl

        ## labeling efficiency per file and channel: fraction of PSMs > 0
        ylims = dt_reps[, { #limits = boxplot.stats(intensity, coef = 0.7)$stats;
          list(labEff_PC = sum(intensity > 0, na.rm = TRUE) / (.N))
        }, by = c("fc.raw.file", "channel")]

        ### second subplot (labeling efficiency)
        pl_eff = ggplot(data = ylims) + geom_bar(aes_string(x = "fc.raw.file",
                                                            y = "labEff_PC * 100",
                                                            fill = "channel"),
                                                 stat = "identity",
                                                 position = "dodge") +
          xlab("") +
          ylab("labelling efficiency (%)") +
          ylim(0, 100) +
          guides(fill = guide_legend(reverse = TRUE), ## inverse label order, so that channel 0 is on top
                 color = guide_none()) +
          theme(legend.position = "right") +
          ggtitle("Fraction of Non-Zero Intensities", "") +
          PTXQC:::scale_x_discrete_reverse(unique(ylims$fc.raw.file)) +
          coord_flip()
        #pl_eff

        pl_both = gridExtra::grid.arrange(pl, pl_eff, ncol=2)
        #print(pl)
        return(pl_both)
      }
      ## keep ~40 violins per page regardless of channel count
      channel_count = length(cols_reporter)
      lpl = byXflex(data = dt_reps, indices = dt_reps$fc.raw.file, subset_size = round(40 / channel_count),
                    sort_indices = TRUE, FUN = fcn_boxplot_internal, title_subtext = title_subtext, title_color = title_color)
      lpl

      # heatmap scoring
      ## .. take min score over all channels
      ylims = dt_reps[, { #limits = boxplot.stats(intensity, coef = 0.7)$stats;
        list(labEff_PC = sum(intensity > 0, na.rm = TRUE) / (.N))
      }, by = c("fc.raw.file", "channel")]
      qcScore = ylims[, list(score_min = min(labEff_PC)), by=c("fc.raw.file")]
      colnames(qcScore) = c("fc.raw.file", .self$qcName)

      ## add manual title, since we return a grid.arrange() where automatic extraction is hard
      return(list(plots = lpl, qcScores = qcScore, title = rep(list(g_title), length(lpl))))
    },
    qcCat = "prep",
    qcName = "EVD:~Reporter~intensity",
    orderNr = 0031 ## should not show up in heatmap
  )
  return(.self)
  })
)
#####################################################################
qcMetric_EVD_ProteinCount = setRefClass(
  "qcMetric_EVD_ProteinCount",
  contains = "qcMetric",
  methods = list(initialize=function() {  callSuper(
    helpTextTemplate =
      "Number of Protein groups (after FDR) per Raw file. A configurable target threshold is indicated as dashed line.
If MBR was enabled, three categories ('genuine (exclusive)', 'genuine + transferred', 'transferred (exclusive)'
are shown, so the user can judge the gain that MBR provides. Here, 'transferred (exclusive)' means that this protein group
has peptide evidence which originates only from transferred peptide IDs. The quantification is (of course) always from the
local Raw file.
Proteins in the 'genuine + transferred' category have peptide evidence from within the Raw file by MS/MS, but at the same time
also peptide IDs transferred to this Raw file using MBR were used. It is not unusual to see the 'genuine + transferred' category be the
rather large, since a protein group usually has peptide evidence from both sources.
To see of MBR worked, it is better to look at the two MBR-related metrics.
If MBR would be switched off, you can expect to see the number of protein groups corresponding to 'genuine (exclusive)' + 'genuine + transferred'.
In general, if the MBR gain is low and the MBR scores are bad (see the two MBR-related metrics),
MBR should be switched off for the Raw files which are affected (could be a few or all).
Heatmap score [EVD: Prot Count (>%1.0f)]: Linear scoring from zero. Reaching or exceeding the target threshold gives a score of 100%%.
",
    ## Count protein groups per Raw file (genuine vs. MBR-transferred),
    ## plot counts against the target threshold and score linearly.
    ## df_evd: genuine evidence; df_evd_tf: MBR-transferred evidence;
    ## thresh_protCount: target protein-group count per file.
    workerFcn = function(.self, df_evd, df_evd_tf, thresh_protCount)
    {
      ## completeness check
      ## NOTE(review): unlike qcMetric_EVD_PeptideCount, df_evd_tf's columns
      ## are not validated here -- presumably guaranteed by the caller; confirm.
      req_cols = c("fc.raw.file", "protein.group.ids", "is.transferred")
      if (!checkInput(req_cols, df_evd)) return()

      ## substitute the threshold placeholder in the help text
      .self$helpText = sprintf(.self$helpTextTemplate, thresh_protCount)

      ## per-file counts by category (genuine/transferred), over both tables
      protC = getProteinCounts(rbind(df_evd[,req_cols], df_evd_tf[, req_cols]))
      protC$block = factor(assignBlocks(protC$fc.raw.file, 30))
      ## highest total count across files -- used to set a common y-axis
      max_prot = max(unlist(plyr::dlply(protC, "fc.raw.file", function(x) sum(x$counts))))

      ## average gain in percent (only reported if transferred evidence exists)
      reportMTD = nrow(df_evd_tf) > 0
      gain_text = ifelse(reportMTD, sprintf("MBR gain: +%.0f%%", mean(protC$MBRgain, na.rm = TRUE)), "")

      ## one plot per block of (up to 30) Raw files
      lpl = plyr::dlply(protC, "block", .fun = function(x)
      {
        p = plot_CountData(data = x,
                           y_max = max(thresh_protCount, max_prot)*1.1,
                           thresh_line = thresh_protCount,
                           title = c("EVD: ProteinGroups count", gain_text))
        #print(p)
        return (p)
      })

      ## QC measure for protein ID performance: sum of the 'genuine*' categories
      qc_protc = plyr::ddply(protC, "fc.raw.file", function(x){
        if (nrow(x) == 3 && length(grep("^genuine", x$category))!= 2){
          stop("expected two categories to start with 'genuine...'")
        }
        r = data.frame(genuineAll = sum(x$counts[grep("^genuine", x$category)]))
        return (r)
      })
      cname = sprintf(.self$qcName, thresh_protCount)
      qc_protc[,cname] = qualLinThresh(qc_protc$genuineAll, thresh_protCount)
      qcScore = qc_protc[, c("fc.raw.file", cname)]

      ## add mzQC metric (one metric object per Raw file; copy() so each
      ## object holds its own value rather than sharing the template)
      template_proteinCount = rmzqc::getQualityMetricTemplate("MS:1002406") # count of identified clusters
      mzqc = lapply(1:nrow(qc_protc), function(row){
        out = template_proteinCount$copy();
        out$value = qc_protc$genuineAll[row];
        #cat(row, " ", qc_protc$genuineAll[row], " ",out$value, "\n");
        return(out) })
      names(mzqc) = qc_protc$fc.raw.file
      ## done

      return(list(plots = lpl, qcScores = qcScore, mzQC = mzqc))
    },
    qcCat = 'general',
    qcName = "EVD:~Protein~Count~(\">%1.0f\")",
    orderNr = 0450
  )
  return(.self)
  })
)
#####################################################################
qcMetric_EVD_PeptideCount = setRefClass(
  "qcMetric_EVD_PeptideCount",
  contains = "qcMetric",
  methods = list(initialize=function() {  callSuper(
    helpTextTemplate =
      "Number of unique (i.e. not counted twice) peptide sequences including modifications (after FDR) per Raw file. A configurable target threshold is indicated as dashed line.
If MBR was enabled, three categories ('genuine (exclusive)', 'genuine + transferred', 'transferred (exclusive)'
are shown, so the user can judge the gain that MBR provides.
Peptides in the 'genuine + transferred' category were identified within the Raw file by MS/MS, but at the same time
also transferred to this Raw file using MBR. This ID transfer can be correct (e.g. in case of different charge states),
or incorrect -- see MBR-related metrics to tell the difference.
Ideally, the 'genuine + transferred' category should be rather small, the other two should be large.
If MBR would be switched off, you can expect to see the number of peptides corresponding to 'genuine (exclusive)' + 'genuine + transferred'.
In general, if the MBR gain is low and the MBR scores are bad (see the two MBR-related metrics),
MBR should be switched off for the Raw files which are affected (could be a few or all).
Heatmap score [EVD: Pep Count (>%1.0f)]: Linear scoring from zero. Reaching or exceeding the target threshold gives a score of 100%%.
",
    ## Count unique modified peptide sequences per Raw file (genuine vs.
    ## MBR-transferred), plot against the target threshold and score linearly.
    ## df_evd: genuine evidence; df_evd_tf: MBR-transferred evidence;
    ## thresh_pepCount: target peptide count per file.
    workerFcn = function(.self, df_evd, df_evd_tf, thresh_pepCount)
    {
      ## completeness check
      req_cols = c("fc.raw.file", "modified.sequence", "is.transferred")
      if (!checkInput(req_cols, df_evd)) return()
      ## scalar condition: use short-circuit '&&' (was vectorized '&'), so
      ## checkInput() is only evaluated when transferred evidence exists
      if (nrow(df_evd_tf)>0 && !checkInput(req_cols, df_evd_tf)) return()

      ## substitute the threshold placeholder in the help text
      .self$helpText = sprintf(.self$helpTextTemplate, thresh_pepCount)

      ## per-file counts by category (genuine/transferred), over both tables
      pepC = getPeptideCounts(rbind(df_evd[, req_cols], df_evd_tf[, req_cols]))
      pepC$block = factor(assignBlocks(pepC$fc.raw.file, 30))
      ## highest total count across files -- used to set a common y-axis
      max_pep = max(unlist(plyr::dlply(pepC, "fc.raw.file", function(x) sum(x$counts))))

      ## average gain in percent (only reported if transferred evidence exists)
      reportMTD = any(df_evd$is.transferred)
      gain_text = ifelse(reportMTD, sprintf("MBR gain: +%.0f%%", mean(pepC$MBRgain, na.rm = TRUE)), "")

      ## one plot per block of (up to 30) Raw files
      lpl = plyr::dlply(pepC, "block", .fun = function(x)
      {
        p = plot_CountData(data = x,
                           y_max = max(thresh_pepCount, max_pep)*1.1,
                           thresh_line = thresh_pepCount,
                           title = c("EVD: Peptide ID count", gain_text))
        #print(p)
        return (p)
      })

      ## QC measure for peptide ID performance: sum of the 'genuine*' categories
      qc_pepc = plyr::ddply(pepC, "fc.raw.file", function(x){
        if (nrow(x) == 3 && length(grep("^genuine", x$category))!= 2){
          stop("expected two categories to start with 'genuine...'")
        }
        r = data.frame(genuineAll = sum(x$counts[grep("^genuine", x$category)]))
        return (r)
      })
      cname = sprintf(.self$qcName, thresh_pepCount)
      qc_pepc[,cname] = qualLinThresh(qc_pepc$genuineAll, thresh_pepCount)
      qcScore = qc_pepc[, c("fc.raw.file", cname)]

      return(list(plots = lpl, qcScores = qcScore))
    },
    qcCat = 'general',
    qcName = "EVD:~Peptide~Count~(\">%1.0f\")",
    orderNr = 0400
  )
  return(.self)
  })
)
#####################################################################
qcMetric_EVD_RTPeakWidth = setRefClass(
  "qcMetric_EVD_RTPeakWidth",
  contains = "qcMetric",
  methods = list(initialize=function() {  callSuper(
    helpTextTemplate =
      "One parameter of optimal and reproducible chromatographic separation is the distribution of widths of
peptide elution peaks, derived from the evidence table. Ideally, all Raw files show a similar
distribution, e.g. to allow for equal conditions during dynamic precursor exclusion, RT alignment or
peptide quantification.
Heatmap score [EVD: RT Peak Width]: Scored using BestKS function, i.e. the D statistic of a Kolmogoriv-Smirnoff test.
",
    ## Plot elution-peak widths (retention.length) over retention time per
    ## Raw file; score each file by KS-distance of its width distribution
    ## against the most representative one (qualBestKS).
    ## df_evd: evidence table.
    workerFcn = function(.self, df_evd)
    {
      ## completeness check
      if (!checkInput(c("retention.time", "retention.length", "fc.raw.file"), df_evd)) return()

      ## compute some summary stats before passing data to ggplot (performance issue for large experiments)
      df_evd.m.d = plyr::ddply(df_evd[,c("retention.time", "retention.length", "fc.raw.file")], "fc.raw.file", .fun = peakWidthOverTime)
      head(df_evd.m.d)

      ## median peak width per Raw file (used to annotate the file names)
      df_evd.m.d_avg = plyr::ddply(df_evd[,c("retention.length","fc.raw.file")], "fc.raw.file", .fun = function(x) {
        #fcr = as.character(x$fc.raw.file[1])
        #cat(fcr)
        m = median(x$retention.length, na.rm = TRUE);
        return(data.frame(median = m))
      })
      df_evd.m.d_avg$fc.raw.file_aug = paste0(df_evd.m.d_avg$fc.raw.file, " (~", round(df_evd.m.d_avg$median, 1)," min)")
      ## stash per-file medians for later report sections
      .self$outData[["avg_peak_width"]] = df_evd.m.d_avg

      ## augment Raw filename with avg. RT peak width
      df_evd.m.d$fc.raw.file = plyr::mapvalues(df_evd.m.d$fc.raw.file, df_evd.m.d_avg$fc.raw.file, df_evd.m.d_avg$fc.raw.file_aug)

      df_evd.m.d$block = factor(assignBlocks(df_evd.m.d$fc.raw.file, 6)) ## color set is 9, so do not increase this (6*150%)
      ## identical limits for all plots
      df_evd.xlim = range(df_evd.m.d$RT, na.rm = TRUE)
      ## ignore top peaks, since they are usually early non-peptide eluents
      df_evd.ylim = c(0, quantile(df_evd.m.d$peakWidth, 0.99, na.rm = TRUE))

      ## plot peak width (one plot per block of 6 Raw files)
      lpl = list()
      for (bl in unique(df_evd.m.d$block))
      { ## needs to be within a function, otherwise rep_data$add and print() somehow have delayed eval's which confused ggplot...
        lpl[[bl]] = plot_RTPeakWidth(data = df_evd.m.d[df_evd.m.d$block==bl,], x_lim = df_evd.xlim, y_lim = df_evd.ylim)
      }

      ## QC measure for reproducibility of peak shape
      ##.. create a list of distributions (one vector of peak widths per file)
      l_dists = plyr::dlply(df_evd[,c("retention.length", "fc.raw.file")], "fc.raw.file", function(x) return(x$retention.length))
      qc_evd_PeakShape = qualBestKS(l_dists)
      colnames(qc_evd_PeakShape) = c("fc.raw.file", .self$qcName)

      return(list(plots = lpl, qcScores = qc_evd_PeakShape))
    },
    qcCat = "LC",
    qcName = "EVD:~RT~Peak~Width",
    orderNr = 0170
  )
  return(.self)
  })
)
#####################################################################
qcMetric_EVD_MBRAlign = setRefClass(
  "qcMetric_EVD_MBRAlign",
  contains = "qcMetric",
  methods = list(initialize=function() { callSuper(
    helpTextTemplate =
      "MBR Alignment: First of two steps (1=align, 2=transfer) during Match-between-runs.
This plot is based purely on real MS/MS ids. Ideally, RTs of identical peptides should be equal (i.e. very small residual RT delta)
across Raw files after alignment.
MaxQuants RT correction is shown in blue -- it should be well within the alignment search window (20min by default) set
during MaxQuant configuration.
The resulting residual RT delta after RT alignment (compared to a reference Raw file), is shown as green/red dots. One dot represents
one peptide (incl. charge). Every dot (peptide) outside an allowed residual delta RT (1min by default) is colored red.
All others are green.
The ratio of 'green' vs. 'green+red' peptides is annotated using 'sc: ' (for 'score') in the plot subtitles. High values are better (green peptides dominate).
If moving 'red' dots to the horizontal zero-line (to make them green) requires large RT shifts, then increasing the alignment
search window might help MaxQuant to find a better alignment.
Heatmap score [EVD: MBR Align]: ratio of 'green' vs. 'green+red' peptides
",
    ## Assess the quality of MaxQuant's MBR retention-time alignment using
    ## genuine (MULTI-MSMS) identifications only.
    ##
    ## @param df_evd data.frame of evidence.txt; requires columns 'type',
    ##        'calibrated.retention.time', 'retention.time.calibration', 'id',
    ##        'raw.file', 'modified.sequence', 'charge' (optionally 'fraction')
    ## @param tolerance_matching allowed residual RT delta (minutes) after alignment
    ## @param raw_file_mapping mapping of full raw file names to short display names
    ## @return list(plots = <list of ggplot>, qcScores = <data.frame>)
    workerFcn = function(.self, df_evd, tolerance_matching, raw_file_mapping)
    {
      ## completeness check
      if (!checkInput(c("type", "calibrated.retention.time", "retention.time.calibration", "id", "raw.file", "modified.sequence", "charge"), df_evd)) return()

      ## find reference
      if (('fraction' %in% colnames(df_evd)) && (length(unique(df_evd$fraction)) > 1)) {
        ## fractions: there must be more than one, otherwise MQ will treat the samples as unfractionated
        refRaw = NA
        col_fraction = "fraction"
        txt_subtitle = "fraction: neighbour comparison"
        evd_has_fractions = TRUE
        df_evd$fraction[is.na(df_evd$fraction)] = 32000 ## sentinel for 'fraction missing'
      } else {
        refRaw = findAlignReference(df_evd)
        col_fraction = c()
        txt_subtitle = paste("alignment reference:", gsub("\\", "/", refRaw, fixed = TRUE)) ## subtitles in ggplot must not contain '\'
        evd_has_fractions = FALSE
      }

      lpl = list()
      qcScore = .self$qcScores
      if (!evd_has_fractions && length(refRaw) == 0) {
        lpl[[1]] = ggText("EVD: Alignment check", paste0("Cannot find a reference Raw file!\nPlease report this as a 'bug'!"))
      } else {
        ## scalar condition --> use '&&' (also: R >= 4.3 errors on vector '&&' operands)
        if (!evd_has_fractions && (length(refRaw) != 1))
        {
          ## FIX: warn BEFORE truncating 'refRaw', so the message actually lists
          ## all candidate files (previously it was emitted after 'refRaw = refRaw[1]'
          ## and thus always showed only a single file)
          warning(paste0("Cannot find a unique reference Raw file (files: ", paste(refRaw, collapse=", "), "). Picking the first."), immediate. = TRUE)
          refRaw = refRaw[1] ## take the first
        }
        ## find RT curve based on genuine 3D peaks (should be flat)
        d_alignQ = alignmentCheck(df_evd[(df_evd$type %in% c("MULTI-MSMS")),
                                         c("calibrated.retention.time",
                                           "id", "raw.file", col_fraction, "modified.sequence", "charge")],
                                  referenceFile = refRaw)
        ## augment more columns
        d_alignQ$retention.time.calibration = df_evd$retention.time.calibration[match(d_alignQ$id, df_evd$id)]

        ## if all RT corrections are (near) zero, MQ did not calibrate at all
        if (diff(range(na.omit(d_alignQ$retention.time.calibration))) < 1e-5)
        {
          txt_subtitle = paste0(txt_subtitle, " || WARNING: MaxQuant did not correct RTs in any way!");
          warning("EVD MBRAlign: MaxQuant did not correct RTs in any way, despite MBR=on")
        }

        if (nrow(d_alignQ)==0)
        { ## very unusual case: reference contains no evidence -- e.g. pull-down experiment
          lpl[[1]] = ggText("EVD: RT Distance of peptides from reference after alignment", "Alignment cannot be verfied -- no data.")
        } else {
          ## filter data (reduce PDF file size)
          evd_RT_t = thinOutBatch(d_alignQ,
                                  "calibrated.retention.time",
                                  "raw.file")
          evd_RT_t$fc.raw.file = renameFile(evd_RT_t$raw.file, raw_file_mapping)

          ## QC measure for alignment quality
          ## compute % of matches within matching boundary (1 min by default)
          qcAlign = ScoreInAlignWindow(d_alignQ, tolerance_matching)
          if (!is.na(refRaw)) { ## rescue reference file (it will not show up in fraction-less data, and would otherwise be scored 'red')
            qcAlign = rbind(qcAlign, data.frame(raw.file=refRaw, withinRT=1))
          }
          qcAlign[, .self$qcName] = qcAlign$withinRT
          qcScore = qcAlign[, c("raw.file", .self$qcName)]

          qcAlign$fc.raw.file = renameFile(qcAlign$raw.file, raw_file_mapping)
          qcAlign$newlabel = qcAlign$fc.raw.file
          if (evd_has_fractions)
          { ## amend fc.raw.file with fraction number
            qcAlign$fraction = df_evd$fraction[match(qcAlign$fc.raw.file, df_evd$fc.raw.file)]
            qcAlign$newlabel = paste0(qcAlign$fc.raw.file, " - frc", qcAlign$fraction)
          }
          ## amend fc.raw.file with % good ID pairs
          qcAlign$newlabel = paste0(qcAlign$newlabel, " (sc: ", round(qcAlign$withinRT*100), "%)")
          evd_RT_t$fc.raw.file_ext = plyr::mapvalues(evd_RT_t$fc.raw.file, qcAlign$fc.raw.file, qcAlign$newlabel)
          ## classify each peptide dot: within tolerance = green, outside = red
          evd_RT_t$RTdiff_in = c("green", "red")[(abs(evd_RT_t$rtdiff) > tolerance_matching)+1]

          ## plot alignment result
          y_lim = quantile(c(evd_RT_t$rtdiff, evd_RT_t$retention.time.calibration), probs = c(0.01, 0.99), na.rm = TRUE) * 1.1
          lpl =
            byX(evd_RT_t, evd_RT_t$fc.raw.file, 3*3, plot_MBRAlign, sort_indices = FALSE,
                y_lim = y_lim, title_sub = txt_subtitle, match_tol = tolerance_matching)
        } ## no data
      } ## ambigous reference file

      return(list(plots = lpl, qcScores = qcScore))
    },
    qcCat = "LC",
    qcName = "EVD:~MBR~Align",
    orderNr = 0210
  )
  return(.self)
  })
)
#####################################################################
qcMetric_EVD_MBRIdTransfer = setRefClass(
  "qcMetric_EVD_MBRIdTransfer",
  contains = "qcMetric",
  methods = list(initialize=function() { callSuper(
    helpTextTemplate =
      "MBR Transfer: Last of two steps (1=align, 2=transfer) during Match-between-runs.
If MaxQuant only transfers peptide ID's which are not present in the target file, then
each Raw file should not have any duplicates of identical peptides (incl. charge).
Sometimes, a single or split 3D-peak gets annotated multiple times, that's ok. However, the same peptide should not
be annotated twice (or more) at vastly different points in RT.
This plot shows three columns:
- left: the 'genuine' situation (pretending that no MBR was computed)
- middle: looking only at transferred IDs
- right: combined picture (a mixture of left+middle, usually)
Each peptide falls into three categories (the colors):
- single (good, because it has either one genuine OR a transferred ID).
- in-group (also good, because all ID's are very close in RT)
- out-group (bad, spread across the RT gradient -- should not be possible; a false ID)
Heatmap score [EVD: MBR ID-Transfer]: The fraction of non-out-group peptides (i.e. good peptides) in the middle column.
This score is 'pessimistic' because if few ID's were transferred, but all of them are bad, the score is bad, even though
the majority of peptides is still ok (because they are genuine). However, in this case MBR
provides few (and wrong) additional information, and should be disabled.
",
    ## Quantify whether MBR-transferred IDs agree in RT with genuine IDs.
    ##
    ## @param df_evd data.frame of genuine evidence entries (requires 'modified.sequence')
    ## @param df_evd_tf data.frame of transferred evidence entries
    ## @param avg_peak_width per-Raw-file average RT peak width; serves as the
    ##        allowed RT delta for the 'in-group' classification
    ## @return list(plots = <list of ggplot>, qcScores = <data.frame>)
    workerFcn = function(.self, df_evd, df_evd_tf, avg_peak_width)
    {
      ## completeness check
      if (!checkInput(c("modified.sequence"), df_evd)) return()

      ## combine genuine and transferred evidence into one table
      df_evd_all = merge(df_evd, df_evd_tf, all = TRUE)

      ## increase of segmentation by MBR:
      ## three values returned: single peaks(%) in genuine, transferred and all(combined)
      qMBR = peakSegmentation(df_evd_all)

      ## for groups: get their RT-spans
      ##   ... genuine ID's only (as 'rtdiff_genuine')
      ##   or genuine+transferred (as 'rtdiff_mixed'))
      ## Could be empty (i.e. no groups, just singlets) if data is really sparse ..
      qMBRSeg_Dist = idTransferCheck(df_evd_all)

      ## Check which fraction of ID-pairs belong to the 'in-width' group.
      ## The allowed RT delta is given in 'avg_peak_width' (estimated from global peak width for each file)
      qMBRSeg_Dist_inGroup = inMatchWindow(qMBRSeg_Dist, df.allowed.deltaRT = avg_peak_width)

      ## puzzle together final picture
      scoreMBRMatch = computeMatchRTFractions(qMBR, qMBRSeg_Dist_inGroup)

      ## plot ID-transfer
      lpl =
        byX(scoreMBRMatch, scoreMBRMatch$fc.raw.file, 12, plot_MBRIDtransfer, sort_indices = FALSE)

      ##
      ## Quality: fraction of non-out-group peptides among the transferred IDs
      ##
      qualMBR.m = merge(scoreMBRMatch[scoreMBRMatch$sample=="genuine",],
                        scoreMBRMatch[scoreMBRMatch$sample=="transferred",], by="fc.raw.file")
      qualMBR.m = merge(qualMBR.m, scoreMBRMatch[scoreMBRMatch$sample=="all",], by="fc.raw.file")
      cname = .self$qcName
      qualMBR.m[, cname] = 1 - qualMBR.m$multi.outRT.y # could be NaN if: no-transfer at all, or: no groups but only singlets transferred
      qualMBR.m[is.na(qualMBR.m$multi.outRT.y) & !is.na(qualMBR.m$single.y), cname] = 1 ## only singlets transferred, wow...
      qualMBR.m[is.na(qualMBR.m[, cname]), cname] = HEATMAP_NA_VALUE
      qcScore = qualMBR.m[, c("fc.raw.file", cname)]

      return(list(plots = lpl, qcScores = qcScore))
    },
    qcCat = "LC",
    qcName = "EVD:~MBR~ID-Transfer",
    orderNr = 0220
  )
  return(.self)
  })
)
#####################################################################
qcMetric_EVD_MBRaux = setRefClass(
"qcMetric_EVD_MBRaux",
contains = "qcMetric",
methods = list(initialize=function() { callSuper(
helpTextTemplate =
"Auxililiary plots -- experimental -- without scores.
Return a tree plot with a possible alignment tree.
This allows the user to judge which Raw files have similar corrected RT's (i.e. where aligned successfully).
If there are clear sub-clusters, it might be worth introducing artifical fractions into MaxQuant,
to avoid ID-transfer between these clusters (use the MBR-Align and MBR-ID-Transfer metrics to support the decision).
If the input contains fractions, leaf nodes will be colored accordingly.
Distinct sub-clusters should have their own color.
If not, MaxQuant's fraction settings should be optimized.
Note that introducing fractions in MaxQuant will naturally lead to a clustering here (it's somewhat circular).
Heatmap score: none.
",
## Auxiliary (score-free) MBR diagnostics:
##  1) a clustering tree of Raw files based on corrected RTs of genuine
##     MULTI-MSMS identifications, optionally colored by fraction
##  2) if MBR was active, the absolute and relative ID gain per Raw file
##
## @param df_evd data.frame of evidence.txt; requires columns 'type', 'is.transferred',
##        'calibrated.retention.time', 'fc.raw.file', 'modified.sequence', 'charge'
## @return list(plots = <list of ggplot>) -- deliberately no qcScores
workerFcn = function(.self, df_evd)
{
## completeness check
if (!checkInput(c("type", "is.transferred", "calibrated.retention.time", "fc.raw.file", "modified.sequence", "charge"), df_evd)) return()
if (('fraction' %in% colnames(df_evd)) && (length(unique(df_evd$fraction)) > 1)) {
## fractions: there must be more than one, otherwise MQ will treat the samples as unfractionated
col_fraction = "fraction"
} else {
col_fraction = c()
}
lpl = list()
## cluster Raw files by the RT behaviour of their shared, genuine peptide ids
lpl[["tree"]] =
RTalignmentTree(df_evd[(df_evd$type %in% c("MULTI-MSMS")),
c("calibrated.retention.time", "fc.raw.file", col_fraction, "modified.sequence", "charge")],
col_fraction = col_fraction)
## MBR: additional evidence by matching MS1 by AMT across files
if (any(df_evd$is.transferred)) {
## gain for each raw file: absolute gain, and percent gain
mtr.df = plyr::ddply(df_evd, "fc.raw.file", function(x) {
match_count_abs = sum(x$is.transferred)
## if only matched IDs are present, this would be 'Inf' -- we limit that to 1e4
match_count_pc = min(1e4, round(100*match_count_abs/(nrow(x)-match_count_abs))) ## newIDs / oldIDs
return (data.frame(abs = match_count_abs, pc = match_count_pc))
})
lpl[["gain"]] =
plot_MBRgain(data = mtr.df, title_sub = "")
}
return(list(plots = lpl))
},
qcCat = "LC",
qcName = "EVD:~MBR~auxilliary",
orderNr = 0221
)
return(.self)
})
)
#####################################################################
qcMetric_EVD_Charge = setRefClass(
  "qcMetric_EVD_Charge",
  contains = "qcMetric",
  methods = list(initialize=function() { callSuper(
    helpTextTemplate =
      "Charge distribution per Raw file. For typtic digests, peptides of charge 2
(one N-terminal and one at tryptic C-terminal R or K residue) should be dominant.
Ionization issues (voltage?), in-source fragmentation, missed cleavages and buffer irregularities can
cause a shift (see Bittremieux 2017, DOI: 10.1002/mas.21544).
The charge distribution should be similar across Raw files.
Consistent charge distribution is paramount for comparable 3D-peak intensities across samples.
Heatmap score [EVD: Charge]: Deviation of the charge 2 proportion from a representative Raw file ('qualMedianDist' function).
",
    ## Plot the per-file charge-state distribution (genuine IDs only) and score
    ## how far each file's charge-2 fraction deviates from the cohort's
    ## representative value.
    ##
    ## @param df_evd data.frame of evidence.txt; requires columns
    ##        'is.transferred', 'fc.raw.file' and 'charge'
    ## @return list(plots = <list of ggplot>, qcScores = <data.frame>)
    workerFcn = function(.self, df_evd)
    {
      ## completeness check
      if (!checkInput(c("is.transferred", "fc.raw.file", "charge"), df_evd)) return()

      ## only genuine (non-MBR-transferred) identifications enter the distribution
      is_genuine = !df_evd$is.transferred

      ## mosaic-style contingency data for plotting
      charge_mosaic = mosaicize(df_evd[is_genuine, c("fc.raw.file", "charge")])
      lpl =
        byXflex(charge_mosaic, charge_mosaic$Var1, 30, plot_Charge, sort_indices = TRUE)

      ## QC measure for charge centeredness: fraction of charge==2 IDs per Raw file
      charge2_frac = plyr::ddply(df_evd[is_genuine, c("charge", "fc.raw.file")], "fc.raw.file",
                                 function(grp) {
                                   data.frame(c = sum(grp$charge == 2) / nrow(grp))
                                 })
      charge2_frac[, .self$qcName] = qualMedianDist(charge2_frac$c)

      return(list(plots = lpl, qcScores = charge2_frac[, c("fc.raw.file", .self$qcName)]))
    },
    qcCat = "prep",
    qcName = "EVD:~Charge",
    orderNr = 0100
  )
  return(.self)
  })
)
#####################################################################
qcMetric_EVD_IDoverRT = setRefClass(
"qcMetric_EVD_IDoverRT",
contains = "qcMetric",
methods = list(initialize=function() { callSuper(
helpTextTemplate =
"Judge column occupancy over retention time.
Ideally, the LC gradient is chosen such that the number of identifications (here, after FDR filtering) is
uniform over time, to ensure consistent instrument duty cycles. Sharp peaks and uneven distribution of
identifications over time indicate potential for LC gradient optimization.
See [Moruz 2014, DOI: 10.1002/pmic.201400036](https://pubmed.ncbi.nlm.nih.gov/24700534/) for details.
Heatmap score [EVD: ID rate over RT]: Scored using 'Uniform' scoring function, i.e. constant receives good score, extreme shapes are bad.
",
## Histogram the number of identifications over retention time per Raw file and
## score how uniform each file's ID rate is across the gradient.
##
## @param df_evd data.frame of evidence.txt; requires columns
##        'retention.time' and 'fc.raw.file'
## @return list(plots = <list of ggplot>, qcScores = <data.frame>)
workerFcn = function(.self, df_evd)
{
## completeness check
if (!checkInput(c("retention.time", "fc.raw.file"), df_evd)) return()
## number of Raw files shown per plot page
raws_perPlot = 6
## global RT range, so all files share identical histogram bins
rt_range = range(df_evd$retention.time, na.rm = TRUE)
## 3-minute RT bins, padded by one bin on each side to cover the extremes
df_idRT = plyr::ddply(df_evd, "fc.raw.file", function(x) {
h = hist(x$retention.time, breaks=seq(from=rt_range[1]-3, to=rt_range[2]+3, by=3), plot = FALSE)
return(data.frame(RT = h$mid, counts = h$counts))
})
lpl =
byXflex(df_idRT, df_idRT$fc.raw.file, raws_perPlot, plot_IDsOverRT, sort_indices = TRUE)
## QC measure for uniform-ness of the ID rate over the whole gradient
qcScore = plyr::ddply(df_evd[, c("retention.time", "fc.raw.file")], "fc.raw.file",
function(x) data.frame(metric = qualUniform(na.omit(x$retention.time))))
colnames(qcScore)[colnames(qcScore)=="metric"] = .self$qcName
return(list(plots = lpl, qcScores = qcScore))
},
qcCat = "LC",
qcName = "EVD:~ID~rate~over~RT",
orderNr = 0150
)
return(.self)
})
)
#####################################################################
qcMetric_EVD_PreCal = setRefClass(
"qcMetric_EVD_PreCal",
contains = "qcMetric",
methods = list(initialize=function() { callSuper(
helpTextTemplate =
"Mass accurary before calibration. Outliers are marked as such ('out-of-search-tol') using ID rate and standard deviation as additional information (if available).
If any Raw file is flagged 'failed', increasing MaxQuant's first-search tolerance (20ppm by default, here: %1.1f ppm) might help
to enable successful recalibration.
A bug in MaxQuant sometimes leads to excessively high ppm mass errors (>10<sup>4</sup>) reported in the output
data. However, this can sometimes be corrected for by re-computing the delta mass error from other data. If this is
the case, a warning ('bugfix applied') will be shown.
Heatmap score [EVD: MS Cal Pre (%1.1f)]: the centeredness (function CenteredRef) of uncalibrated masses in relation to the search window size.
",
## Plot uncalibrated precursor mass errors and score their centeredness relative
## to MaxQuant's first-search tolerance window.
##
## @param df_evd data.frame of evidence.txt; requires 'fc.raw.file' and
##        'uncalibrated.mass.error..ppm.' ('mass.error..ppm.' is dummied if absent)
## @param df_idrate per-file identification rates, passed to fixCalibration
## @param tolerance_pc_ppm MaxQuant first-search precursor tolerance in ppm
## @param tolerance_sd_PCoutOfCal std-dev threshold used to flag out-of-calibration files
## @return list(plots = <list of ggplot>, qcScores = <data.frame>)
workerFcn = function(.self, df_evd, df_idrate, tolerance_pc_ppm, tolerance_sd_PCoutOfCal)
{
## bake the actual tolerance into the '%1.1f' placeholders of the help text
.self$helpText = sprintf(.self$helpTextTemplate, tolerance_pc_ppm, tolerance_pc_ppm)
## completeness check
if (!checkInput(c("fc.raw.file", "uncalibrated.mass.error..ppm."), df_evd)) return()
## for some mzTab (not recalibrated) 'mass.error..ppm.' is not there... but we only need a dummy
if (!("mass.error..ppm." %in% colnames(df_evd))) df_evd$mass.error..ppm. = 0
## detect (and if possible repair) MaxQuant's mass-error bug; also flags out-of-calibration files
fix_cal = fixCalibration(df_evd, df_idrate, tolerance_sd_PCoutOfCal)
if (is.null(fix_cal)) {
warning("Internal error. Data missing. Skipping metric!", immediate. = TRUE)
return()
}
## some outliers can have ~5000ppm, blowing up the plot margins
## --> remove outliers
ylim_g = range(boxplot.stats(fix_cal$df_evd$uncalibrated.mass.error..ppm.)$stats[c(1, 5)], c(-tolerance_pc_ppm, tolerance_pc_ppm) * 1.05)
## PLOT
lpl =
byXflex(fix_cal$df_evd, fix_cal$df_evd$fc.raw.file, 20, plot_UncalibratedMSErr, sort_indices = TRUE,
MQBug_raw_files = fix_cal$affected_raw_files,
y_lim = ylim_g,
stats = fix_cal$stats,
extra_limit = tolerance_pc_ppm,
title_sub = fix_cal$recal_message)
## scores: one centeredness score per Raw file
qc_MS1deCal = plyr::ddply(fix_cal$df_evd, "fc.raw.file",
function(x) {
xd = na.omit(x$uncalibrated.mass.error..ppm.)
if (length(xd)==0) {
r = HEATMAP_NA_VALUE ## if empty, give the Raw file an 'NA' score
} else if (fix_cal$stats$outOfCal[fix_cal$stats$fc.raw.file == x$fc.raw.file[1]]) {
r = 0 ## if we suspect out-of-calibration, give lowest score
} else {
r = qualCenteredRef(xd, tolerance_pc_ppm)
}
return (data.frame(med_rat = r))
})
## qcName contains a '%1.1f' placeholder -- bake the tolerance into the score column name
cname = sprintf(.self$qcName, tolerance_pc_ppm)
colnames(qc_MS1deCal) = c("fc.raw.file", cname)
return(list(plots = lpl, qcScores = qc_MS1deCal))
},
qcCat = "MS",
qcName = "EVD:~MS~Cal-Pre~(%1.1f)",
orderNr = 0260
)
return(.self)
})
)
#####################################################################
qcMetric_EVD_PostCal = setRefClass(
"qcMetric_EVD_PostCal",
contains = "qcMetric",
methods = list(initialize=function() { callSuper(
helpTextTemplate =
"Precursor mass accuracy after calibration. Failed samples from precalibration data are still marked here.
Ppm errors should be centered on zero and their spread is expected to be significantly smaller than before calibration.
Heatmap score [EVD: MS Cal-Post]: The variance and centeredness around zero of the calibrated distribution (function GaussDev).
",
## Plot calibrated precursor mass errors and score each Raw file by how tightly
## the post-calibration ppm error distribution is centered on zero.
##
## @param df_evd data.frame of evidence.txt; requires 'uncalibrated.mass.error..ppm.',
##        'mass' and 'mass.error..ppm.'
## @param df_idrate per-file identification rates, passed to fixCalibration
## @param tolerance_pc_ppm first-search tolerance in ppm (forwarded to fixCalibration context)
## @param tolerance_sd_PCoutOfCal std-dev threshold used to flag out-of-calibration files
## @param tol_ppm_mainSearch MaxQuant main-search tolerance in ppm (plot limit line)
## @return list(plots = <list of ggplot>, qcScores = <data.frame>)
workerFcn = function(.self, df_evd, df_idrate, tolerance_pc_ppm, tolerance_sd_PCoutOfCal, tol_ppm_mainSearch)
{
## completeness check
if (!checkInput(c("uncalibrated.mass.error..ppm.", "mass", "mass.error..ppm."), df_evd)) return()
## NOTE(review): unlike the Pre-Cal metric, a NULL result of fixCalibration()
## is not guarded against here -- confirm fixCalibration cannot return NULL on this path
fix_cal = fixCalibration(df_evd, df_idrate, tolerance_sd_PCoutOfCal)
## clamp y-range to whiskers + main-search tolerance, to keep outliers from blowing up the plot
ylim_g = range(na.rm = TRUE, boxplot.stats(fix_cal$df_evd$mass.error..ppm.)$stats[c(1, 5)], c(-tol_ppm_mainSearch, tol_ppm_mainSearch) * 1.05)
## PLOT
lpl =
byXflex(fix_cal$df_evd, fix_cal$df_evd$fc.raw.file, 20, plot_CalibratedMSErr, sort_indices = TRUE,
MQBug_raw_files = fix_cal$affected_raw_files,
y_lim = ylim_g,
stats = fix_cal$stats,
extra_limit = tol_ppm_mainSearch,
title_sub = fix_cal$recal_message_post)
## QC measure for post-calibration ppm error
## .. assume 0 centered and StdDev of observed data
obs_par = plyr::ddply(fix_cal$df_evd[, c("mass.error..ppm.", "fc.raw.file")], "fc.raw.file",
function(x) data.frame(mu = mean(x$mass.error..ppm., na.rm = TRUE),
sd = sd(x$mass.error..ppm., na.rm = TRUE)))
## NOTE(review): '1:nrow(obs_par)' misbehaves for zero rows -- seq_len(nrow(obs_par)) would be safer
qc_MS1Cal = data.frame(fc.raw.file = obs_par$fc.raw.file,
val = sapply(1:nrow(obs_par), function(x) qualGaussDev(obs_par$mu[x], obs_par$sd[x])))
## if we suspect out-of-calibration, give lowest score
qc_MS1Cal$val[qc_MS1Cal$fc.raw.file %in% fix_cal$stats$fc.raw.file[ fix_cal$stats$outOfCal ]] = 0
## MQ mass bugfix will not work for postCalibration, since values are always too low
qc_MS1Cal$val[qc_MS1Cal$fc.raw.file %in% fix_cal$stats$fc.raw.file[ fix_cal$stats$hasMassErrorBug ]] = HEATMAP_NA_VALUE
colnames(qc_MS1Cal)[colnames(qc_MS1Cal) == "val"] = .self$qcName
return(list(plots = lpl, qcScores = qc_MS1Cal))
},
qcCat = "MS",
qcName = "EVD:~MS~Cal-Post",
orderNr = 0270
)
return(.self)
})
)
#####################################################################
qcMetric_EVD_Top5Cont = setRefClass(
  "qcMetric_EVD_Top5Cont",
  contains = "qcMetric",
  methods = list(initialize=function() { callSuper(
    helpTextTemplate =
      "PTXQC will explicitly show the five most abundant external protein contaminants
(as detected via MaxQuant's contaminants FASTA file) by Raw file, and summarize the
remaining contaminants as 'other'. This allows to track down which proteins exactly contaminate your sample.
Low contamination is obviously better.
The 'Abundance class' models the average peptide intensity in each Raw file and is visualized using varying degrees of
transparency. It is not unusual to see samples with low sample content to have higher contamination.
If you see only one abundance class ('mid'), this means all your Raw files have roughly
the same peptide intensity distribution.
If you see less than 5 contaminants, it either means there are actually less, or that one (or more) of the shortened contaminant names
subsume multiple of the top5 contaminants (since they start with the same prefix).
Heatmap score [EVD: Contaminants]: as fraction of summed intensity with 0 = sample full of contaminants; 1 = no contaminants
",
    ## Identify the five globally most intense contaminant proteins and plot their
    ## intensity fraction per Raw file; score each file by its overall contaminant fraction.
    ##
    ## @param df_evd data.frame of evidence.txt; requires columns 'intensity',
    ##        'contaminant', 'fc.raw.file' and 'proteins' ('protein.names' used if present)
    ## @return list(plots = <list of ggplot>, qcScores = <data.frame>)
    workerFcn = function(.self, df_evd)
    {
      ## completeness check
      if (!checkInput(c("intensity", "contaminant", "fc.raw.file", "proteins"), df_evd)) return()

      ##
      ## elaborate contaminant fraction per Raw.file (this is not possible from PG, since raw files could be merged)
      ## find top 5 contaminants (globally)
      ##
      ## if possible, work on protein names (since MQ1.4), else use proteinIDs
      if ("protein.names" %in% colnames(df_evd))
      {
        evd_pname = "protein.names"
      } else if ("proteins" %in% colnames(df_evd)) {
        evd_pname = "proteins"
      } else {
        stop("Top5-Contaminants: Neither 'protein.names' nor 'proteins' column was found in data but is required.")
      }
      ## protein.names are sometimes not unique, e.g. if a contaminant is involved:
      ## "P02768;CON__P02768-1" and "P02768" will both give the same name (since contaminant name is empty)
      ## Thus, the distribution of bars will look slightly different (but summed percentages are identical)

      ## some protein.names are empty (usually the CON__ ones) ... so we substitute with ID
      df_evd$pname = df_evd[, evd_pname];
      df_evd$pname[df_evd$pname==""] = df_evd$proteins[df_evd$pname==""] ## a NOP if it already is 'proteins', but ok

      df_evd.totalInt = sum(as.numeric(df_evd$intensity), na.rm = TRUE)
      df_evd.cont.only = df_evd[df_evd$contaminant > 0,]
      ## per-contaminant percentage of the grand total intensity (across all Raw files)
      cont.top = by(df_evd.cont.only, df_evd.cont.only$pname, function(x) sum(as.numeric(x$intensity), na.rm = TRUE) / df_evd.totalInt*100)
      cont.top.sort = sort(cont.top, decreasing = TRUE)
      ## FIX: use head() instead of '[1:5]' -- the latter yields NA entries
      ## whenever fewer than 5 contaminants are present
      cont.top5.names = head(names(cont.top.sort), 5)

      lpl = list()
      if (length(cont.top5.names) == 0)
      {
        lpl[["noCont"]] = ggText("EVD: Top5 Contaminant per Raw file",
                                 "No contaminants found in any sample.\n\nIncorporating contaminants during search is highly recommended!",
                                 "red")
      } else {
        lpl =
          byXflex(df_evd[, c("intensity", "pname", "fc.raw.file", "contaminant")], df_evd$fc.raw.file, 40, sort_indices = TRUE,
                  plot_ContEVD, top5=cont.top5.names)
      }

      ## QC measure for contamination
      ## ('contaminant' is used as a logical row mask below -- assumes a logical column; TODO confirm)
      qc_cont = plyr::ddply(df_evd[, c("intensity", "contaminant", "fc.raw.file")], "fc.raw.file",
                            function(x) {
                              val = ifelse(length(cont.top5.names) == 0,
                                           HEATMAP_NA_VALUE, ## use NA in heatmap if there are no contaminants
                                           1-qualLinThresh(sum(as.numeric(x$intensity[x$contaminant]), na.rm = TRUE)/
                                                           sum(as.numeric(x$intensity), na.rm = TRUE)))
                              return(data.frame(val = val, check.names = FALSE))
                            }
      )
      colnames(qc_cont)[colnames(qc_cont) == "val"] = .self$qcName
      return(list(plots = lpl, qcScores = qc_cont))
    },
    qcCat = "Prep",
    qcName = "EVD:~Contaminants",
    orderNr = 0010
  )
  return(.self)
  })
)
#####################################################################
qcMetric_EVD_MS2OverSampling = setRefClass(
  "qcMetric_EVD_MS2OverSampling",
  contains = "qcMetric",
  methods = list(initialize=function() { callSuper(
    helpTextTemplate =
      "An oversampled 3D-peak is defined as a peak whose peptide ion (same sequence and same charge
state) was identified by at least two distinct MS<sup>2</sup> spectra in the same Raw file.
For high complexity samples, oversampling of individual 3D-peaks automatically leads to undersampling
or even omission of other 3D-peaks, reducing the number of identified peptides. Oversampling occurs in
low-complexity samples or long LC gradients, as well as undersized dynamic exclusion windows for data
independent acquisitions.
Heatmap score [EVD: MS<sup>2</sup> Oversampling]: The percentage of non-oversampled 3D-peaks.
",
    ## Tabulate, per Raw file, how often each identified 3D-peak was fragmented
    ## (1x, 2x, 3+ times) and score the fraction fragmented exactly once.
    ##
    ## @param df_evd data.frame of evidence.txt; requires columns
    ##        'fc.raw.file' and 'ms.ms.count'
    ## @return list(plots = <list of ggplot>, qcScores = <data.frame>)
    workerFcn = function(.self, df_evd)
    {
      ## completeness check
      if (!checkInput(c("fc.raw.file", "ms.ms.count"), df_evd)) return()

      ## relative frequency of MS2-per-peak categories, per Raw file
      d_dups = plyr::ddply(df_evd, "fc.raw.file", function(evd_file) {
        cnt_tab = as.data.frame(table(evd_file$ms.ms.count), stringsAsFactors = FALSE)
        cnt_tab$Count = as.numeric(cnt_tab$Var1)
        ## remove "0", since this would be MBR-features
        cnt_tab = cnt_tab[cnt_tab$Count != 0, ]
        ## collapse all categories with 3 or more fragmentations into one "3+" bin
        if (any(cnt_tab$Count >= 3)) {
          cnt_tab$Count[cnt_tab$Count >= 3] = "3+"
          cnt_tab = plyr::ddply(cnt_tab, "Count", function(bin) data.frame(Freq = sum(bin$Freq)))
        }
        ## convert absolute counts to percentages
        return(data.frame(n = as.character(cnt_tab$Count),
                          fraction = cnt_tab$Freq / sum(cnt_tab$Freq) * 100))
      })

      lpl =
        byXflex(d_dups, d_dups$fc.raw.file, 30, plot_MS2Oversampling, sort_indices = TRUE)

      ## QC measure: percentage of peaks which were fragmented exactly once
      qc_evd_twin = d_dups[d_dups$n == 1, ]
      cname = .self$qcName
      qc_evd_twin[, cname] = qualLinThresh(qc_evd_twin$fraction / 100)
      return(list(plots = lpl, qcScores = qc_evd_twin[, c("fc.raw.file", cname)]))
    },
    qcCat = "MS",
    qcName = "EVD:~MS^2~Oversampling",
    orderNr = 0250
  )
  return(.self)
  })
)
#####################################################################
qcMetric_EVD_CarryOver = setRefClass(
  "qcMetric_EVD_CarryOver",
  contains = "qcMetric",
  methods = list(initialize=function() { callSuper(
    helpTextTemplate =
      "Sample carryover ... not ready yet...
Heatmap score [EVD: Carryover]: The percentage of peptide identifications whose sequence gives rise to a large 'retention span'.
",
    ## Flag peptides with suspiciously wide RT spans (possible sample carryover)
    ## and score each Raw file by the fraction of such IDs.
    ##
    ## @param df_evd data.frame of evidence.txt; requires columns 'fc.raw.file',
    ##        'ms.ms.count', 'retention.length' and 'retention.time'
    ## @return list(plots = <list of ggplot>, qcScores = <data.frame>)
    workerFcn = function(.self, df_evd)
    {
      ## completeness check
      ## FIX: 'retention.time' is used below (rt_range, histogram) but was missing from the check
      if (!checkInput(c("fc.raw.file", "ms.ms.count", "retention.length", "retention.time"), df_evd)) return()

      raws_perPlot = 6

      ## RT-span threshold above which an ID is considered suspiciously wide
      aboveThresh_fn = function(data) {
        t = median(data, na.rm=TRUE) * 10
        return(t)
      }

      ## global RT range, so all files share identical histogram bins
      rt_range = range(df_evd$retention.time, na.rm = TRUE)
      df_carry = plyr::ddply(df_evd, "fc.raw.file", function(x) {
        thresh = round(aboveThresh_fn(x$retention.length), 1)
        weirds = x[x$retention.length > thresh,]
        ## remove "0", since this would be MBR-features
        weirds = weirds[weirds$ms.ms.count!=0,]
        if (nrow(weirds) == 0) return(data.frame())
        ## augment the file label with the threshold used for this file
        weirds$fc.raw.file = paste0(weirds$fc.raw.file, " (>", thresh, " min)")
        ## 3-minute RT bins, padded by one bin on each side
        h = hist(weirds$retention.time, breaks=seq(from=rt_range[1]-3, to=rt_range[2]+3, by=3), plot = FALSE)
        result = data.frame(RT = h$mid, counts = h$counts, fn = weirds$fc.raw.file[1])
        return(result)
      })
      ## use the augmented name (with threshold) as the plot facet label
      df_carry$fc.raw.file = df_carry$fn

      lpl =
        byXflex(df_carry, df_carry$fc.raw.file, raws_perPlot, plot_DataOverRT, sort_indices = TRUE,
                title = "EVD: Peptides with wide RT span", y_lab = "# of Peptide Sequences")

      ## QC measure: fraction of IDs per file whose RT span exceeds the threshold
      qc_evd_carry = plyr::ddply(df_evd, "fc.raw.file", function(x) {
        thresh = aboveThresh_fn(x$retention.length);
        pc = sum(x$retention.length > thresh, na.rm = TRUE) / nrow(x)
        return (data.frame(larger_pc=pc))
      })
      cname = .self$qcName
      qc_evd_carry[, cname] = qualLinThresh(1 - qc_evd_carry$larger_pc)
      return(list(plots = lpl, qcScores = qc_evd_carry[, c("fc.raw.file", cname)]))
    },
    qcCat = "MS",
    qcName = "EVD:~CarryOver",
    ## NOTE(review): orderNr 0250 is also used by the MS^2 oversampling metric -- confirm intended ordering
    orderNr = 0250
  )
  return(.self)
  })
)
#####################################################################
qcMetric_EVD_MissingValues = setRefClass(
"qcMetric_EVD_MissingValues",
contains = "qcMetric",
methods = list(initialize=function() { callSuper(
helpTextTemplate =
"Missing peptide intensities per Raw file from evidence.txt.
This metric shows the fraction of missing peptides compared to all peptides seen in the whole experiment.
The more Raw files you have, the higher this fraction is going to be (because there is always going
to be some exotic [low intensity?] peptide which gets [falsely] identified in only a single Raw file).
A second plot shows how many peptides (Y-axis) are covered by at least X Raw files.
A third plot shows the density of the observed (line) and the missing (filled area) data.
To reconstruct the distribution of missing values, an imputation strategy is required, so the argument is somewhat
circular here. If all Raw files are (technical) replicates, i.e. we can expect that missing peptides are indeed
present and have an intensity similar to the peptides we do see, then the median is a good estimator.
This method performs a global normalization across Raw files (so their observed intensitiy distributions have the same mean),
before computing the imputed values. Afterwards, the distributions are de-normalized again (shifting them back to their)
original locations -- but this time with imputed peptides.
Peptides obtained via Match-between-run (MBR) are accounted for (i.e. are considered as present = non-missing).
Thus, make sure that MBR is working as intended (see MBR metrics).
<b>Warning:</b> this metric is meaningless for fractionated data!
<b>TODO:</b> compensate for lower scores in large studies (with many Raw files), since peptide FDR is accumulating!?
Heatmap score [EVD: Pep Missing]: Linear scale of the fraction of missing peptides.
",
## Worker for the peptide missing-values metric.
## Input:  df_evd -- MaxQuant evidence table; needs columns
##         'fc.raw.file', 'modified.sequence' and 'intensity'.
## Output: list(plots = <list of ggplot objects>, qcScores = <data.frame>),
##         or an early return with an info plot on unsupported input.
workerFcn = function(.self, df_evd)
{
## completeness check
if (!checkInput(c("fc.raw.file", "modified.sequence", "intensity"), df_evd)) return()
## fractionated data makes per-Raw-file missingness meaningless --> bail out with an info plot
if (('fraction' %in% colnames(df_evd)) && (length(unique(df_evd$fraction)) > 1)) {
lpl = list(ggText("Missing Values Skipped", "Missing values calculation skipped. Fractionated data detected!"))
return(list(plots = lpl))
}
## need at least two Raw files to compare peptide presence across files
if (length(unique(df_evd$fc.raw.file)) < 2) {
lpl = list(ggText("Missing Values Skipped", "Need more than one Raw file!"))
return(list(plots = lpl))
}
## make peptides unique per Raw file
df_u = plyr::ddply(df_evd[ , c("fc.raw.file", "modified.sequence")], "fc.raw.file",
function(x) {
return(x[!duplicated(x$modified.sequence),])
})
## reference set: all peptides seen anywhere in the experiment
global_peps = unique(df_u$modified.sequence)
global_peps_count = length(global_peps)
## percent identified in each Raw file
pep_set = plyr::ddply(df_u[ , c("fc.raw.file", "modified.sequence")], "fc.raw.file",
function(x) {
score = 100*length(intersect(global_peps, x$modified.sequence)) / global_peps_count
return(data.frame(idFraction = score))
})
## bar plot of the non-missing fraction, at most 50 Raw files per plot page
lpl = byXflex(pep_set, pep_set$fc.raw.file, subset_size = 50, FUN = function(dx) {
p = ggplot(dx) +
geom_bar(aes_string(x = "fc.raw.file", y = "idFraction"), stat = "identity") +
ggtitle("[experimental] EVD: Non-Missing Peptides", "compared to all peptides seen in experiment") +
xlab("") +
ylab("Fraction of total peptides [%]") +
ylim(0, 100) +
scale_x_discrete_reverse(dx$fc.raw.file) +
coord_flip()
return(p)
})
#for (pl in lpl) print(pl)
## cumulative curve: fraction of peptides observed in at least x Raw files
## NOTE(review): the bare head() calls below look like leftover debug output;
## they have no effect on the result.
tbl = table(df_u$modified.sequence)
head(tbl)
tbl_smry = as.data.frame(table(tbl))
tbl_smry$FreqRel = tbl_smry$Freq / global_peps_count
tbl_smry = tbl_smry[nrow(tbl_smry):1,] ## invert
tbl_smry$FreqCum = cumsum(tbl_smry$FreqRel) * 100
tbl_smry$x = as.numeric(tbl_smry$tbl)
p = ggplot(tbl_smry, aes_string(x = "x", y = "FreqCum")) +
geom_line() +
geom_point() +
ggtitle("[experimental] EVD: Non-missing by set") +
xlab("Minimum # Raw files") +
ylab("Fraction of total peptides [%]") +
ylim(0, 100)
lpl[["missingCul"]] = p
## intensity distribution of missing values:
## normalize every Raw file against the first one, impute a missing peptide with
## its cross-file mean intensity, then de-normalize the imputed values back into
## the target file's own intensity range (see helpText above).
df_evd$logInt = log2(df_evd$intensity)
lpl_dens = byXflex(df_evd[, c("modified.sequence", "fc.raw.file", "logInt")], df_evd$fc.raw.file,
subset_size = 5, FUN = function(dx) {
## peptide x Raw-file matrix of mean log2 intensities (NA = missing)
d_mat = reshape2::dcast(dx, modified.sequence ~ fc.raw.file, fun.aggregate = mean, value.var = "logInt")
## ... normalization factors
d_mat_mult = sapply(2:ncol(d_mat), function(x) {
mult = mean(d_mat[, x] / d_mat[, 2], na.rm = TRUE)
return(mult)
})
df_mult = data.frame(fc.raw.file = colnames(d_mat)[-1], mult = d_mat_mult)
## .. normalize data
d_mat_n = d_mat
d_mat_n[, -1] = sweep( d_mat_n[, -1, drop=FALSE], 2, d_mat_mult, '/')
##
head(d_mat_n)
## find impute value
pep_mean = rowMeans(d_mat_n[, -1, drop=FALSE], na.rm = TRUE)
df_missing = plyr::ddply(df_mult, "fc.raw.file", function(x) {
## get set of missing values
values = pep_mean[is.na(d_mat_n[, as.character(x$fc.raw.file)])]
## de-normalize (back to old intensity range)
values = values * x$mult
return(data.frame(missingVals = values))
})
head(df_missing)
## overlay: imputed-missing distribution (area) vs observed distribution (freqpoly)
pl = ggplot(df_missing, aes_string(x = "missingVals", col="fc.raw.file", fill = "fc.raw.file")) +
geom_area(position = position_dodge(width=0), binwidth = 0.5, stat="bin", alpha=0.5) +
geom_freqpoly(data = dx, aes_string(x = "logInt", col="fc.raw.file"), binwidth = 0.5, size = 1.2) +
xlab("Intensity [log2]") +
ggtitle(" [experimental] EVD: Imputed Peptide Intensity Distribution of Missing Values") +
scale_fill_manual(values = rep(RColorBrewer::brewer.pal(6,"Accent"), times=40), guide = guide_legend("")) +
scale_colour_manual(values = rep(RColorBrewer::brewer.pal(6,"Accent"), times=40), guide = "none")
return(pl)
})
lpl = append(lpl, lpl_dens)
## QC measure for fraction of missing values
cname = .self$qcName
pep_set[, cname] = qualLinThresh(pep_set$idFraction, 100) ## a no-op, just for clarity
qcScore = pep_set[, c("fc.raw.file", cname)]
return(list(plots = lpl, qcScores = qcScore))
},
qcCat = "prep",
qcName = "EVD:~Pep~Missing~Values",
orderNr = 0390 # just before peptide count
)
return(.self)
})
)
#####################################################################
## QC metric: UpSet plots of modified-peptide-sequence overlap between Raw files,
## plus a per-file heatmap score (overlap with the union of all other files).
qcMetric_EVD_UpSet = setRefClass(
"qcMetric_EVD_UpSet",
contains = "qcMetric",
methods = list(initialize=function() { callSuper(
helpTextTemplate =
'The metric shows an upSet plot based on the number of modified peptide sequences per Raw file, intersected or merged with other Raw files (see below for details).<br>
If the number of Raw files is >=6, only the `distinct` plot is generated (the other two are skipped for performance reasons).
<a href="https://raw.githubusercontent.com/cbielow/PTXQC/master/inst/reportTemplate/modes_UpSet.png" target="_blank" rel="noopener"><span>See here for an example plot showing how the set size is computed</span> </a>.
Definition: An `active set` is the set of black dots in a column of the plot -- as opposed to the grey dots (you will understand when you see it).
<p>
<b>distinct:</b> shows the number of sequences that are present in ALL active sets. For three Raw files and active sets A and B, this would mean all sequences which occur in A and B (intersect), but not in C (setdiff).<br>
<b>intersection:</b> shows the number of sequences that occurs in all active sets (intersection).<br>
<b>union:</b> shows the number of sequences that occurs in total. For two files that are all sequences that occurs either in A or in B (union).<br>
<p>
Heatmap score [EVD: UpSet]: The proportion of sequences that the file has in common with all other files.
',
## Worker: builds the UpSet plot(s) and the per-Raw-file overlap score.
## 'df_evd' needs columns 'modified.sequence' and 'fc.raw.file'.
workerFcn = function(.self, df_evd)
{
if (!checkInput(c("modified.sequence", "fc.raw.file"), df_evd)) return()
## For every non-empty combination of Raw files, apply 'mode' (intersect or
## union) to their peptide sets; result names are UpSetR expressions like
## "A&B". Enumerates all 2^n - 1 combinations -- only used for < 6 files.
getOutputWithMod = function(dl, mode){
unlist(sapply(1:length(dl), function(numElem){
comb = combn(names(dl),numElem)
sapply(1:ncol(comb), function(x){
sets = comb[,x]
exp = as.expression(paste(sets, collapse = "&"))
value = length(Reduce(mode, dl[sets]))
names(value) = exp
return(value)
})
}))
}
## unique peptide sequences per Raw file (list of character vectors)
lf = tapply(df_evd$modified.sequence, df_evd$fc.raw.file, function(x){return(list(unique(x)))})
# get rid of rawfiles without any PepIDs
lf = Filter(function(l) length(l)>0 && any(!is.na(l)), lf)
if (length(lf) <= 1)
{
lpl = list(ggText("UpSetR", "Only single Raw file detected. Cannot compute unions/intersections."))
return(list(plots = lpl, titles = list("EVD: UpSet")))
}
## 'distinct' plot is always generated (cheap via fromList)
lpl = list(UpSetR::upset(UpSetR::fromList(lf), nsets = min(20, length(lf)), keep.order = TRUE, mainbar.y.label = "distinct size"))
if (length(lf) < 6)
{ ## performance for enumerating all supersets forbids doing it on larger sets until we make this code smarter...
lpl[[2]] = UpSetR::upset(UpSetR::fromExpression(getOutputWithMod(lf, intersect)), mainbar.y.label = "intersection size")
lpl[[3]] = UpSetR::upset(UpSetR::fromExpression(getOutputWithMod(lf, union)), mainbar.y.label = "union size")
}
titles = list("EVD: UpSet distinct",
"EVD: UpSet intersect",
"EVD: UpSet union")[1:length(lpl)]
## score per file: |file  INTERSECT  union(other files)| / |union(other files)|
score = sapply(1:length(names(lf)), function(x){
union = unique(unlist(lf[-x]))
inters = intersect(lf[[x]], union)
score = length(inters)/length(union)
return(score)
})
qcScore = data.frame(fc.raw.file = names(lf), score = score)
colnames(qcScore)[2] = .self$qcName
## NOTE(review): the early return above uses key 'titles = ...' while this one
## uses 'title = ...'; one of the two is likely a typo -- confirm which key the
## qcMetric framework consumes.
return(list(plots = lpl, title = titles, qcScores = qcScore))
},
qcCat = "LC",
qcName = "EVD:~UpSet",
orderNr = 0500 # just before peptide count
)
return(.self)
})
)
|
aec36a979348594c9b47db1593a39c9666dfd2a2
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/3865_0/rinput.R
|
4d0063f38b5f688a17a83b4b4c113aae30012c66
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read a phylogenetic tree, remove its root, and write the unrooted tree
# back out in Newick format.
library(ape)
phylo <- read.tree("3865_0.txt")
phylo_unrooted <- unroot(phylo)
write.tree(phylo_unrooted, file = "3865_0_unrooted.txt")
|
e6bf053a2948a65f2e83554669f939c07d71c23d
|
0a8007a2a68bdcd0bcb3a84be4422ea25e42c806
|
/TranscriptionFactorBinding.R
|
d6c186d78566747ae46287dee536fc17f2f59b50
|
[] |
no_license
|
mbanf/CNS_BindingPrediction
|
44bb14f16b6b3bb9c0077695e27ce81627659e72
|
855c6035c4d2ec320567da3ce0ff0e57b977deac
|
refs/heads/master
| 2022-05-18T14:10:35.092179
| 2022-05-01T13:48:00
| 2022-05-01T13:48:00
| 81,061,652
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 77,496
|
r
|
TranscriptionFactorBinding.R
|
# TODO: Add comment
#
# Author: michaelbanf
###############################################################################
# load a multiset of binding motifs - last curated spring 2016
# Load and merge position weight matrices (PWMs) for Arabidopsis TF binding
# motifs from three sources: the Cell paper collection, the PNAS paper
# collection and JASPAR 2014 (Ath subset). Last curated spring 2016.
#
# Returns a list of two elements:
#   [[1]] named list of PWMs (4 x L numeric matrices; rows A/C/G/T,
#         columns = motif positions, values = base frequencies)
#   [[2]] data.frame mapping Motif.ID -> TF.locus/TF.name/TF.family/
#         TF.subfamily/src
# Motifs whose TF locus could not be resolved (NA) are dropped from both.
get_cell_and_pnas_Paper_PWMs <- function(){
print("prepare PWM motifs and mappings")
library(fume)  # provides lappend()
## create motif - gene mapping
## FIX: the accumulator's first column was named 'Motif.id' while every row
## appended below is named 'Motif.ID'; rbind.data.frame() requires matching
## names, so the first rbind() would fail. Declare the matching name here.
df.motifs <- data.frame(Motif.ID = character(), TF.locus = character(), TF.name = character(), TF.family = character(), TF.subfamily = character(), src = character(), stringsAsFactors = FALSE)
#lst.motifs <- vector(mode = "list")
# Cell paper: one PWM file per motif + a mapping table (motif id -> TF locus)
files <- list.files("Datasets/novelTFmotifs/Ath_CellPaper/pwms_all_motifs/")
motif.mapping <- read.table("Datasets/novelTFmotifs/Ath_CellPaper/TF_Information_all_motifs.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
motif.mapping$DBID <- gsub("\\-.*", "", motif.mapping$DBID)  # strip isoform suffix
n.cell <- length(files)
for(i in 1:n.cell){
motif.id <- substr(files[i], 1, nchar(files[i])-4)  # drop ".txt" extension
#lst.motifs[[i]] <- vector(mode = "list", length = 2)
#lst.motifs[[i]][[1]] <- motif.id
idx <- match(motif.id, motif.mapping$Motif_ID)
#lst.motifs[[i]][[2]] <- motif.mapping$DBID[idx]
newrow <- data.frame(Motif.id = motif.id, TF.locus = motif.mapping$DBID[idx], TF.name = motif.mapping$TF_Name[idx], TF.family = motif.mapping$Family_Name[idx], TF.subfamily = "", src = "Cell", stringsAsFactors = FALSE)
names(newrow) = c("Motif.ID", "TF.locus", "TF.name", "TF.family", "TF.subfamily", "src")
df.motifs <- rbind(df.motifs, newrow)
}
## load the Cell PWM matrices (file layout: positions in rows, bases in columns
## with a leading position column -> transpose to 4 x L)
lst.pwm.motif <- vector(mode = "list", length = length(files))
for(i in 1:n.cell){
#print(paste("TF Motif ", i, "of", length(files)))
lst.pwm.motif[[i]] <- read.table(paste("Datasets/novelTFmotifs/Ath_CellPaper/pwms_all_motifs/", files[i], sep = ""), header = TRUE, sep = "\t", stringsAsFactors = FALSE)
lst.pwm.motif[[i]] <- lst.pwm.motif[[i]][,-1]
lst.pwm.motif[[i]] <- t(as.matrix(lst.pwm.motif[[i]]))
names(lst.pwm.motif)[i] <- df.motifs$Motif.ID[i]
colnames(lst.pwm.motif[[i]]) <- as.character(seq(1:ncol(lst.pwm.motif[[i]])))
}
### ---------
# PNAS paper: all motifs in a single table, 4 rows (A/C/G/T) per motif,
# each motif 10 positions wide (columns 2..11)
df.pwms.pnas <- read.table("Datasets/novelTFmotifs/pnas_paper/all_pnas_pwms.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
n.pnas <- nrow(df.pwms.pnas)/4
idx <- 1
for(i in 1:n.pnas){
motif.id <- paste("motif_pnas",i, sep = "")
vec.rows <- c(idx, idx + 1, idx + 2, idx + 3)
pnas.motif <- as.matrix(df.pwms.pnas[vec.rows, seq(2,11)])
rownames(pnas.motif) <- c("A","C","G","T")
colnames(pnas.motif) <- as.character(seq(1:ncol(pnas.motif)))
#lst.motifs[[i]] <- vector(mode = "list", length = 2)
#lst.motifs[[i]][[1]] <- motif.id
lst.pwm.motif <- lappend(lst.pwm.motif, pnas.motif)
names(lst.pwm.motif)[[(n.cell + i)]] <- motif.id #df.pwms.pnas$TF.locus[idx]
# check for identical list elements
newrow <- data.frame(Motif.id = motif.id, TF.locus = df.pwms.pnas$TF.locus[idx], TF.name = df.pwms.pnas$TF.name[idx], TF.family = df.pwms.pnas$TF.Family[idx], TF.subfamily = df.pwms.pnas$TF.Subfamily[idx], src = "PNAS")
names(newrow) = c("Motif.ID", "TF.locus", "TF.name", "TF.family", "TF.subfamily", "src")
df.motifs <- rbind(df.motifs, newrow)
idx <- idx + 4
}
# JASPAR 2014
# map tf name to ..
#
#source("http://bioconductor.org/biocLite.R")
#biocLite("JASPAR2014")
#biocLite("TFBSTools")
#biocLite("RSQLite")
#library(RSQLite)
library(TFBSTools)
#library(JASPAR2014)
# opts = list()
# opts[["tax_group"]] = "plants"
# PFMatrixList = getMatrixSet(JASPAR2014, opts)
#
#
# vec.jaspar.motifs <- c("MA0008.1", "MA0121.1", "MA0548.1", "MA0549.1", "MA0550.1", "MA0551.1",
# "MA0552.1", "MA0553.1", "MA0554.1", "MA0555.1", "MA0556.1", "MA0557.1",
# "MA0558.1","MA0559.1","MA0560.1", "MA0561.1", "MA0562.1", "MA0563.1",
# "MA0564.1","MA0565.1","MA0566.1","MA0567.1","MA0568.1","MA0569.1","MA0570.1",
# "MA0005.2","MA0571.1","MA0572.1","MA0110.2","MA0573.1","MA0574.1",
# "MA0575.1","MA0576.1","MA0577.1","MA0578.1","MA0579.1","MA0580.1","MA0581.1",
# "MA0582.1","MA0583.1","MA0584.1","MA0001.2","MA0585.1","MA0586.1","MA0587.1",
# "MA0588.1","MA0589.1","MA0590.1")
#
#
# vec.TF.names <- sapply(PFMatrixList.Ath, function(x) x@name, simplify = TRUE)
# vec.TF.names[40] <- "RAV1"
#
# df.locus.set <- subset(df.gene_names, df.gene_names$primary_gene_symbol %in% vec.TF.names)
#df.pwms.jaspar2014 <- read.table("Datasets/novelTFmotifs/Jaspar2014/pfm_plants_JASPAR_Ath_only.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
#PFMatrixList.Ath <- PFMatrixList[vec.jaspar.motifs]
# saveRDS(PFMatrixList.Ath, "Datasets/novelTFmotifs/Jaspar2014/PFMatrixList.Ath.rds")
PFMatrixList.Ath <- readRDS("Datasets/novelTFmotifs/Jaspar2014/PFMatrixList.Ath.rds")
df.jaspar.motif.info <- read.table("Datasets/novelTFmotifs/Jaspar2014/jaspar2014AthMotifTable.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
for(i in 1:length(PFMatrixList.Ath)){
motif.id <- PFMatrixList.Ath[[i]]@ID
pfm <- PFMatrixList.Ath[[i]]@profileMatrix
## JASPAR matrices are raw counts -> convert each column to frequencies
jaspar.motif <- apply(pfm, 2, function(x) x / sum(x))
colnames(jaspar.motif) <- as.character(seq(1:ncol(jaspar.motif)))
lst.pwm.motif <- lappend(lst.pwm.motif, jaspar.motif)
names(lst.pwm.motif)[[(n.cell + n.pnas + i)]] <- motif.id
df.jaspar.motif.info.sset <- subset(df.jaspar.motif.info, df.jaspar.motif.info$Motif.ID == motif.id)
# check for identical list elements
newrow <- data.frame(Motif.id = motif.id, TF.locus = df.jaspar.motif.info.sset$TF.locus, TF.name = df.jaspar.motif.info.sset$TF.name,
TF.family = df.jaspar.motif.info.sset$TF.family, TF.subfamily = "", src = "JASPAR2014")
names(newrow) = c("Motif.ID", "TF.locus", "TF.name", "TF.family", "TF.subfamily", "src")
df.motifs <- rbind(df.motifs, newrow)
}
## drop motifs without a resolvable TF locus (keep PWM list and table in sync)
idx <- which(is.na(df.motifs$TF.locus))
df.motifs <- na.omit(df.motifs)
lst.pwm.motif[idx] <- NULL
return(list(lst.pwm.motif, df.motifs))
}
##### mapping ######
# Scan the promoters (v.promoter_length bp upstream of the TSS) of all genes in
# 'vec.genes' for PWM motif matches and build a scored TF -> Target network.
#
# Side effects (nothing useful is returned):
#   - spawns nr.cores parallel workers (foreach/doMC), one task per motif
#   - writes one intermediate RDS per motif / per target under
#     Datasets/novelTFmotifs/tmp/
#   - saves the merged and the position-deduplicated networks under
#     Datasets/novelTFmotifs/
# Hit criteria: relative score >= 0.8 of the motif maximum AND absolute
# log-odds-like score >= v.th.bind_score.
perform_TFBS_mapping_all_genes <- function(vec.genes, v.promoter_length = 1000, v.th.bind_score = 4, nr.cores = 15){
print("compute scored all gene GRN")
library(GenomicFeatures)
# source("http://bioconductor.org/biocLite.R")
# biocLite("GenomicFeatures")
# biocLite("ChIPpeakAnno")
# biocLite("biomaRt")
# biocLite("Biostrings")
# install.packages("VennDiagram")
#library(ChIPpeakAnno)
library(biomaRt)
library(Biostrings)
#biocLite("BSgenome.Athaliana.TAIR.TAIR9")
library(BSgenome.Athaliana.TAIR.TAIR9)
genome <- BSgenome.Athaliana.TAIR.TAIR9
#source("http://bioconductor.org/biocLite.R")
#biocLite("TxDb.Athaliana.BioMart.plantsmart21")
library(TxDb.Athaliana.BioMart.plantsmart21)
## extract upstream promoter sequences for the requested genes
transcriptCoordsByGene.GRangesList <- transcriptsBy (TxDb.Athaliana.BioMart.plantsmart21, by = "gene")
transcriptCoordsByGene.GRangesList <- renameSeqlevels( transcriptCoordsByGene.GRangesList, c("Chr1", "Chr2", "Chr3", "Chr4", "Chr5", "ChrM", "ChrC") )
transcriptCoordsByGene.GRangesList <- transcriptCoordsByGene.GRangesList[names(transcriptCoordsByGene.GRangesList) %in% vec.genes]
df.promSequences <- getPromoterSeq(transcriptCoordsByGene.GRangesList, genome, upstream=v.promoter_length, downstream=0)
#install.packages("foreach")
library(foreach)
library(doMC)
registerDoMC(nr.cores)
lst.motifs <- get_cell_and_pnas_Paper_PWMs()
lst.pwm.motif <- lst.motifs[[1]]
df.motifs <- lst.motifs[[2]]
## phase 1: one parallel task per motif -- scan every promoter for hits
foreach(i = 1:length(lst.pwm.motif)) %dopar% {
#for(i in 1:length(lst.pwm.motif)){
#system.time(for(i in 1:2){
print(paste("TF Motif ", i, "of", length(lst.pwm.motif)))
df.grn.all_motifs <- data.frame(TF = character(), Target = character(), TF.Motif = character(),
bind_score = numeric(), cmp_bind_score = numeric(), BS_pos_to_TSS = numeric(),
stringsAsFactors = FALSE)
names(df.grn.all_motifs) <- c("TF", "Target", "TF.Motif", "bind_score", "cmp_bind_score", "BS_pos_to_TSS")
## matchPWM() expects an integer count matrix -> scale the frequency PWM
pcm <- round(100 * lst.pwm.motif[[i]])
for(j in 1:length(df.promSequences)){
# print(paste(j, "of", length(df.promSequences)))
#hits <- matchPWM(lst.pwm.motif[[i]], DNAString(toString(df.promSequences[[j]][[1]])) , with.score = TRUE)
hits <- matchPWM(pcm, DNAString(toString(df.promSequences[[j]][[1]])) , with.score = TRUE)
#matchPWM(reverseComplement(pcm), DNAString(toString(df.promSequences[[j]][[1]])) , with.score = TRUE)
nhits <- length(hits)
if(nhits >= 1){
# cmp_bind_score <- min(mcols(hits)$score / maxScore(lst.pwm.motif[[i]])) # should be >= 0.8
cmp_bind_score <- min(mcols(hits)$score / maxScore(pcm)) # should be >= 0.8
motif.score <- mcols(hits)$score / 100  ## undo the x100 count scaling
if(cmp_bind_score >= 0.8){
for(k in 1:nhits){
if(motif.score[k] >= v.th.bind_score){
## NOTE(review): BS_pos_to_TSS below is the midpoint of the hit within the
## promoter string; whether that equals a distance *to* the TSS depends on
## the orientation getPromoterSeq() returns -- verify against callers.
newrow <- data.frame(TF = as.character(df.motifs$TF.locus[i]),
Target = names(df.promSequences[[j]])[1],
TF.Motif = names(lst.pwm.motif)[i],
bind_score = motif.score[k],
cmp_bind_score = cmp_bind_score,
BS_pos_to_TSS = abs(end(hits)[k] + start(hits)[k])/2,
stringsAsFactors = FALSE)
df.grn.all_motifs <- rbind(df.grn.all_motifs, newrow)
}
}
}
}
}
names(df.grn.all_motifs) <- c("TF", "Target", "TF.Motif", "bind_score", "cmp_bind_score", "BS_pos_to_TSS")
saveRDS(df.grn.all_motifs, paste("Datasets/novelTFmotifs/tmp/df.grn.all_motifs_",i,".rds", sep = ""))
}
# combine the per-motif intermediates into one table
# (rbind-in-a-loop is O(n^2) -- acceptable at this scale, but slow)
df.grn.all_motifs <- data.frame(TF = character(), Target = character(), TF.Motif = character(),
bind_score = numeric(), cmp_bind_score = numeric(), BS_pos_to_TSS = numeric(),
stringsAsFactors = FALSE)
for(i in 1:length(lst.pwm.motif)){
print(paste("TF Motif ", i, "of", length(lst.pwm.motif)))
df.grn.all_motifs <- rbind(df.grn.all_motifs, readRDS(paste("Datasets/novelTFmotifs/tmp/df.grn.all_motifs_",i,".rds", sep = "")))
}
names(df.grn.all_motifs) <- c("TF", "Target", "TF.Motif", "bind_score", "cmp_bind_score", "BS_pos_to_TSS")
saveRDS(df.grn.all_motifs, paste("Datasets/novelTFmotifs/df.grn.all_motifs.rds", sep = ""))
# Filter: per (TF, Target, binding position) keep only the best-scoring hit
df.grn.all_motifs <- readRDS(paste("Datasets/novelTFmotifs/df.grn.all_motifs.rds", sep = ""))
df.grn.all_motifs <- df.grn.all_motifs[,-c(3,5)]  ## drop TF.Motif + cmp_bind_score
names(df.grn.all_motifs) <- c("TF", "Target", "bind_score", "BS_pos_to_TSS")
vec.targs <- unique(df.grn.all_motifs$Target)
n.targs <- length(unique(df.grn.all_motifs$Target))
## phase 2: one parallel task per target gene -- deduplicate hits by position
foreach(i = 1:n.targs) %dopar% {
#for(i in 1:10){#n.BSpos){
#for(i in 1:length(vec.sequences)){
df.grn.all_motifs_filtered <- data.frame(TF = character(), Target = character(), bind_score = numeric(), BS_pos_to_TSS = numeric(), stringsAsFactors = FALSE)
print(paste(i, "of",n.targs))
df.sset <- subset(df.grn.all_motifs, df.grn.all_motifs$Target == vec.targs[i])
if(nrow(df.sset) > 0){
tfs <- unique(df.sset$TF)
for(t.1 in 1:length(tfs)){
df.sset.2 <- subset(df.sset, df.sset$TF == tfs[t.1])
targs <- unique(df.sset.2$Target)
for(t.2 in 1:length(targs)){
df.sset.3 <- subset(df.sset.2, df.sset.2$Target == targs[t.2])
vec.BSpos <- unique(df.sset.3$BS_pos_to_TSS)
for(t.3 in 1:length(vec.BSpos)){
df.sset.4 <- subset(df.sset.3, df.sset.3$BS_pos_to_TSS == vec.BSpos[t.3])
df.sset.4 <- subset(df.sset.4, df.sset.4$bind_score == max(unique(df.sset.4$bind_score))) # could be more then one... filter
df.grn.all_motifs_filtered <- rbind(df.grn.all_motifs_filtered, df.sset.4[1,])
}
}
}
}
saveRDS(df.grn.all_motifs_filtered, paste("Datasets/novelTFmotifs/tmp/df.grn.all_motifs_filtered_",i,".rds", sep = ""))
}
## merge the filtered per-target intermediates
df.grn.all_motifs_filtered <- data.frame(TF = character(), Target = character(), bind_score = numeric(), BS_pos_to_TSS = numeric(), stringsAsFactors = FALSE)
for(i in 1:n.targs){
print(paste(i, "of", n.targs))
df.grn.all_motifs_filtered <- rbind(df.grn.all_motifs_filtered, readRDS(paste("Datasets/novelTFmotifs/tmp/df.grn.all_motifs_filtered_",i,".rds", sep = "")))
}
names(df.grn.all_motifs_filtered) <- c("TF", "Target", "bind_score", "BS_pos_to_TSS")
## NOTE(review): output file name lacks the '.rds' extension used everywhere else.
saveRDS(df.grn.all_motifs_filtered, paste("Datasets/novelTFmotifs/df.grn.all_motifs_filtered", sep = ""))
}
# Map experimentally verified TF binding sequences (AthaMap; exact string match,
# minimum motif length v.th.seqMotif_length) onto the promoters of 'vec.genes'.
#
# Side effects (nothing useful is returned): writes per-promoter intermediates
# to Datasets/novelTFmotifs/tmp/ and the merged TF -> Target network to
# Datasets/novelTFmotifs/df.grn.seqMotifs.rds.
#
# NOTE(review): relies on the global 'vec.regs.novel.matrix' (TFs that already
# have a matrix motif, excluded here) and on a registered doMC backend for the
# foreach %dopar% loop -- both must be set up by the caller.
exact_map_seq_motifs <- function(vec.genes, v.promoter_length = 1000, v.th.seqMotif_length = 4){
library(GenomicFeatures)
library(biomaRt)
library(Biostrings)
library(BSgenome.Athaliana.TAIR.TAIR9)
genome <- BSgenome.Athaliana.TAIR.TAIR9
library(TxDb.Athaliana.BioMart.plantsmart21)
## extract upstream promoter sequences for the requested genes
transcriptCoordsByGene.GRangesList <- transcriptsBy (TxDb.Athaliana.BioMart.plantsmart21, by = "gene")
transcriptCoordsByGene.GRangesList <- renameSeqlevels( transcriptCoordsByGene.GRangesList, c("Chr1", "Chr2", "Chr3", "Chr4", "Chr5", "ChrM", "ChrC") )
transcriptCoordsByGene.GRangesList <- transcriptCoordsByGene.GRangesList[names(transcriptCoordsByGene.GRangesList) %in% vec.genes]
df.promSequences <- getPromoterSeq(transcriptCoordsByGene.GRangesList, genome, upstream=v.promoter_length, downstream=0)
## load + clean the AthaMap verified binding sequences
df.seqMotifs <- read.table("Datasets/novelTFmotifs/athamap/athmap_seqMotifs.txt", header = TRUE, stringsAsFactor = FALSE, sep = "\t")
df.seqMotifs <- subset(df.seqMotifs, nchar(df.seqMotifs$verified_binding_seq) >= v.th.seqMotif_length)
df.seqMotifs$TF.locus <- toupper(df.seqMotifs$TF.locus)
df.seqMotifs$TF.locus <- unlist(sapply(df.seqMotifs$TF.locus, function(x) gsub(" ","",x, fixed=TRUE)))
df.seqMotifs <- subset(df.seqMotifs, !df.seqMotifs$TF.locus %in% vec.regs.novel.matrix)
dict_seq_motifs <- DNAStringSet(df.seqMotifs$verified_binding_seq)
#pdict_seq_motifs <- PDict(dict_seq_motifs)
## one parallel task per promoter: exact-match all motifs at once
foreach(i = 1:length(df.promSequences)) %dopar% {
#for(i in 1:length(df.promSequences)){
df.grn.seqMotifs <- data.frame(TF = character(), seq.motif = character(), Target = character(), BS_pos_to_TSS = numeric(), stringsAsFactors = FALSE)
print(paste(i,"of",length(df.promSequences)))
Target <- names(df.promSequences[[i]][1])
# find these motifs in promoters - get genes
motifMatches <- matchPDict(dict_seq_motifs, DNAString(toString(df.promSequences[[i]][1])),max.mismatch=0, min.mismatch=0)
idxes <- countIndex(motifMatches)  ## hit count per motif
# how many per promoter and position -
for(j in 1:length(idxes)){
if(idxes[j] > 0){
for(k in 1:idxes[j]){
# SNP position in promoter
## convert string position to distance upstream of the TSS
mean_dist_to_TSS <- round(mean(c(endIndex(motifMatches)[[j]][k], startIndex(motifMatches)[[j]][k])))
BS_pos_to_TSS <- v.promoter_length - mean_dist_to_TSS
newrow <- data.frame(TF = df.seqMotifs$TF.locus[j], Target = Target, BS_pos_to_TSS = BS_pos_to_TSS)
df.grn.seqMotifs <- rbind(df.grn.seqMotifs, newrow)
}
}
}
names(df.grn.seqMotifs) <- c("TF", "Target", "BS_pos_to_TSS")
saveRDS(df.grn.seqMotifs, paste("Datasets/novelTFmotifs/tmp/df.grn.seqMotifs_",i,".rds", sep = ""))
}
## merge the per-promoter intermediates
df.grn.seqMotifs <- data.frame(TF = character(), Target = character(), bind_score = numeric(), BS_pos_to_TSS = numeric(), stringsAsFactors = FALSE)
for(i in 1:length(df.promSequences)){
print(paste(i, "of", length(df.promSequences)))
df.grn.seqMotifs <- rbind(df.grn.seqMotifs, readRDS(paste("Datasets/novelTFmotifs/tmp/df.grn.seqMotifs_",i,".rds", sep = "")))
}
names(df.grn.seqMotifs) <- c("TF", "Target", "BS_pos_to_TSS")
saveRDS(df.grn.seqMotifs, "Datasets/novelTFmotifs/df.grn.seqMotifs.rds")
}
##### Conservation based ######
# Extract conserved non-coding sequences (CNS; FDR 0.10 footprints) located in
# gene promoters (< v.promoter_length bp upstream of the nearest TSS) and
# annotate each with its target gene, genomic sequence and distance to the TSS.
#
# load_from_file: TRUE  -> load the previously cached table from disk;
#                 FALSE -> recompute from the BED file (slow; needs
#                          ChIPpeakAnno/biomaRt/Biostrings/BSgenome) and cache it.
# Returns the annotated CNS data.frame.
# FIX: previously the compute branch's last expression was saveRDS(), so the
# function returned NULL when recomputing but the table (invisibly) when
# loading; both branches now return the table explicitly.
extract_CNS2014 <- function(load_from_file = TRUE, v.promoter_length = 1000){
if(!load_from_file){
source("http://bioconductor.org/biocLite.R")
# biocLite("ChIPpeakAnno")
# biocLite("biomaRt")
# biocLite("Biostrings")
# install.packages("VennDiagram")
library(ChIPpeakAnno)
library(biomaRt)
library(Biostrings)
#biocLite("BSgenome.Athaliana.TAIR.TAIR9")
library(BSgenome.Athaliana.TAIR.TAIR9)
genome <- BSgenome.Athaliana.TAIR.TAIR9
ensmart = useMart('ENSEMBL_MART_PLANT', "athaliana_eg_gene")
## NOTE(review): reads from "../Datasets/..." but caches to "Datasets/..." --
## confirm the intended working directory.
df.CNS_2014 <- read.table("../Datasets/novelTFmotifs/AllFootPrintsFDR0.10_scores.bed", header = FALSE, sep = "\t", skip = 1, stringsAsFactors = FALSE)
names(df.CNS_2014) <- c("Chr", "start", "end", "val", "cons_score")
g1.r <- BED2RangedData(df.CNS_2014, header=FALSE)
annotatedData = getAnnotation(ensmart, featureType = "TSS")
annotatedPeaks = annotatePeakInBatch(g1.r, AnnotationData= annotatedData)
#annotatedPeaks <- annotatedPeaks[order(rownames(annotatedPeaks)),]
## keep only peaks upstream of (and within v.promoter_length bp of) the nearest TSS
annotatedPeaks.sset = annotatedPeaks[!is.na(annotatedPeaks$distancetoFeature) &
annotatedPeaks$fromOverlappingOrNearest == "NearestStart" &
annotatedPeaks$distancetoFeature < 0 &
abs(annotatedPeaks$distancetoFeature) < v.promoter_length,]
df.CNS_2014["Target"] <- NA
df.CNS_2014["cns_sequence"] <- NA
df.CNS_2014["start_dist_to_TSS"] <- NA
df.CNS_2014["shortest_dist_to_TSS"] <- NA
df.CNS_2014["feature_position"] <- NA
for(i in 1:nrow(annotatedPeaks.sset)){
print(paste(i, " of", nrow(annotatedPeaks.sset)))
## row names of the annotated peaks encode the row index of the original BED table
nr.row <- as.numeric(gsub("\\ .*", "", rownames(annotatedPeaks.sset)[i]))
chr <- df.CNS_2014$Chr[nr.row]
start <- df.CNS_2014$start[nr.row]
end <- df.CNS_2014$end[nr.row]
df.CNS_2014$Target[nr.row] <- as.character(annotatedPeaks.sset$feature[i])
genome_by_chrom <- DNAString(genome[[chr]])
df.CNS_2014$cns_sequence[nr.row] <- as.character(substring(genome_by_chrom,start,end))
df.CNS_2014$start_dist_to_TSS[nr.row] <- abs(as.numeric(annotatedPeaks.sset$distancetoFeature[i]))
df.CNS_2014$shortest_dist_to_TSS[nr.row] <- abs(as.numeric(annotatedPeaks.sset$shortestDistance[i]))
df.CNS_2014$feature_position[nr.row] <- as.character(annotatedPeaks.sset$insideFeature[i])
}
## drop footprints that were not assigned to any promoter, then cache
df.CNS_2014 <- subset(df.CNS_2014, !is.na(df.CNS_2014$cns_sequence))
saveRDS(df.CNS_2014, "Datasets/novelTFmotifs/df.CNS_2014.rds")
}else{
#df.CNS_2014 <- readRDS("Datasets/novelTFmotifs/df.CNS_2014_2000kb_sset.rds")
df.CNS_2014 <- readRDS("Datasets/novelTFmotifs/df.CNS_2014.rds")
}
return(df.CNS_2014)
}
# Scan conserved non-coding sequences (CNS, from extract_CNS2014's cached table)
# for PWM motif matches and build a scored, conservation-backed TF -> Target
# network restricted to the genes in 'vec.genes'.
#
# Side effects (nothing useful is returned): spawns nr.cores parallel workers
# (foreach/doMC), writes per-motif / per-sequence intermediates under
# Datasets/CNS_GRNS/tmp/ and the merged / filtered networks under
# Datasets/CNS_GRNS/. Hit criteria as in perform_TFBS_mapping_all_genes:
# relative score >= 0.8 of motif maximum AND absolute score >= v.th.bind_score.
perform_TFBS_mapping_CNS2014 <- function(vec.genes, v.th.bind_score = 4, nr.cores = 15){
print("compute scored CNS2014-GRN")
#install.packages("foreach")
library(foreach)
library(doMC)
registerDoMC(nr.cores)
lst.motifs <- get_cell_and_pnas_Paper_PWMs()
lst.pwm.motif <- lst.motifs[[1]]
df.motifs <- lst.motifs[[2]]
df.CNS_2014 <- readRDS("Datasets/novelTFmotifs/df.CNS_2014.rds")
df.CNS_2014 <- subset(df.CNS_2014, df.CNS_2014$Target %in% vec.genes)
# df.CNS_2014 <- readRDS(paste("Datasets/novelTFmotifs/df.CNS_2014_2000kb.rds", sep = ""))
#df.CNS_2014 <- subset(df.CNS_2014, df.CNS_2014$end_dist_to_TSS < 1000)
vec.cns.sequences <- as.character(unique(df.CNS_2014$cns_sequence))
n.cns.seq <- length(vec.cns.sequences)
## phase 1: one parallel task per motif -- scan every CNS sequence for hits
#system.time(for(i in 1:1){#length(lst.pwm.motif)){
#foreach(i = 1:2) %dopar% {
foreach(i = 1:length(lst.pwm.motif)) %dopar% {
print(paste("TF Motif ", i, "of", length(lst.pwm.motif)))
df.grn.CNS2014.TFB_map <- data.frame(TF = character(), cns_sequence = character(), TF.Motif = character(),
bind_score = numeric(), cmp_bind_score = numeric(), BS_pos_to_TSS = numeric(),
stringsAsFactors = FALSE)
names(df.grn.CNS2014.TFB_map) <- c("TF", "cns_sequence", "TF.Motif", "bind_score", "cmp_bind_score", "BS_pos_in_CNS")
## matchPWM() expects an integer count matrix -> scale the frequency PWM
pcm <- round(100 * lst.pwm.motif[[i]])
for(j in 1:n.cns.seq){
hits <- matchPWM(pcm, vec.cns.sequences[j], with.score = TRUE)
nhits <- length(hits)
if(nhits >= 1){
cmp_bind_score <- min(mcols(hits)$score / maxScore(pcm)) # should be >= 0.8
motif.score <- mcols(hits)$score / 100  ## undo the x100 count scaling
if(cmp_bind_score >= 0.8){
for(k in 1:nhits){
if(motif.score[k] >= v.th.bind_score){
newrow <- data.frame(TF = as.character(df.motifs$TF.locus[i]),
cns_sequence = vec.cns.sequences[j],
TF.Motif = names(lst.pwm.motif)[i],
bind_score = motif.score[k],
cmp_bind_score = cmp_bind_score,
BS_pos_in_CNS = abs(end(hits)[k] + start(hits)[k])/2,
stringsAsFactors = FALSE)
df.grn.CNS2014.TFB_map <- rbind(df.grn.CNS2014.TFB_map, newrow)
}
}
}
}
}
names(df.grn.CNS2014.TFB_map) <- c("TF", "cns_sequence", "TF.Motif", "bind_score", "cmp_bind_score", "BS_pos_in_CNS")
saveRDS(df.grn.CNS2014.TFB_map, paste("Datasets/CNS_GRNS/tmp/df.cns2014_grn_map_",i,".rds", sep = ""))
}
# combine the per-motif intermediates
## NOTE(review): this accumulator declares only 5 columns while the loaded
## intermediates have 6 (incl. BS_pos_in_CNS) and names() below assigns 6 --
## rbind.data.frame errors on the mismatch; the declaration is likely missing
## a 'BS_pos_in_CNS = numeric()' column.
df.grn.CNS2014.TFB_map <- data.frame(TF = character(), cns_sequence = character(), TF.Motif = character(),
bind_score = numeric(), cmp_bind_score = numeric(),
stringsAsFactors = FALSE)
for(i in 1:length(lst.pwm.motif)){
print(paste("TF Motif ", i, "of", length(lst.pwm.motif)))
df.grn.CNS2014.TFB_map <- rbind(df.grn.CNS2014.TFB_map, readRDS(paste("Datasets/CNS_GRNS/tmp/df.cns2014_grn_map_",i,".rds", sep = "")))
}
names(df.grn.CNS2014.TFB_map) <- c("TF", "cns_sequence", "TF.Motif", "bind_score", "cmp_bind_score", "BS_pos_in_CNS")
#saveRDS(df.grn.CNS2014.TFB_map, paste("Datasets/CNS_GRNS/df.grn.CNS2014.TFB_map.rds", sep = ""))
saveRDS(df.grn.CNS2014.TFB_map, paste("Datasets/CNS_GRNS/df.grn.CNS2014.TFB_map_complete.rds", sep = ""))
# finalize: join motif hits back to the CNS annotation and derive the
# binding-site distance to the TSS from the CNS position + in-CNS offset
df.grn.CNS2014.TFB_map <- readRDS(paste("Datasets/CNS_GRNS/df.grn.CNS2014.TFB_map_complete.rds", sep = ""))
df.grn.CNS2014.TFB_map <- df.grn.CNS2014.TFB_map[,c(-3,-5)]  ## drop TF.Motif + cmp_bind_score
df.grn.CNS2014.TFB_map <- unique(df.grn.CNS2014.TFB_map)
#df.CNS_2014["BS_pos_to_TSS"] <- apply(df.CNS_2014[,c('start_dist_to_TSS','end_dist_to_TSS')], 1, function(x) mean(x) )
#df.CNS_2014 <- df.CNS_2014[, c(-1,-2,-3,-8,-9,-10)]
df.CNS_grn <- merge(df.grn.CNS2014.TFB_map, df.CNS_2014, by = "cns_sequence")
df.CNS_grn["BS_pos_to_TSS"] <- NA
for(i in 1:nrow(df.CNS_grn)){
print(paste(i, "of", nrow(df.CNS_grn)))
## orientation handling: if the CNS 'start' is the side nearest the TSS,
## offsets inside the CNS add to the distance; otherwise they subtract
if(df.CNS_grn$start_dist_to_TSS[i] == df.CNS_grn$shortest_dist_to_TSS[i]){
df.CNS_grn$BS_pos_to_TSS[i] <- df.CNS_grn$shortest_dist_to_TSS[i] + df.CNS_grn$BS_pos_in_CNS[i]
}else{
df.CNS_grn$BS_pos_to_TSS[i] <- df.CNS_grn$start_dist_to_TSS[i] - df.CNS_grn$BS_pos_in_CNS[i]
}
}
saveRDS(df.CNS_grn, paste("Datasets/CNS_GRNS/df.CNS_grn2014_complete.rds", sep = ""))
## phase 2: one parallel task per CNS sequence -- per (TF, Target, position)
## keep only the best-scoring hit
vec.sequences <- unique(df.CNS_grn$cns_sequence)
foreach(i = 1:length(vec.sequences)) %dopar% {
#for(i in 1:length(vec.sequences)){
## NOTE(review): this 6-column accumulator is rbind-ed with 14-column rows of
## df.CNS_grn below -- rbind.data.frame errors on the column mismatch even for
## a zero-row first argument; the declaration likely needs the full column set.
df.CNS_grn.filtered <- data.frame(TF = character(), bind_score = numeric(),
val = numeric(), cons_score = numeric(), Target = character(),
BS_pos_to_TSS = numeric(), stringsAsFactors = FALSE)
print(paste(i, "of",length(vec.sequences)))
df.sset <- subset(df.CNS_grn, df.CNS_grn$cns_sequence == vec.sequences[i])
if(nrow(df.sset) > 0){
tfs <- unique(df.sset$TF)
for(t.1 in 1:length(tfs)){
df.sset.2 <- subset(df.sset, df.sset$TF == tfs[t.1])
targs <- unique(df.sset.2$Target)
for(t.2 in 1:length(targs)){
df.sset.3 <- subset(df.sset.2, df.sset.2$Target == targs[t.2])
vec.pos <- unique(df.sset.3$shortest_dist_to_TSS)
for(p in 1:length(vec.pos)){
df.sset.4 <- subset(df.sset.3, df.sset.3$shortest_dist_to_TSS == vec.pos[p])
df.sset.4 <- subset(df.sset.4, df.sset.4$bind_score == max(unique(df.sset.4$bind_score))) # could be more then one... filter
df.CNS_grn.filtered <- rbind(df.CNS_grn.filtered, df.sset.4[1,])
}
}
}
}
saveRDS(df.CNS_grn.filtered, paste("Datasets/CNS_GRNS/tmp/df.cns2014_grn_filtered_",i,".rds", sep = ""))
}
## merge the filtered per-sequence intermediates
df.CNS_grn.filtered <- data.frame(cns_sequence = character(), TF = character(), bind_score = numeric(), BS_pos_in_CNS = numeric(),
Chr = numeric(), start = numeric(), end = numeric(), val = numeric(), cons_score = numeric(), Target = character(),
start_dist_to_TSS = numeric(), shortest_dist_to_TSS = numeric(), feature_position = character(), BS_pos_to_TSS = numeric(), stringsAsFactors = FALSE)
for(i in 1:length(vec.sequences)){
print(paste(i, "of", length(vec.sequences)))
df.CNS_grn.filtered <- rbind(df.CNS_grn.filtered, readRDS(paste("Datasets/CNS_GRNS/tmp/df.cns2014_grn_filtered_",i,".rds", sep = "")))
}
names(df.CNS_grn.filtered) <- c("cns_sequence", "TF", "bind_score", "BS_pos_in_CNS", "Chr", "start", "end", "val",
"cons_score", "Target", "start_dist_to_TSS", "shortest_dist_to_TSS", "feature_position", "BS_pos_to_TSS")
saveRDS(df.CNS_grn.filtered, paste("Datasets/CNS_GRNS/df.CNS_grn2014_filtered.rds", sep = ""))
}
#' Map verified AthaMap sequence motifs onto the 2014 CNS set for the given genes.
#'
#' For every conserved non-coding sequence (CNS) row whose Target gene is in
#' `vec.genes`, exact-matches (0 mismatches) the set of verified TF binding
#' sequences against the CNS sequence and records one TF -> Target edge per hit,
#' including a binding-site position relative to the TSS derived from the CNS
#' coordinates.
#'
#' @param vec.genes Character vector of target gene IDs used to subset the CNS table.
#' @param v.th.seqMotif_length Minimum length of a verified binding sequence to keep (default 4).
#' @param nr.cores Cores registered with doMC. NOTE(review): the foreach call below is
#'   commented out and a plain for() loop is used, so the workers are currently unused.
#'
#' Side effects: writes one checkpoint RDS per CNS row to Datasets/CNS_GRNS/tmp/
#' and the combined, de-duplicated table to
#' Datasets/CNS_GRNS/df.CNS_grn2014_SeqMotifs_filtered.rds. The function's value is
#' the (invisible NULL) result of the final saveRDS and is not meant to be used.
exact_CNS2014map_seq_motifs <- function(vec.genes, v.th.seqMotif_length = 4, nr.cores = 15){
library(GenomicFeatures)
library(biomaRt)
library(Biostrings)
library(foreach)
library(doMC)
registerDoMC(nr.cores)
# Load verified binding sequences (AthaMap export) and keep only sufficiently long ones.
df.seqMotifs <- read.table("Datasets/novelTFmotifs/athamap/athmap_seqMotifs.txt", header = TRUE, stringsAsFactor = FALSE, sep = "\t")
df.seqMotifs <- subset(df.seqMotifs, nchar(df.seqMotifs$verified_binding_seq) >= v.th.seqMotif_length)
# Normalize TF locus IDs: upper-case and strip embedded spaces.
df.seqMotifs$TF.locus <- toupper(df.seqMotifs$TF.locus)
df.seqMotifs$TF.locus <- unlist(sapply(df.seqMotifs$TF.locus, function(x) gsub(" ","",x, fixed=TRUE)))
# NOTE(review): vec.regs.novel.matrix is a free variable — it must exist in the
# calling/global environment (presumably TFs already covered by PWM matrices).
df.seqMotifs <- subset(df.seqMotifs, !df.seqMotifs$TF.locus %in% vec.regs.novel.matrix)
# Dictionary of motif sequences for matchPDict; index j below corresponds to row j of df.seqMotifs.
dict_seq_motifs <- DNAStringSet(df.seqMotifs$verified_binding_seq)
df.CNS_2014 <- readRDS("Datasets/novelTFmotifs/df.CNS_2014.rds")
df.CNS_2014 <- subset(df.CNS_2014, df.CNS_2014$Target %in% vec.genes)
# df.CNS_2014 <- readRDS(paste("Datasets/novelTFmotifs/df.CNS_2014_2000kb.rds", sep = ""))
#df.CNS_2014 <- subset(df.CNS_2014, df.CNS_2014$end_dist_to_TSS < 1000)
vec.cns.sequences <- as.character(unique(df.CNS_2014$cns_sequence))
n.cns.seq <- length(vec.cns.sequences)
#pdict_seq_motifs <- PDict(dict_seq_motifs)
#foreach(i = 1:nrow(df.CNS_2014)) %dopar% {
# Per-CNS-row loop: each iteration writes its own checkpoint file so a crash
# can be resumed from the tmp/ directory.
for(i in 1:nrow(df.CNS_2014)){
df.CNS_grn.seqMotifs <- data.frame(TF = character(), cns_sequence = character(), Target = character(), cons_score = numeric(), val = numeric(), BS_pos_to_TSS = numeric(), stringsAsFactors = FALSE)
print(paste(i, "of", nrow(df.CNS_2014)))
Target <- df.CNS_2014$Target[i]
# find these motifs in promoters - get genes
# Exact matching (max.mismatch = 0) of every motif against this CNS sequence.
motifMatches <- matchPDict(dict_seq_motifs, DNAString(toString(df.CNS_2014$cns_sequence[i])),max.mismatch=0, min.mismatch=0)
# idxes[j] = number of hits of motif j in this CNS.
# NOTE(review): countIndex()/startIndex()/endIndex() are deprecated in newer
# Biostrings releases (elementNROWS()/startIndex equivalents) — confirm the
# pinned package version still exports them.
idxes <- countIndex(motifMatches)
# how many per promoter and position -
for(j in 1:length(idxes)){
if(idxes[j] > 0){
for(k in 1:idxes[j]){
# SNP position in promoter
# Midpoint of the k-th hit of motif j inside the CNS sequence.
mean_dist_to_TSS <- round(mean(c(endIndex(motifMatches)[[j]][k], startIndex(motifMatches)[[j]][k])))
# Convert the within-CNS offset into a distance relative to the TSS.
# The two branches handle the CNS lying on either side of the TSS:
# when the CNS start is the closest edge, offsets move away from the TSS;
# otherwise they move toward it. TODO(review): confirm the orientation
# convention against the code that built df.CNS_2014.
if(df.CNS_2014$start_dist_to_TSS[i] == df.CNS_2014$shortest_dist_to_TSS[i]){
BS_pos_to_TSS <- df.CNS_2014$shortest_dist_to_TSS[i] + mean_dist_to_TSS
}else{
BS_pos_to_TSS <- df.CNS_2014$start_dist_to_TSS[i] - mean_dist_to_TSS
}
newrow <- data.frame(TF = df.seqMotifs$TF.locus[j], cns_sequence = df.CNS_2014$cns_sequence[i], Target = Target,
cons_score = df.CNS_2014$cons_score[i], val = df.CNS_2014$val[i], BS_pos_to_TSS = BS_pos_to_TSS)
# Grown via rbind inside the loop — O(n^2), acceptable here because each
# checkpoint covers a single CNS row.
df.CNS_grn.seqMotifs <- rbind(df.CNS_grn.seqMotifs, newrow)
}
}
}
names(df.CNS_grn.seqMotifs) <- c("TF", "cns_sequence", "Target", "cons_score", "val", "BS_pos_to_TSS")
saveRDS(df.CNS_grn.seqMotifs, paste("Datasets/CNS_GRNS/tmp/df.CNS_grn.seqMotifs_",i,".rds", sep = ""))
}
# Combine all per-row checkpoints into one table.
# NOTE(review): this accumulator is declared with 4 columns (TF, Target,
# bind_score, BS_pos_to_TSS) but the checkpoint files have 6 differently named
# columns — rbind onto a 0-row frame with mismatching names may error or
# mis-bind depending on the R version; verify before rerunning.
df.CNS_grn.seqMotifs <- data.frame(TF = character(), Target = character(), bind_score = numeric(), BS_pos_to_TSS = numeric(), stringsAsFactors = FALSE)
for(i in 1:nrow(df.CNS_2014)){
print(paste(i, "of", nrow(df.CNS_2014)))
df.CNS_grn.seqMotifs <- rbind(df.CNS_grn.seqMotifs, readRDS(paste("Datasets/CNS_GRNS/tmp/df.CNS_grn.seqMotifs_",i,".rds", sep = "")))
}
names(df.CNS_grn.seqMotifs) <- c("TF", "cns_sequence", "Target", "cons_score", "val", "BS_pos_to_TSS")
df.CNS_grn.seqMotifs <- unique(df.CNS_grn.seqMotifs)
saveRDS(df.CNS_grn.seqMotifs, paste("Datasets/CNS_GRNS/df.CNS_grn2014_SeqMotifs_filtered.rds", sep = ""))
}
#' Scan SNP-extended sequences with TF PWMs and build a filtered SNP-based GRN.
#'
#' Stage 1 (parallel over PWMs): matchPWM() each position weight matrix against
#' every unique SNP-extended sequence; hits passing both a relative score cutoff
#' (>= 0.8 of the matrix maximum) and the absolute `v.th.bind_score` are written
#' to per-motif checkpoint files.
#' Stage 2: checkpoints are combined, merged with the promoter-match table to
#' attach Target genes and TSS distances.
#' Stage 3 (parallel over SNP sequences): for each (sequence, TF, Target,
#' start position) group, only the best-scoring hit is kept; the filtered result
#' is saved to Datasets/SNP/df.grn.snp_filtered.rds.
#'
#' @param vec.genes Character vector of gene IDs. NOTE(review): this parameter is
#'   never used inside the function body — either remove it or apply the intended
#'   gene filter.
#' @param v.th.bind_score Minimum absolute PWM log score for a hit to be kept (default 4).
#' @param nr.cores Cores registered with doMC for the foreach %dopar% stages.
#'
#' Depends on the free function get_cell_and_pnas_Paper_PWMs() defined elsewhere
#' in this file/project. Side effects: reads/writes files under Datasets/SNP/.
perform_TFBS_mapping_SNP <- function(vec.genes, v.th.bind_score = 4, nr.cores = 15){
print("compute scored all gene GRN")
library(GenomicFeatures)
library(biomaRt)
library(Biostrings)
library(foreach)
library(doMC)
registerDoMC(nr.cores)
#df.snp_motifs <- read.table("Datasets/novel_snp_500_motifs.txt", header = FALSE, sep = "\t", quote = "", stringsAsFactor = FALSE)
# Table mapping SNP-extended motif sequences to the promoters (genes) they occur in.
df.founds <- read.table("Datasets/novel_snp_500_motifs_promoter_matches.txt",sep = "\t", header = TRUE, stringsAsFactors = FALSE)
vec.snp.sequences <- as.character(unique(df.founds$ext.motif))
n.snp.seq <- length(vec.snp.sequences)
# PWMs and their TF annotation, provided by a project helper (defined elsewhere).
lst.motifs <- get_cell_and_pnas_Paper_PWMs()
lst.pwm.motif <- lst.motifs[[1]]
df.motifs <- lst.motifs[[2]]
print("perform TFB - SNP mapping")
# Stage 1: one parallel task per PWM; each task writes its own checkpoint file.
foreach(i = 1:length(lst.pwm.motif)) %dopar% {
#system.time(for(i in 1:1){#length(lst.pwm.motif)){
print(paste("TF Motif ", i, "of", length(lst.pwm.motif)))
df.grn.snp <- data.frame(TF = character(), Target = character(), TF.Motif = character(),
bind_score = numeric(), cmp_bind_score = numeric(), BS_pos_in_SNP = numeric(),
stringsAsFactors = FALSE)
# Second column is immediately renamed Target -> snp_sequence to match newrow below.
names(df.grn.snp) <- c("TF", "snp_sequence", "TF.Motif", "bind_score", "cmp_bind_score", "BS_pos_in_SNP")
# Scale the PWM to an integer count matrix (x100) as required by matchPWM scoring here.
pcm <- round(100 * lst.pwm.motif[[i]])
###
for(j in 1:n.snp.seq){
print(j)
hits <- matchPWM(pcm, vec.snp.sequences[j], with.score = TRUE)
nhits <- length(hits)
if(nhits >= 1){
# Relative quality of the *weakest* hit vs. the best possible score of this matrix.
cmp_bind_score <- min(mcols(hits)$score / maxScore(pcm)) # should be >= 0.8
# Undo the x100 scaling so motif.score is on the original PWM log-score scale.
motif.score <- mcols(hits)$score / 100
if(cmp_bind_score >= 0.8){
for(k in 1:nhits){
if(motif.score[k] >= v.th.bind_score){
newrow <- data.frame(TF = as.character(df.motifs$TF.locus[i]),
snp_sequence = vec.snp.sequences[j],
TF.Motif = names(lst.pwm.motif)[i],
bind_score = motif.score[k],
cmp_bind_score = cmp_bind_score,
# Midpoint of the hit within the SNP-extended sequence.
BS_pos_in_SNP = abs(end(hits)[k] + start(hits)[k])/2,
stringsAsFactors = FALSE)
df.grn.snp <- rbind(df.grn.snp, newrow)
}
}
}
}
}
names(df.grn.snp) <- c("TF", "snp_sequence", "TF.Motif", "bind_score", "cmp_bind_score", "BS_pos_in_SNP")
saveRDS(df.grn.snp, paste("Datasets/SNP/tmp/df.grn.snp_",i,".rds", sep = ""))
}
# combine
# Stage 2: sequential merge of all per-PWM checkpoints.
df.grn.snp <- data.frame(TF = character(), snp_sequence = character(), TF.Motif = character(),
bind_score = numeric(), cmp_bind_score = numeric(), BS_pos_in_SNP = numeric(),
stringsAsFactors = FALSE)
for(i in 1:length(lst.pwm.motif)){
print(paste("TF Motif ", i, "of", length(lst.pwm.motif)))
df.grn.snp <- rbind(df.grn.snp, readRDS(paste("Datasets/SNP/tmp/df.grn.snp_",i,".rds", sep = "")))
}
names(df.grn.snp) <- c("TF", "snp_sequence", "TF.Motif", "bind_score", "cmp_bind_score", "BS_pos_in_SNP")
# Drop TF.Motif (col 3) and cmp_bind_score (col 5), then de-duplicate the edges.
df.grn.snp <- df.grn.snp[,c(-3,-5)]
df.grn.snp <- unique(df.grn.snp)
#df.CNS_2014["BS_pos_to_TSS"] <- apply(df.CNS_2014[,c('start_dist_to_TSS','end_dist_to_TSS')], 1, function(x) mean(x) )
#df.CNS_2014 <- df.CNS_2014[, c(-1,-2,-3,-8,-9,-10)]
# Rename df.founds columns so the merge key and Target column line up.
names(df.founds)[1] <- "Target"
names(df.founds)[3] <- "snp_sequence"
df.SNP_grn <- merge(df.grn.snp, df.founds, by = "snp_sequence")
# Absolute binding-site position relative to the TSS: promoter offset minus within-sequence offset.
df.SNP_grn["BS_pos_to_TSS"] <- df.SNP_grn$start_dist_to_TSS - df.SNP_grn$BS_pos_in_SNP
#filter
# Stage 3: per-SNP-sequence filtering, parallelized; keep only the top-scoring
# hit per (TF, Target, start position) group. Each task writes a checkpoint.
foreach(i = 1:length(vec.snp.sequences)) %dopar% {
# for(i in 1:length(vec.snp.sequences)){
df.SNP_grn.filtered <- data.frame(snp_sequence =character(), TF = character(), bind_score = numeric(), BS_pos_in_SNP = numeric(), Target = character(),
snp.motif = character(),start_dist_to_TSS = numeric(), mean_dist_to_TSS = numeric(),BS_pos_to_TSS = numeric(),
stringsAsFactors = FALSE)
print(paste(i, "of",length(vec.snp.sequences)))
df.sset <- subset(df.SNP_grn, df.SNP_grn$snp_sequence == vec.snp.sequences[i])
if(nrow(df.sset) > 0){
tfs <- unique(df.sset$TF)
for(t.1 in 1:length(tfs)){
df.sset.2 <- subset(df.sset, df.sset$TF == tfs[t.1])
targs <- unique(df.sset.2$Target)
for(t.2 in 1:length(targs)){
df.sset.3 <- subset(df.sset.2, df.sset.2$Target == targs[t.2])
vec.pos <- unique(df.sset.3$start_dist_to_TSS)
for(p in 1:length(vec.pos)){
df.sset.4 <- subset(df.sset.3, df.sset.3$start_dist_to_TSS == vec.pos[p])
# Keep only rows with the maximal bind_score, then take the first of any ties.
df.sset.4 <- subset(df.sset.4, df.sset.4$bind_score == max(unique(df.sset.4$bind_score)))
df.SNP_grn.filtered <- rbind(df.SNP_grn.filtered, df.sset.4[1,])
}
}
}
}
saveRDS(df.SNP_grn.filtered, paste("Datasets/SNP/tmp/df.grn.snp_filtered_",i,".rds", sep = ""))
}
# Combine the filtered per-sequence checkpoints and persist the final table.
df.SNP_grn.filtered <- data.frame(snp_sequence =character(), TF = character(), bind_score = numeric(), BS_pos_in_SNP = numeric(), Target = character(),
snp.motif = character(),start_dist_to_TSS = numeric(), mean_dist_to_TSS = numeric(),BS_pos_to_TSS = numeric(),
stringsAsFactors = FALSE)
for(i in 1:length(vec.snp.sequences)){
print(paste(i, "of", length(vec.snp.sequences)))
df.SNP_grn.filtered <- rbind(df.SNP_grn.filtered, readRDS(paste("Datasets/SNP/tmp/df.grn.snp_filtered_",i,".rds", sep = "")))
}
names(df.SNP_grn.filtered) <- c("snp_sequence", "TF", "bind_score", "BS_pos_in_SNP", "Target", "snp.motif", "start_dist_to_TSS", "mean_dist_to_TSS", "BS_pos_to_TSS")
saveRDS(df.SNP_grn.filtered, paste("Datasets/SNP/df.grn.snp_filtered.rds", sep = ""))
#saveRDS(df.SNP_grn, paste("Datasets/SNP/df.grn.snp.rds", sep = ""))
}
#' Map cMonkey bicluster MEME motifs to known TF PWMs and emit TF -> Target edges.
#'
#' For each of five saved cMonkey environments (development + four perturbation
#' sets) and each bicluster: extract the (up to two) de-novo MEME PSSMs, match
#' them against the library of known TF PWMs with MotIV::motifMatch, and record,
#' for every matching TF and every gene carrying the motif, an edge annotated
#' with the motif e-value, per-gene p-value, PWM-similarity e-value, and the
#' binding-site distance to the TSS. Matches are filtered to
#' e.val.TFB_similarity <= v.th.eVal_pwm_match and reduced to the best match per
#' (bicluster motif, TF, Target).
#'
#' @param v.th.eVal_pwm_match Maximum MotIV alignment e-value to accept a PWM match (default 0.05).
#' @param n.top.matching.motifs Number of top database PWMs motifMatch returns per query (default 15).
#' @param nr.cores Cores registered with doMC; the outer loop over environments runs %dopar%.
#' @param load_from_file Unused in the visible body. NOTE(review): dead parameter.
#'
#' Side effects: reads the five env.cMonkey RDS files and writes per-cluster
#' checkpoints plus a combined df.grn.biclust.TFB_map_<e>.rds per environment
#' under Datasets/BiClust_GRNS/.
perform_cMonkey_TFBS_mapping <- function(v.th.eVal_pwm_match = 0.05, n.top.matching.motifs = 15, nr.cores = 15, load_from_file = FALSE){
# recompute the biclustering (later... )
library(cMonkey)
library(MotIV)
library(foreach)
library(doMC)
registerDoMC(nr.cores)
n.env.cMonkey <- 5
lst.env.cMonkey <- vector(mode = "list", length = n.env.cMonkey)
lst.env.cMonkey[[1]] <- readRDS("Datasets/BiClust_GRNS/env.cMonkey.development.rds")
lst.env.cMonkey[[2]] <- readRDS("Datasets/BiClust_GRNS/env.cMonkey.perturbation_CoSpecificity.rds")
lst.env.cMonkey[[3]] <- readRDS("Datasets/BiClust_GRNS/env.cMonkey.perturbation_abiotic.rds")
lst.env.cMonkey[[4]] <- readRDS("Datasets/BiClust_GRNS/env.cMonkey.perturbation_biotic.rds")
lst.env.cMonkey[[5]] <- readRDS("Datasets/BiClust_GRNS/env.cMonkey.perturbation_hormone.rds")
# Known TF PWM library (project helper defined elsewhere) scaled to integer counts (x100),
# plus the MotIV background score distribution used for e-value computation.
lst.motifs <- get_cell_and_pnas_Paper_PWMs()
lst.pwm.motif <- lst.motifs[[1]]
df.motifs <- lst.motifs[[2]]
lst.pwm.motif <- lapply(lst.pwm.motif, function(x) round(100 * x))
pwm.scores <- generateDBScores(inputDB=lst.pwm.motif,cc="PCC",align="SWU",nRand=1000)
# One parallel task per cMonkey environment.
foreach(e = 1:n.env.cMonkey) %dopar% {
#for(e in 1:n.env.cMonkey){
env.cMonkey <- lst.env.cMonkey[[e]]
# mat.GE <- env.cMonkey$ratios$ratios
# genes <- rownames(mat.GE)
#conditions <- colnames(mat.GE)
df.motifs.cMonkey <- env.cMonkey$cluster.summary()
# cMonkey reports up to two de-novo motifs per bicluster.
n.cmonkey_motifs <- 2
for(c in 1:env.cMonkey$k.clust){
df.grn.biclust.TFB_map <- data.frame(TF = character(), Target = character(), TF.Motif = character(), Rank.TF.Motif = numeric(), biclust_id = numeric(),
nr.biclust_motif = numeric(), e.val.Motif = numeric(), p.val_per_gene = numeric(), e.val.TFB_similarity = numeric(),
BS_pos_to_TSS = numeric(), stringsAsFactors = FALSE)
print(paste(c,"of",env.cMonkey$k.clust))
cluster.genes <- env.cMonkey$get.rows(c) # get genes
cluster.conditions <- env.cMonkey$get.cols(c) # get conditions
# Only process clusters for which MEME produced a PSSM.
if(!is.null(env.cMonkey$meme.scores[[1]][[c]]$meme.out[[1]]$pssm)){
# PWM per cluster
# Build MotIV-compatible PWMs (rows A/C/G/T) from the cluster's MEME PSSMs
# and trim low-information edges.
lst.pwms <- vector(mode = "list", length = n.cmonkey_motifs)
for(i in 1:length(lst.pwms)){
lst.pwms[[i]] <- t(env.cMonkey$meme.scores[[1]][[c]]$meme.out[[i]]$pssm)
rownames(lst.pwms[[i]]) <- c("A","C","G","T")
colnames(lst.pwms[[i]]) <- seq(1:ncol(lst.pwms[[i]]))
}
lst.pwms <- lapply(lst.pwms,trimPWMedge, threshold=1)
names(lst.pwms) <- c("pwm1", "pwm2")
# genes, distances to TSS, gene specific p values, motif evalue
# top 2 matching matrices
# Match each cluster PWM against the known-TF PWM library.
res <- motifMatch(inputPWM = lst.pwms, database = lst.pwm.motif, DBscores = pwm.scores, top = n.top.matching.motifs)
for(i in 1:length(lst.pwms)){
n.matchin.TF_pwms <- length(res@bestMatch[[i]]@aligns)
if(n.matchin.TF_pwms > 0){
# Motif-level statistics from MEME for cluster motif i.
e.val <- env.cMonkey$meme.scores[[1]][[c]]$meme.out[[i]]$e.value
genes <- as.character(env.cMonkey$meme.scores[[1]][[c]]$meme.out[[i]]$posns$gene)
#as.numeric(env.cMonkey$meme.scores[[1]][[c]]$meme.out[[i]]$posns$strand)
dist_to_TSS_per_gene <- as.numeric(env.cMonkey$meme.scores[[1]][[c]]$meme.out[[i]]$posns$start) # distance to start
p.val_per_gene <- as.numeric(env.cMonkey$meme.scores[[1]][[c]]$meme.out[[i]]$posns$p.value) # gene based p value
# Cross product: each matching known TF x each gene carrying the motif.
for(k in 1:n.matchin.TF_pwms){
motif.id <- res@bestMatch[[i]]@aligns[[k]]@TF@name
e.val.similarity <- res@bestMatch[[i]]@aligns[[k]]@evalue
# Translate the matched motif ID back to a TF locus via the annotation table.
idx <- match(motif.id, df.motifs$Motif.ID)
for(l in 1:length(genes)){
# NOTE(review): this row names the column e.val.TF.Motif while the empty
# accumulator above declared e.val.Motif — rbind onto a 0-row frame with
# mismatching names can error in base R; verify this path has actually run.
newrow <- data.frame(TF = df.motifs$TF.locus[idx],
Target = genes[l],
TF.Motif = motif.id,
Rank.TF.Motif = k,
biclust_id = c,
nr.biclust_motif = i,
e.val.TF.Motif = e.val,
p.val_per_gene = p.val_per_gene[l],
e.val.TFB_similarity = e.val.similarity,
BS_pos_to_TSS = dist_to_TSS_per_gene[l],
stringsAsFactors = FALSE)
df.grn.biclust.TFB_map <- rbind(df.grn.biclust.TFB_map, newrow)
}
}
}
}
}
names(df.grn.biclust.TFB_map) <- c("TF", "Target", "TF.Motif", "Rank.TF.Motif", "biclust_id", "nr.biclust_motif", "e.val.TF.Motif",
"p.val_per_gene", "e.val.TFB_similarity", "BS_pos_to_TSS")
# Reduce to the single best (lowest similarity e-value) match per
# (bicluster motif, TF, Target) combination.
df.grn.biclust.TFB_map.filtered <- data.frame(TF = character(), Target = character(), TF.Motif = character(), Rank.TF.Motif = numeric(), biclust_id = numeric(),
nr.biclust_motif = numeric(), e.val.Motif = numeric(), p.val_per_gene = numeric(), e.val.TFB_similarity = numeric(),
BS_pos_to_TSS = numeric(), stringsAsFactors = FALSE)
if(nrow(df.grn.biclust.TFB_map) > 0){
df.grn.biclust.TFB_map <- subset(df.grn.biclust.TFB_map, df.grn.biclust.TFB_map$e.val.TFB_similarity <= v.th.eVal_pwm_match)
for(m in 1:n.cmonkey_motifs){
df.sset <- subset(df.grn.biclust.TFB_map, df.grn.biclust.TFB_map$nr.biclust_motif == m)
tfs <- unique(df.sset$TF)
for(t in 1:length(tfs)){
df.sset.2 <- subset(df.sset, df.sset$TF == tfs[t])
targs <- unique(df.sset.2$Target)
for(l in 1:length(targs)){
df.sset.3 <- subset(df.sset.2, df.sset.2$Target == targs[l])
df.sset.3 <- subset(df.sset.3, df.sset.3$e.val.TFB_similarity == min(unique(df.sset.3$e.val.TFB_similarity)))
df.grn.biclust.TFB_map.filtered <- rbind(df.grn.biclust.TFB_map.filtered, df.sset.3[1,])
}
}
}
names(df.grn.biclust.TFB_map.filtered) <- c("TF", "Target", "TF.Motif", "Rank.TF.Motif", "biclust_id", "nr.biclust_motif", "e.val.TF.Motif",
"p.val_per_gene", "e.val.TFB_similarity", "BS_pos_to_TSS")
# Checkpoint is only written for clusters with at least one surviving row.
saveRDS(df.grn.biclust.TFB_map.filtered, paste("Datasets/BiClust_GRNS/tmp/df.grn.biclust.TFB_map_",e,"_",c,".rds", sep = ""))
}
}
# combine
# NOTE(review): this combine loop reads a checkpoint for EVERY cluster, but the
# saveRDS above is conditional on nrow > 0 — readRDS will fail for clusters
# that produced no filtered rows. Guard with file.exists() before rerunning.
df.grn.biclust.TFB_map <- data.frame(TF = character(), Target = character(), TF.Motif = character(), Rank.TF.Motif = numeric(), biclust_id = numeric(),
nr.biclust_motif = numeric(), e.val.Motif = numeric(), p.val_per_gene = numeric(), e.val.TFB_similarity = numeric(),
BS_pos_to_TSS = numeric(), stringsAsFactors = FALSE)
for(c in 1:env.cMonkey$k.clust){
print(paste(c,"of",env.cMonkey$k.clust))
df.grn.biclust.TFB_map <- rbind(df.grn.biclust.TFB_map, readRDS(paste("Datasets/BiClust_GRNS/tmp/df.grn.biclust.TFB_map_",e,"_",c,".rds", sep = "")))
}
names(df.grn.biclust.TFB_map) <- c("TF", "Target", "TF.Motif", "Rank.TF.Motif", "biclust_id", "nr.biclust_motif", "e.val.TF.Motif",
"p.val_per_gene", "e.val.TFB_similarity", "BS_pos_to_TSS")
saveRDS(df.grn.biclust.TFB_map, paste("Datasets/BiClust_GRNS/df.grn.biclust.TFB_map_",e,".rds", sep = ""))
}
# n.env.cMonkey <- 5
# vec.global_condition <- c("development, abiotic, biotic, hormone, tissue_specifity")
# lst.df.grn.biclust.TFB_map <- vector(mode = "list", length = n.env.cMonkey)
# for(e in 1:n.env.cMonkey){
# readRDS(paste("Datasets/BiClust_GRNS/df.grn.biclust.TFB_map_",e,".rds", sep = ""))
#
# }
}
## REMAINING STUFF
#' Combine TF-binding evidence tables into binary TF-binding matrices and
#' compute (partial and full) transitive closures over the TF-TF subnetwork.
#'
#' Workflow (much of it superseded code kept commented out):
#' 1. Load the CNS-based, sequence-motif-based and PWM-based edge tables.
#' 2. Assign fixed bind_score values to the motif/CNS tables, pool everything,
#'    and convert bind scores to empirical probabilities via ecdf(); keep edges
#'    with p.TFB >= P(7) and materialize them as a binary gene x gene matrix.
#' 3. Build a CNS-only binding matrix, with 0.5 as the "unknown" value for TFs
#'    lacking any binding evidence.
#' 4. Pool all edge tables again (un-scored), build mat.grn.tfb, and propagate
#'    TF->TF edges one step into the TF->module (TF_ME) matrix.
#' 5. Compute the full transitive closure of the TF-TF submatrix with
#'    agop::closure_transitive and propagate it into TF_ME as well.
#'
#' NOTE(review): this function is a workspace script wrapped in function syntax.
#' It depends on many free variables that must exist in the calling environment:
#' vec.tfs, vec.tgs, mat.grn, and vec.TF_with_TFB. In particular vec.TF_with_TFB
#' is USED (line marked below) before it is assigned later in this same body —
#' as written, the early use only works if a global of that name already exists.
#' It takes no parameters, and its side effects are saveRDS calls into
#' Datasets/workspace/.
compute_transitive_closure <- function(){
# Universe of gene IDs for the square adjacency matrices (free vars vec.tfs/vec.tgs).
vec.genes <- unique(c(vec.tfs, vec.tgs))
#df.grn.biclust.TFB_map <- readRDS(paste("Datasets/BiClust_GRNS/df.grn.biclust.TFB_map_",e,".rds", sep = ""))
#df.SNP_grn.filtered <- readRDS(paste("Datasets/SNP/df.grn.snp_filtered.rds", sep = ""))
# Load the precomputed evidence tables (note the relative "../" paths here,
# unlike the other functions in this file — working directory must differ).
df.CNS_grn.seqMotifs <- readRDS(paste("../Datasets/CNS_GRNS/df.CNS_grn2014_SeqMotifs_filtered.rds", sep = ""))
df.CNS_grn.filtered <- readRDS(paste("../Datasets/CNS_GRNS/df.CNS_grn2014_filtered.rds", sep = ""))
df.grn.all_motifs_filtered <- readRDS(paste("../Datasets/novelTFmotifs/df.grn.all_motifs_filtered", sep = ""))
df.grn.seqMotifs <- readRDS("../Datasets/novelTFmotifs/df.grn.seqMotifs.rds")
df.grn.CNS <- readRDS(paste("../Datasets/GRMs/df.grn.CNS.rds", sep = ""))
### Conserved Binding
# df.CNS_grn.filtered <- readRDS(paste("Datasets/CNS_GRNS/df.CNS_grn2014_filtered.rds", sep = ""))
# df.CNS_grn.filtered <- df.CNS_grn.filtered[,c(2,10,3)]
# df.CNS_grn.filtered <- subset(df.CNS_grn.filtered, df.CNS_grn.filtered$bind_score >= 6)
# df.CNS_grn.filtered <- df.CNS_grn.filtered[,c(1:2)]
#
# df.CNS_grn.seqMotifs <- df.CNS_grn.seqMotifs[,c(1,3)]
# df.CNS_grn.seqMotifs <- unique(df.CNS_grn.seqMotifs)
#
# df.grn.CNS <- rbind(df.grn.CNS, df.CNS_grn.seqMotifs)
# df.grn.CNS <- rbind(df.grn.CNS, df.CNS_grn.filtered)
# df.grn.CNS <- unique(df.grn.CNS)
#
#
# df.grn.CNS <- subset(df.grn.CNS, df.grn.CNS$TF %in% rownames(mat.grn) & df.grn.CNS$Target %in% vec.genes)
# mat.grn.tfb.CNS <- matrix(0, nrow = length(vec.genes), ncol = length(vec.genes),
# dimnames = list(vec.genes, vec.genes))
#
# for(i in 1:nrow(df.grn.CNS)){
# print(paste(i, "of", nrow(df.grn.CNS)))
# tf <- df.grn.CNS$TF[i]
# tg <- df.grn.CNS$Target[i]
# #if(mat.grn.tfb[tf, tg] < df.grn.TFB.combined$p.TFB[i]){
# mat.grn.tfb.CNS[tf, tg] <- 1 #df.grn.TFB.combined$p.TFB[i]
# #}
# }
# saveRDS(mat.grn.tfb.CNS, "Datasets/workspace/mat.grn.tfb.CNS.all_8.rds")
#mat.grn.tfb.CNS <- readRDS("Datasets/workspace/mat.grn.tfb.CNS.rds")
#mat.grn.tfb <- readRDS("Datasets/workspace/mat.grn.tfb.probs_above_95.rds")
#mat.grn.tfb.CNS[(!rownames(mat.grn.tfb.CNS) %in% vec.TF_with_TFB), ] <- 0.5
#### Binding probability
# Assign fixed surrogate bind scores so the heterogeneous evidence types can be
# pooled: 8 for CNS-backed edges, 7 for plain sequence-motif edges.
df.grn.all_motifs_filtered <- df.grn.all_motifs_filtered[,c(1,2,3)]
df.CNS_grn.seqMotifs["bind_score"] <- 8
df.CNS_grn.seqMotifs <- df.CNS_grn.seqMotifs[,c(1,3,7)]
df.CNS_grn.seqMotifs <- unique(df.CNS_grn.seqMotifs)
df.grn.seqMotifs["bind_score"] <- 7
df.grn.seqMotifs <- df.grn.seqMotifs[,c(1,2,4)]
df.grn.seqMotifs <- unique(df.grn.seqMotifs)
df.grn.CNS["bind_score"] <- 8
df.grn.seqMotifs <- rbind(df.grn.seqMotifs, df.grn.CNS)
df.grn.seqMotifs <- rbind(df.grn.seqMotifs, df.CNS_grn.seqMotifs)
df.grn.all_motifs_filtered <- rbind(df.grn.all_motifs_filtered, df.grn.seqMotifs)
# Convert pooled bind scores to empirical cumulative probabilities.
x <- df.grn.all_motifs_filtered$bind_score
P = ecdf(x)
df.grn.TFB.combined <- df.grn.all_motifs_filtered
df.grn.TFB.combined["p.TFB"] <- P(df.grn.all_motifs_filtered$bind_score)
df.grn.TFB.combined <- subset(df.grn.TFB.combined, df.grn.TFB.combined$TF %in% vec.tfs & df.grn.TFB.combined$Target %in% vec.genes)
# Keep only edges scoring at least as well as a raw bind_score of 7.
df.grn.TFB.combined <- subset(df.grn.TFB.combined, df.grn.TFB.combined$p.TFB >= P(7))
df.grn.TFB.combined <- unique(df.grn.TFB.combined[,c(1,2)])
vec.regs.tfb <- unique(df.grn.TFB.combined$TF)
vec.tgs.tfb <- unique(df.grn.TFB.combined$Target)
# Materialize the surviving edges as a binary gene x gene adjacency matrix.
mat.grn.tfb <- matrix(0, nrow = length(vec.genes), ncol = length(vec.genes),
dimnames = list(vec.genes, vec.genes))
for(i in 1:nrow(df.grn.TFB.combined)){
print(paste(i, "of", nrow(df.grn.TFB.combined)))
tf <- df.grn.TFB.combined$TF[i]
tg <- df.grn.TFB.combined$Target[i]
mat.grn.tfb[tf, tg] <- 1 #df.grn.TFB.combined$p.TFB[i]
}
saveRDS(mat.grn.tfb, "Datasets/workspace/mat.grn.tfb.probs_above_95.rds")
#mat.grn.tfb <- readRDS("Datasets/workspace/mat.grn.tfb.probs_above_95.rds")
#mat.grn.tfb[(!rownames(mat.grn.tfb) %in% vec.TF_with_TFB), ] <- 0.5
#mat.grn.tfb <- readRDS("Datasets/workspace/mat.grn.tfb.probs_above_95.rds")
#mat.grn.tfb[(!rownames(mat.grn.tfb) %in% vec.TF_with_TFB), ] <- 0.5
###
# CNS-only binding matrix (free var mat.grn supplies the TF name universe).
df.grn.CNS <- subset(df.grn.CNS, df.grn.CNS$TF %in% rownames(mat.grn) & df.grn.CNS$Target %in% vec.genes)
mat.grn.tfb.CNS <- matrix(0, nrow = length(vec.genes), ncol = length(vec.genes),
dimnames = list(vec.genes, vec.genes))
for(i in 1:nrow(df.grn.CNS)){
print(paste(i, "of", nrow(df.grn.CNS)))
tf <- df.grn.CNS$TF[i]
tg <- df.grn.CNS$Target[i]
#if(mat.grn.tfb[tf, tg] < df.grn.TFB.combined$p.TFB[i]){
mat.grn.tfb.CNS[tf, tg] <- 1 #df.grn.TFB.combined$p.TFB[i]
#}
}
#saveRDS(mat.grn.tfb.CNS, "Datasets/workspace/mat.grn.tfb.CNS.rds")
# NOTE(review): the matrix just built above is immediately discarded and
# replaced by the cached version from disk.
mat.grn.tfb.CNS <- readRDS("Datasets/workspace/mat.grn.tfb.CNS.rds")
#mat.grn.tfb <- readRDS("Datasets/workspace/mat.grn.tfb.probs_above_95.rds")
# Mark TFs without any binding evidence as "unknown" (0.5) rather than absent (0).
# NOTE(review): vec.TF_with_TFB is used here but only assigned further below —
# this reads a pre-existing global, not the value computed in this function.
mat.grn.tfb.CNS[(!rownames(mat.grn.tfb.CNS) %in% vec.TF_with_TFB), ] <- 0.5
#df.grn.all_motifs_filtered <- rbind(df.grn.all_motifs_filtered, df.CNS_grn.seqMotifs)
####
v.th.binding_cutoff <- 4.9
#df.CNS_grn.filtered <- subset(df.CNS_grn.filtered, df.CNS_grn.filtered$bind_score >= v.th.binding_cutoff)
#df.grn.all_motifs_filtered <- subset(df.grn.all_motifs_filtered, df.grn.all_motifs_filtered$bind_score >= v.th.binding_cutoff)
# Second pooling pass: union of all evidence tables as plain (TF, Target) pairs,
# without the probability filter.
df.grn.TFB.combined <- df.grn.CNS
df.grn.TFB.combined <- rbind(df.grn.TFB.combined, df.CNS_grn.seqMotifs[,c(1,3)])
df.grn.TFB.combined <- rbind(df.grn.TFB.combined, df.CNS_grn.filtered[,c(2,10)])
df.grn.TFB.combined <- rbind(df.grn.TFB.combined, df.grn.all_motifs_filtered[,c(1,2)])
df.grn.TFB.combined <- rbind(df.grn.TFB.combined, df.grn.seqMotifs[,c(1,2)])
df.grn.TFB.combined <- unique(df.grn.TFB.combined)
# 4.9 cutoff
# + TFB (!!1/0/0.5 interconnected!!!)
#df.grn.TFB.combined <- readRDS(paste("Datasets/GRMs/df.grn.TFB.combined.rds", sep = ""))
vec.genes <- c(rownames(mat.grn), colnames(mat.grn))
df.grn.TFB.combined <- subset(df.grn.TFB.combined, df.grn.TFB.combined$TF %in% rownames(mat.grn) & df.grn.TFB.combined$Target %in% vec.genes)
vec.regs.tfb <- unique(df.grn.TFB.combined$TF)
vec.tgs.tfb <- unique(df.grn.TFB.combined$Target)
# Rebuild the binary binding matrix from the un-scored pooled edge list.
mat.grn.tfb <- matrix(0, nrow = length(vec.genes), ncol = length(vec.genes),
dimnames = list(vec.genes, vec.genes))
for(i in 1:nrow(df.grn.TFB.combined)){
print(paste(i, "of", nrow(df.grn.TFB.combined)))
tf <- df.grn.TFB.combined$TF[i]
tg <- df.grn.TFB.combined$Target[i]
mat.grn.tfb[tf, tg] <- 1
}
#saveRDS(mat.grn.tfb, "Datasets/workspace/mat.grn.tfb.rds")
#saveRDS(mat.grn.tfb, "Datasets/workspace/mat.grn.tfb.1.rds")
#saveRDS(mat.grn.tfb, "Datasets/workspace/mat.grn.tfb.2.rds")
###
# One-step propagation over the TF-TF subnetwork: if TF x regulates some TF that
# regulates y, mark x's targets as reachable. (Single step, not a full closure.)
mat.grn.TF_TF <- mat.grn.tfb[rownames(mat.grn), rownames(mat.grn)]
mat.grn.TF_TF.transitive <- mat.grn.TF_TF
for(y in 1:ncol(mat.grn.TF_TF)){
print(y)
tfs.y <- names(which(mat.grn.TF_TF[,y] == 1))
if(length(tfs.y) > 0){
for(x in 1:length(tfs.y)){
idx.tgs <- as.numeric(which(mat.grn.TF_TF[tfs.y[x], ] == 1))
mat.grn.TF_TF.transitive[tfs.y[x], idx.tgs] <- 1
}
}
}
#mat.grn <- mat.pred.grn
# 1 step transitive
# Propagate the one-step TF-TF reachability into the TF -> module matrix.
# NOTE(review): the inner for() has no length guard here (unlike the loop above)
# — if tfs.y is empty, 1:length(tfs.y) yields c(1, 0) and the loop body runs
# with out-of-range indices.
mat.grn.TF_ME <- mat.grn.tfb[rownames(mat.grn), colnames(mat.grn)]
mat.grn.TF_ME.transitive <- mat.grn.TF_ME
for(y in 1:ncol(mat.grn.TF_TF.transitive)){
print(y)
tfs.y <- names(which(mat.grn.TF_TF.transitive[,y] == 1))
for(x in 1:length(tfs.y)){
idx.tgs <- as.numeric(which(mat.grn.TF_ME[tfs.y[x], ] == 1))
mat.grn.TF_ME.transitive[tfs.y[x], idx.tgs] <- 1
}
}
#mat.grn.TF_ME.transitive[(!rownames(mat.grn.TF_ME.transitive) %in% vec.TF_with_TFB), ] <- 0.5
saveRDS(mat.grn.TF_ME.transitive, "Datasets/workspace/mat.grn.TF_ME.transitive.2.rds")
#saveRDS(mat.grn.TF_ME.transitive, "Datasets/workspace/mat.grn.TF_ME.transitive.1.80.rds")
#saveRDS(mat.grn.TF_ME.transitive, "Datasets/workspace/mat.grn.TF_ME.transitive.1.95.rds")
######
# Full transitive closure of the TF-TF submatrix restricted to TFs that have
# binding evidence, computed with agop::closure_transitive (timed).
vec.TF_with_TFB <- unique(df.grn.TFB.combined$TF)
mat.grn.TF_TF_with_TFB <- mat.grn.tfb[vec.TF_with_TFB, vec.TF_with_TFB]
library(agop)
strt<-Sys.time()
mat.grn.tfb.TC <- closure_transitive(mat.grn.TF_TF_with_TFB)
print(Sys.time()-strt)
#saveRDS(mat.grn.tfb.TC, "Datasets/workspace/mat.grn.tfb.TC.rds")
#saveRDS(mat.grn.tfb.TC, "Datasets/workspace/mat.grn.tfb.TC.1.rds")
# mark a connection
#mat.grn.TF_TF <- mat.grn.tfb[rownames(mat.grn), rownames(mat.grn)]
# Propagate the full closure into the TF -> module matrix (same unguarded-loop
# caveat as above when a column has no regulating TFs).
mat.grn.TF_ME <- mat.grn.tfb[rownames(mat.grn), colnames(mat.grn)]
mat.grn.TF_ME.transitive <- mat.grn.TF_ME
for(y in 1:ncol(mat.grn.tfb.TC)){
print(y)
tfs.y <- names(which(mat.grn.tfb.TC[,y] == 1))
for(x in 1:length(tfs.y)){
idx.tgs <- as.numeric(which(mat.grn.TF_ME[tfs.y[x], ] == 1))
mat.grn.TF_ME.transitive[tfs.y[x], idx.tgs] <- 1
}
}
# TFs without binding evidence are marked "unknown" (0.5) across the row.
mat.grn.TF_ME.transitive[(!rownames(mat.grn.TF_ME.transitive) %in% vec.TF_with_TFB), ] <- 0.5
# saveRDS(mat.grn.TF_ME.transitive, "Datasets/workspace/mat.grn.TF_ME.transitive.rds")
}
# ## gene filter before proceed
# df.CNS_2014_2000kb <- readRDS(paste("Datasets/novelTFmotifs/df.CNS_2014_2000kb.rds", sep = ""))
# df.CNS_2014_2000kb_sset <- subset(df.CNS_2014_2000kb, df.CNS_2014_2000kb$gene %in% vec.genes.considered)
# saveRDS(df.CNS_2014_2000kb_sset, "Datasets/novelTFmotifs/df.CNS_2014_2000kb_sset.rds")
#
# prepare_NCS2014_GRN <- function(load_from_file = FALSE){
#
# if(!load_from_file){
#
# #source("http://bioconductor.org/biocLite.R")
# #biocLite("PWMEnrich")
# library(PWMEnrich)
#
# print("compute scored CNS2014-GRN")
#
# df.CNS_2014 <- extract_CNS2014()
# lst.motifs <- get_cell_and_pnas_Paper_PWMs()
# lst.pwm.motif <- lst.motifs[[1]]
# df.motifs <- lst.motifs[[2]]
#
# lst.log_pwm.motif <- vector(mode = "list", length = length(lst.pwm.motif))
# for(i in 1:length(lst.pwm.motif)){
# log_pwm <- new("PWM", id = names(lst.pwm.motif)[i], name = df.motifs$TF.locus[i], pfm = lst.pwm.motif[[i]], prior.params = c(A = 0.25, C = 0.25, G = 0.25, T = 0.25), pwm = log2(lst.pwm.motif[[i]]))
# lst.log_pwm.motif[[i]] <- log_pwm
# }
#
# n.matches <- nrow(df.CNS_2014) * 3
# df.grn.CNS2014 <- data.frame(TF = character(n.matches), Target = character(n.matches), TF.Motif = character(n.matches),cons_score = numeric(n.matches), align.score = numeric(n.matches),
# bind_score = numeric(n.matches), p.val = numeric(n.matches), rank = numeric(n.matches), mean_dist_to_TSS = numeric(n.matches), stringsAsFactors = FALSE)
# names(df.grn.CNS2014) <- c("TF", "Target", "TF.Motif", "cons_score", "align_score", "bind_score", "p.val", "rank", "mean_dist_to_TSS")
#
# idx <- 1
# for(j in 1:nrow(df.CNS_2014)){
# print(paste(j, "of", nrow(df.CNS_2014)))
# sequence <- DNAString(as.character(df.CNS_2014$cns_sequence[j]))
# lst.log_pwm.motif.sset <- lst.log_pwm.motif
# for(l in 1:length(lst.log_pwm.motif.sset)){
# if(length(sequence) < ncol(lst.log_pwm.motif.sset[[l]]$pwm)){
# lst.log_pwm.motif.sset[[l]] <- character(0)
# }
# }
# lst.log_pwm.motif.sset <- Filter(length, lst.log_pwm.motif.sset)
# if(length(lst.log_pwm.motif.sset) > 0){
# res = motifEnrichment(sequence, lst.log_pwm.motif.sset, score = "affinity")
# report = sequenceReport(res, 1)
#
# for(t in 1:3){
# df.grn.CNS2014$TF[idx] <- report$target[t]
# df.grn.CNS2014$Target[idx] <- df.CNS_2014$gene[j]
# df.grn.CNS2014$TF.Motif[idx] <- report$id[t]
# df.grn.CNS2014$cons_score[idx] <- df.CNS_2014$cons_score[j]
# df.grn.CNS2014$align_score[idx] <- df.CNS_2014$val[j]
# df.grn.CNS2014$bind_score[idx] <- report$raw.score[t]
# df.grn.CNS2014$rank[idx] <- report$rank[t]
# df.grn.CNS2014$p.val[idx] <- report$p.value[t]
# df.grn.CNS2014$mean_dist_to_TSS[idx] <- mean(c(df.CNS_2014$start_dist_to_TSS[j], df.CNS_2014$end_dist_to_TSS[j]))
# idx <- idx + 1
# }
# }else{
# idx <- idx + 3
# }
# }
# df.grn.CNS2014 <- subset(df.grn.CNS2014, df.grn.CNS2014$TF != "")
# saveRDS(df.grn.CNS2014, paste("Datasets/CNS_GRNS/df.cns2014_grn.rds", sep = ""))
#
# }
#
#
#
# ### --...
#
# print("compute scored CNS2014-GRN")
#
# df.CNS_2014 <- extract_CNS2014()
#
# system.time(for(i in 1:1){#length(lst.pwm.motif)){
# print(paste("TF Motif ", i, "of", length(lst.pwm.motif)))
# df.grn.CNS2014 <- data.frame(TF = character(n.matches), Target = character(n.matches), TF.Motif = character(n.matches),
# cons_score = numeric(n.matches), align.score = numeric(n.matches),
# bind_score = numeric(n.matches), cmp_bind_score = numeric(n.matches),
# mean_dist_to_TSS = numeric(n.matches), stringsAsFactors = FALSE)
# names(df.grn.CNS2014) <- c("TF", "Target", "TF.Motif", "cons_score", "align_score", "bind_score", "cmp_bind_score", "mean_dist_to_TSS")
#
#
# for(j in 1:nrow(df.CNS_2014)){
# print(paste(j, "of", nrow(df.CNS_2014)))
# hits <- matchPWM(lst.pwm.motif[[i]], as.character(df.CNS_2014$cns_sequence[j]), with.score = TRUE)
# nhits <- length(hits)
#
#
# #test.seqs <- DNAStringSet(unique(df.CNS_2014$cns_sequence))
# #system.time(pwm.hits <- sapply(test.seqs, function(pseq) matchPWM(lst.pwm.motif[[i]], pseq, min.score="90%")))
# #scores = motifScores(sequence, test, raw.scores=TRUE)
# #head(scores[[1]])
# #res = motifEnrichment(sequence, lst.log_pwm.motif, score = "affinity")
# #report = sequenceReport(res, 1)
#
# if(nhits >= 1){
# cmp_bind_score <- min(mcols(hits)$score / maxScore(lst.pwm.motif[[i]])) # should be >= 0.8
# motif.score <- mcols(hits)$score
# if(cmp_bind_score >= 0.8){
# newrow <- data.frame(TF = as.character(df.motifs$TF.locus[i]), Target = as.character(df.CNS_2014$gene[j]),
# TF.Motif = lst.log_pwm.motif[[i]]$id,
# cons_score = as.numeric(df.CNS_2014$cons_score[j]),
# align_score <- df.CNS_2014$val[j],
# bind_score = motif.score,
# cmp_bind_score = cmp_bind_score,
# mean_dist_to_TSS = mean(c(df.CNS_2014$start_dist_to_TSS[j], df.CNS_2014$end_dist_to_TSS[j])),
# stringsAsFactors = FALSE)
# df.grn.CNS2014 <- rbind(df.grn.CNS2014, newrow)
# }
# }
# }
# names(df.grn.CNS2014) <- c("TF", "Target", "cons_score", "bind_score", "compared_bind_score", "start_dist", "short_dist")
# saveRDS(df.grn.CNS2014, paste("Datasets/CNS_GRNS/tmp/df.cns2014_grn_",i,".rds", sep = ""))
# })
# # combine
# df.grn.CNS2014 <- data.frame(TF = character(), Target = character(), cons_score = numeric(), bind_score = numeric(), cmp_bind_score = numeric(),
# start_dist = numeric(), short_dist = numeric(), stringsAsFactors = FALSE)
# for(i in 1:length(lst.pwm.motif)){
# df.grn.CNS2014 <- rbind(df.grn.CNS2014, readRDS(paste("Datasets/CNS_GRNS/tmp/df.cns2014_grn_",i,".rds", sep = "")))
# }
# names(df.grn.CNS2014) <- c("TF", "Target", "cons_score", "bind_score", "compared_bind_score", "start_dist", "short_dist")
# saveRDS(df.grn.CNS2014, paste("Datasets/CNS_GRNS/df.cns2014_grn.rds", sep = ""))
# #}else{
# # df.grn.CNS2014 <- readRDS(paste("Datasets/CNS_GRNS/df.cns2014_grn.rds", sep = ""))
# #}
# return(df.grn.CNS2014)
# }
#
#
#
#
#
#
# extract_CNS2012 <- function(load_from_file = TRUE){
#
# if(!load_from_file){
#
# source("http://bioconductor.org/biocLite.R")
# # biocLite("ChIPpeakAnno")
# # biocLite("biomaRt")
# # biocLite("Biostrings")
# # install.packages("VennDiagram")
# library(ChIPpeakAnno)
# library(biomaRt)
# library(Biostrings)
# #biocLite("BSgenome.Athaliana.TAIR.TAIR9")
# library(BSgenome.Athaliana.TAIR.TAIR9)
# genome <- BSgenome.Athaliana.TAIR.TAIR9
#
# ensmart = useMart('ENSEMBL_MART_PLANT', "athaliana_eg_gene")
# annotatedData = getAnnotation(ensmart, featureType = "TSS")
#
# df.CNS_2012 <- read.table("Datasets/novelTFmotifs/CNS_paper_2012/CNS_set.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
# df.CNS_2012 <- subset(df.CNS_2012, df.CNS_2012$Species.Name == "Arabidopsis thaliana")
# df.CNS_2012["start_dist_to_TSS"] <- NA
# df.CNS_2012["end_dist_to_TSS"] <- NA
# for(i in 1:nrow(df.CNS_2012)){
# print(paste(i , "of", nrow(df.CNS_2012)))
# gene <- as.character(df.CNS_2012$Gene.Identifier[i])
# df.gene.info <- as.data.frame(annotatedData[gene,])
# cns.region <- as.integer(unlist(strsplit(as.character(df.CNS_2012$Co.ordinates.of.CNS..Upstream.of.Gene.Identifier.TSS[i]), ":")))
#
# if(df.gene.info$strand == 1){
# tss <- as.numeric(df.gene.info$start)
# }else{
# tss <- as.numeric(df.gene.info$end)
# }
# df.CNS_2012$start_dist_to_TSS[i] <- abs(cns.region[1] - tss)
# df.CNS_2012$end_dist_to_TSS[i] <- abs(cns.region[2] - tss)
# }
# #df.CNS_2012 <- subset(df.CNS_2012, df.CNS_2012$end_dist_to_TSS < 2000)
# saveRDS(df.CNS_2012, "Datasets/novelTFmotifs/df.CNS_2012.rds")
# }else{
# df.CNS_2012 <- readRDS("Datasets/novelTFmotifs/df.CNS_2012.rds")
# }
# }
#
#
# prepare_NCS2012_GRN <- function(load_from_file = FALSE){
#
# df.CNS_2012 <- extract_CNS2012()
#
# #### ---
#
# lst.motifs <- get_cell_and_pnas_Paper_PWMs()
# lst.pwm.motif <- lst.motifs[[1]]
# df.motifs <- lst.motifs[[2]]
#
# biocLite("PWMEnrich")
# library(PWMEnrich)
#
#
# library(PWMEnrich.Dmelanogaster.background)
# data(PWMLogn.dm3.MotifDb.Dmel)
#
# print("compute scored CNS2012-GRN")
# #source("http://bioconductor.org/biocLite.R")
# # biocLite("ChIPpeakAnno")
# # biocLite("biomaRt")
# #biocLite("Biostrings", lib = "~/MyRLibs")
# # install.packages("VennDiagram")
# #library(ChIPpeakAnno)
# #library(biomaRt)
# library(Biostrings)
#
#
# if(!load_from_file){
#
# system.time( # begin time measure
# for(i in 1:length(lst.pwm.motif)){
# print(paste("TF Motif ", i, "of", length(lst.pwm.motif)))
# df.grn.CNS2012 <- data.frame(TF = character(), Target = character(), cons_score = numeric(), bind_score = numeric(), cmp_bind_score = numeric(), stringsAsFactors = FALSE)
# names(df.grn.CNS2012) <- c("TF", "Target", "cons_score", "bind_score", "compared_bind_score")
# for(j in 1:nrow(df.CNS_2012)){
#
#
# ids = c("bcd", "gt_FlyReg_FBgn0001150", "Kr")
# sel.pwms = PWMLogn.dm3.MotifDb.Dmel$pwms[ids]
# scores = motifScores(sequence, sel.pwms, raw.scores=TRUE)
# head(scores[[1]])
#
# res = motifEnrichment(sequence, sel.pwms)
# report = sequenceReport(res, 1)
#
#
# hits <- matchPWM(lst.pwm.motif[[i]], as.character(df.CNS_2012$Sequence[j]), with.score = TRUE)
# nhits <- length(hits)
# if(nhits >= 1){
# cmp_bind_score <- min(mcols(hits)$score / maxScore(lst.pwm.motif[[i]])) # should be >= 0.8
# motif.score <- mcols(hits)$score
# if(cmp_bind_score >= 0.8){
# newrow <- data.frame(TF = as.character(df.motifs$TF.locus[i]), Target = as.character(df.CNS_2012$Gene.Identifier[j]),
# cons_score = as.numeric(df.CNS_2012$Conservation.Score[j]), bind_score = motif.score,
# cmp_bind_score = cmp_bind_score, stringsAsFactors = FALSE)
# df.grn.CNS2012 <- rbind(df.grn.CNS2012, newrow)
# }
# }
# }
# names(df.grn.CNS2012) <- c("TF", "Target", "cons_score", "bind_score", "compared_bind_score")
# saveRDS(df.grn.CNS2012, paste("Datasets/CNS_GRNS/tmp/df.cns2012_grn_",i,".rds", sep = ""))
# }
# ) # end time measure
#
# # combine
# df.grn.CNS2012 <- data.frame(TF = character(), Target = character(), cons_score = numeric(), bind_score = numeric(), cmp_bind_score = numeric(), stringsAsFactors = FALSE)
# for(i in 1:length(lst.pwm.motif)){
# df.grn.CNS2012 <- rbind(df.grn.CNS2012, readRDS(paste("Datasets/CNS_GRNS/tmp/df.cns2012_grn_",i,".rds", sep = "")))
# }
# names(df.grn.CNS2012) <- c("TF", "Target", "cons_score", "bind_score", "compared_bind_score")
# }
# saveRDS(df.grn.CNS2012, paste("Datasets/CNS_GRNS/df.cns2012_grn.rds", sep = ""))
# }
#
#
#
#
#
# compute_TFBinding_Matrix_complete <- function( note ="", load_from_file = TRUE){
#
# tfbs <- read.csv("Datasets/TFBS/TFBS_conservation.csv", header = TRUE, sep = ",")
# TFs <- unique(as.character(tfbs$Transcription.factor))
# targets <- unique(as.character(tfbs$Target.gene))
#
# if(!load_from_file){
# tfbs <- read.csv("Datasets/TFBS/TFBS_conservation.csv", header = TRUE, sep = ",")
#
# mat.tfb <- matrix(0, nrow = length(TFs), ncol = length(targets))
# rownames(mat.tfb) <- TFs
# colnames(mat.tfb) <- targets
#
# mat.tfb.cons <- matrix(0, nrow = length(TFs), ncol = length(targets))
# rownames(mat.tfb.cons) <- TFs
# colnames(mat.tfb.cons) <- targets
#
# n <- nrow(tfbs)
# print("compute transcription factor binding matrix - conservation")
# for(i in 1:n){
# print(paste(i,"of",n))
# reg <- as.character(tfbs$Transcription.factor[i])
# targ <- as.character(tfbs$Target.gene[i])
# w.cons <- as.numeric(tfbs$Species.conservation[i])
#
# mat.tfb.cons[reg, targ] <- w.cons
# mat.tfb[reg, targ] <- 1
#
# }
# saveRDS(mat.tfb, file = paste("Datasets/r_objects/mat.tfb.",note,"1.rds", sep =""))
# saveRDS(mat.tfb.cons, file = paste("Datasets/r_objects/mat.tfb.",note,"2.rds", sep =""))
# }else{
# mat.tfb <- readRDS(file = paste("Datasets/r_objects/mat.tfb.",note,"1.rds", sep =""))
# mat.tfb.cons <- readRDS(file = paste("Datasets/r_objects/mat.tfb.",note,"2.rds", sep =""))
# }
#
# return(list(mat.tfb, mat.tfb.cons))
# }
#
#
#
# #if(source == "pcc"){
# # tfbs <- read.csv("Datasets/TFBS/TFBS_PCC.csv", header = TRUE, sep = ",")
# #}
#
# #' Compute transcription factor binding adjacency matrix (based on motif conservation evidence)
# #'
# #' This function
# #'
# #' @param writeResults - write results to file (default = TRUE)
# #' @param ... expressions evaluated in the context of \code{df} and
# #' then fed to \code{\link{order}}
# #' @keywords manip
# #' @export
# #' @examples
# #' lst.mat.tfb <- compute_TFB_matrix_conservation(vec.genes)
# #' mat.tfb <- lst.mat.tfb[[1]]
# #' mat.tfb.cons <- lst.mat.tfb[[2]]
# compute_TFB_matrix_conservation <- function(vec.rel.genes, note ="", load_from_file = TRUE){
#
# if(!load_from_file){
# tfbs <- read.csv("Datasets/TFBS/TFBS_conservation.csv", header = TRUE, sep = ",")
#
# mat.tfb <- matrix(0, nrow = length(vec.rel.genes), ncol = length(vec.rel.genes))
# rownames(mat.tfb) <- vec.rel.genes
# colnames(mat.tfb) <- vec.rel.genes
#
# mat.tfb.cons <- matrix(0, nrow = length(vec.rel.genes), ncol = length(vec.rel.genes))
# rownames(mat.tfb.cons) <- vec.rel.genes
# colnames(mat.tfb.cons) <- vec.rel.genes
#
# n <- nrow(tfbs)
# print("compute transcription factor binding matrix - conservation")
# for(i in 1:n){
# print(paste(i,"of",n))
# reg <- as.character(tfbs$Transcription.factor[i])
# targ <- as.character(tfbs$Target.gene[i])
# w.cons <- as.numeric(tfbs$Species.conservation[i])
# if(is.element(reg, vec.rel.genes) && is.element(targ, vec.rel.genes)){
# mat.tfb.cons[reg, targ] <- w.cons
# mat.tfb[reg, targ] <- 1
# }
# }
# saveRDS(mat.tfb, file = paste("Datasets/r_objects/mat.tfb.",note,"1.rds", sep =""))
# saveRDS(mat.tfb.cons, file = paste("Datasets/r_objects/mat.tfb.",note,"2.rds", sep =""))
# }else{
# mat.tfb <- readRDS(file = paste("Datasets/r_objects/mat.tfb.",note,"1.rds", sep =""))
# mat.tfb.cons <- readRDS(file = paste("Datasets/r_objects/mat.tfb.",note,"2.rds", sep =""))
# }
#
# return(list(mat.tfb, mat.tfb.cons))
# }
#
#
# #' Compute transcription factor binding adjacency matrix (based on AGRIS evidence)
# #'
# #' This function
# #'
# #' @param writeResults - write results to file (default = TRUE)
# #' @param ... expressions evaluated in the context of \code{df} and
# #' then fed to \code{\link{order}}
# #' @keywords manip
# #' @export
# #' @examples
# #' mat.tfb <- compute_TFB_matrix_agris(vec.genes)
# compute_TFB_matrix_agris <- function(vec.rel.genes, df.geneloci_TF, writeResults = TRUE, reset = FALSE){
# if(reset){
# bindingSites <- loadTFBS("Datasets/BindingSite.tbl")
#
# bindingSites$TF.Family[bindingSites$TF.Family == "MYB-RELATED"] <- "MYB_related"
# bindingSites$TF.Family[bindingSites$TF.Family == "BHLH"] <- "bHLH"
# bindingSites$TF.Family[bindingSites$TF.Family == "E2F-DP"] <- "E2F/DP"
# bindingSites$TF.Family[bindingSites$TF.Family == "HB"] <- "HB-PHD"
# bindingSites$TF.Family[bindingSites$TF.Family == "AP2-EREBP"] <- "AP2"
# bindingSites$TF.Family[bindingSites$TF.Family == "BZIP"] <- "bZIP"
# write.csv(bindingSites,"Datasets/TFBS/BindingSite_renamed.csv", row.names=FALSE)
# }else{
# bindingSites <- read.csv("Datasets/TFBS/BindingSite_renamed.csv")
# }
#
# mat.TFBS <- matrix(0, nrow = length(vec.rel.genes), ncol = length(vec.rel.genes))
# rownames(mat.TFBS) <- vec.rel.genes
# colnames(mat.TFBS) <- vec.rel.genes
# print("compute transcription factor binding matrix - AGRIS")
# for(i in 1:length(unique(bindingSites$TF.Family))){
# tf.fam <- as.character(unique(bindingSites$TF.Family)[i])
# subset.TFBS <- subset(bindingSites, TF.Family == tf.fam)
# TFs.binding <- subset(df.geneloci_TF, df.geneloci_TF$Family == tf.fam)
# idx.i <- match(TFs.binding$Locus, vec.rel.genes)
# idx.j <- match(subset.TFBS$Promoter.Locus, vec.rel.genes)
# idx.i <- idx.i[!is.na(idx.i)]
# idx.j <- idx.j[!is.na(idx.j)]
# for(m in 1:length(idx.i)){
# for(n in 1:length(idx.j)){
# mat.TFBS[idx.i[m], idx.j[n]] <- 1
# }
# }
# }
# if(writeResults){
# saveRDS(mat.TFBS, file = "Datasets/r_objects/mat.tfb.agris.rds")
# }
# return(mat.TFBS)
# }
#
#
# #' Extract metabolic enzyme encoding genes by metabolic pathway
# #'
# #' This function identifies all metabolic enzyme encoding genes belonging to each pathway in AraCyc
# #' returns lists of pwys and their metabolic enzyme encoding genes, a list of pw.names and pw.ids
# #' @param writeResults - write results to file (default = TRUE)
# #' @param ... expressions evaluated in the context of \code{df} and
# #' then fed to \code{\link{order}}
# #' @keywords manip
# #' @export
# #' @examples
# #' lst.pw.MEs <- extract_ME_by_PWY()
# #' pw.enzymes <- lst.pw.MEs[[1]]
# #' pw.ids <- lst.pw.MEs[[2]]
# #' pw.names <- lst.pw.MEs[[3]]
# compute_tfbs_between_modules <- function(lst.geneModules, vec.rel.genes, mat.TFBS, idx.dset = 1, idx.partition = 6, note ="", load_from_file = TRUE){ # idx_per_cluster
#
# if(!load_from_file){
# n.modules <- length(lst.geneModules)
# idx <- vector(mode = "list", n.modules)
# for(i in 1:n.modules){
# id <- as.numeric(match(lst.geneModules[[i]], vec.rel.genes))
# idx[[i]] <- id[!is.na(id)]
# }
# mat.TFBS.weights.norm <- matrix(0, nrow = n.modules, ncol = n.modules)
# rownames(mat.TFBS.weights.norm) <- seq(1:n.modules)
# colnames(mat.TFBS.weights.norm) <- seq(1:n.modules)
#
# mat.TFBS.weights <- matrix(0, nrow = n.modules, ncol = n.modules)
# rownames(mat.TFBS.weights) <- seq(1:n.modules)
# colnames(mat.TFBS.weights) <- seq(1:n.modules)
# print("compute Transcription Factor Binding between Modules")
# for(i in 1:n.modules){
# for(j in 1:n.modules){
# idx_per_cluster <- expand.grid(idx[[i]], idx[[j]])
# v.TFBS <- numeric()
# v.TFBS <- apply( idx_per_cluster , 1 , function(x) mat.TFBS[x[1] ,x[2]])
# n.genes <- (length(lst.geneModules[[i]]) * length(lst.geneModules[[j]]))
# mat.TFBS.weights.norm[i,j] <- (sum(v.TFBS) / n.genes)
# mat.TFBS.weights[i,j] <- sum(v.TFBS)
# }
# }
# saveRDS(mat.TFBS.weights.norm, file = paste("Datasets/r_objects/mat.tfb.modules.norm.",note,idx.dset,"_",idx.partition,".rds", sep =""))
# saveRDS(mat.TFBS.weights, file = paste("Datasets/r_objects/mat.tfb.modules.",note, idx.dset,"_",idx.partition,".rds", sep =""))
# return(list(mat.TFBS.weights.norm, mat.TFBS.weights))
# }else{
# mat.TFBS.weights.norm <- readRDS(file = paste("Datasets/r_objects/mat.tfb.modules.norm.",note, idx.dset,"_",idx.partition,".rds", sep =""))
# mat.TFBS.weights <- readRDS(file = paste("Datasets/r_objects/mat.tfb.modules.",note, idx.dset,"_",idx.partition,".rds", sep =""))
# return(list(mat.TFBS.weights.norm, mat.TFBS.weights))
# }
# }
#
#
#
# #### FINAL EVALUATION METHOD?
# compute_TFB_PWY_enrichment <- function(vec.rel.genes, lst.pw.enzymes, vec.pw.ids, vec.pw.names, lst.pw.reactions.id, lst.pw.reactions.enzymes, writeResults = TRUE){
#
# pw.names <- readRDS(file = "Datasets/r_objects/pw_names.rds")
# pw.ids <- readRDS(file = "Datasets/r_objects/pw_ids.rds")
# pw.enzymes <- readRDS(file = "Datasets/r_objects/pw_enzymes.rds")
# pw.reactions.id <- readRDS(file = "Datasets/r_objects/pw_reactions.id.rds")
# pw.reactions.enzymes <- readRDS(file = "Datasets/r_objects/pw_reactions.enzymes.rds")
#
# tfbs <- read.csv("Datasets/TFBS/TFBS_conservation.csv", header = TRUE, sep = ",")
# tfs <- as.character(unique(tfbs$Transcription.factor))
#
# tf_pwy_regs <- data.frame(TF = character(), PWY.ID = character(), REG.RATIO = numeric(), NR.REGS = numeric(), PWY.SIZE = numeric(), PWY.NAME = character())
#
# n.genome <- 27000
#
# mat.pValues <- matrix(1, nrow=length(tfs), ncol= length(pw.ids ))
# colnames(mat.pValues) <-pw.ids
# rownames(mat.pValues) <- tfs
#
# for(i in 1:length(tfs)){
#
# print(paste(i,"of", length(tfs)))
# tfbs.sset <- subset(tfbs, tfbs$Transcription.factor == tfs[i])
#
# targ.genes <- as.character(tfbs.sset$Target.gene)
# n.targ.genes <- length(targ.genes)
#
# for(j in 1:length(pw.reactions.id)){
#
# n.tg.enzymes <- numeric()
# n.pwy.knowns <- 0
#
# for(r in 1:length(pw.reactions.enzymes[[j]])){
#
# if(pw.reactions.enzymes[[j]][[r]][1] != "unknown"){
# n.pwy.knowns <- n.pwy.knowns + 1
# n.reg.reaction.genes <- length(intersect(pw.reactions.enzymes[[j]][[r]], targ.genes))
# n.tg.enzymes <- c(n.tg.enzymes, n.reg.reaction.genes)
#
# # for every individual reaction per pathways
# #n.reaction.genes <- length(pw.reactions.enzymes[[j]][[r]])
# #n.non_reg.pwy.genes <- n.reaction.genes - n.reg.reaction.genes
# #n.non_targ.genes <- n.genome - n.targ.genes
# #counts = (matrix(data = c(n.reg.reaction.genes, n.non_reg.pwy.genes, n.targ.genes, n.non_targ.genes), nrow = 2))
# #p.ind.reaction <- fisher.test(counts)
# }
# }
#
# n.pwy.reactions <- n.pwy.knowns
# n.reg.pwy.reactions <- length(which(n.tg.enzymes > 0))
# n.non_reg.pwy.reactions <- n.pwy.reactions - n.reg.pwy.reactions
# n.non_targ.genes <- n.genome - n.targ.genes
#
# counts = (matrix(data = c(n.reg.pwy.reactions, n.non_reg.pwy.reactions, n.targ.genes, n.non_targ.genes), nrow = 2))
# mat.pValues[i,j] <- fisher.test(counts)$p.value
# }
# }
#
# vec.tmp <- as.numeric(mat.pValues)
# p.values.adjusted <- p.adjust(vec.tmp, method= "BH")
# mat.pValues<- matrix(p.values.adjusted, nrow=length(tfs), byrow = T)
# colnames(mat.pValues) <-pw.ids
# rownames(mat.pValues) <- tfs
#
#
# # COMPLETE pwy genes non pwy genes
# # reg. genes
# # non reg genes
# #n.pwy.genes <- 6 #length(which(pw.enzymes[[i]] != "unknown"))
# #n.reg.pwy.genes <- 4 # length(intersect(pw.enzymes[[i]], targ.genes))
# #n.non_reg.pwy.genes <- n.pwy.genes - n.reg.pwy.genes
# #n.non_targ.genes <- n.genome - n.targ.genes
# #counts = (matrix(data = c(n.reg.pwy.genes, n.non_reg.pwy.genes, n.targ.genes, n.non_targ.genes), nrow = 2))
# #p.complete <- fisher.test(counts)
#
#
# pwy.targets <- intersect(as.character(tfbs.sset$Target.gene), as.character(pw.enzymes[[j]]))
# size.pwy <- length(as.character(pw.enzymes[[j]]))
# nr.reg <- length(pwy.targets)
# ratio.reg <- nr.reg / size.pwy
#
# if(nr.reg >= 1){
# tf_pwy_regs <- rbind(tf_pwy_regs, data.frame(TF = as.character(tfs[i]), PWY.ID = as.character(pw.ids[[j]]), REG.RATIO = ratio.reg, NR.REGS = nr.reg , PWY.SIZE = size.pwy, PWY.NAME = as.character(pw.names[[j]])))
# }
#
# return(tf_pwy_regs)
# write.table(tf_pwy_regs, "/Users/michaelbanf/Documents/postdoctoral_work/programs/pwy_enrichment.xls", row.names = FALSE, sep = "\
|
6f4c4d2569f35b5b7098ac2a0571b2dda9e30306
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dockerfiler/examples/Dockerfile.Rd.R
|
72775a3641b1b96173b45a858c7b65e6ccd68fe8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 173
|
r
|
Dockerfile.Rd.R
|
# Auto-generated example script extracted from the Rd help page of the
# 'dockerfiler' package (the "### ..." lines are Rd-runner section markers).
library(dockerfiler)
### Name: Dockerfile
### Title: A Dockerfile template
### Aliases: Dockerfile
### Keywords: datasets
### ** Examples

# Construct a new Dockerfile object via its R6 constructor; 'my_dock' can
# then be used to add instructions and write out a Dockerfile.
my_dock <- Dockerfile$new()
|
a892536cf2a1406ce81add7754d58f7a4d31240c
|
d36beb31d88895dae08898b9c5526aad2c0ef6bb
|
/R/list_all_upstream.R
|
39f51f2029349a4170ff34923e3859e8616c1903
|
[] |
no_license
|
lhmet-forks/catchstats
|
7e3b9dc1791df303059b1b5d49bd98b4aedcff35
|
4a7f497b81fe3b4ea50fa93975bb7747f3e3b3ac
|
refs/heads/master
| 2022-03-18T06:12:18.797350
| 2019-11-19T20:51:05
| 2019-11-19T20:51:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,184
|
r
|
list_all_upstream.R
|
#' Identify all the subcatchments upstream of a list of specified catchments
#'
#' @param hierarchy a dataframe containing catchment id and next downstream (nextds) id fields
#' @param catchnames a vector of catchment ids for which a list of upstream catchment
#'   ids will be returned.
#' @return a list of upstream catchment ids (one element per catchment in catchnames)
#' @note Function depends on the next downstream field in a stream network 'hierarchy' table (dataframe).
#' Can be used to support further aggregation of environmental variables for sub-catchments downstream
#' of a list of catchments of interest (e.g. for calculating barrier numbers).
#' @examples
#' data(mwcats)
#'
#' # find all sites upstream of the first five sites in the catchment list
#' list_all_upstream(hierarchy = mwcats, catchnames = mwcats$site[1:5])
#'
#' @export
list_all_upstream <- function(hierarchy, catchnames) {
  # Normalise column names so allupstream() can rely on "site" / "nextds".
  names(hierarchy) <- c("site", "nextds")
  # Preallocate the result list; one slot per requested catchment.
  all.us.sites <- vector("list", length(catchnames))
  # seq_along() is safe for zero-length input (1:length(x) would yield c(1, 0)).
  for (i in seq_along(all.us.sites)) {
    # allupstream() (defined elsewhere in this package) walks the nextds
    # links to collect every catchment upstream of catchnames[i].
    all.us.sites[[i]] <- allupstream(hierarchy, catchnames[i])
  }
  return(all.us.sites)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.