blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fe4c601e3c393ee94d72b0cd652b3662c28254f9
|
5f179d46db7e5e6c93e8d54b29170e55d57f6513
|
/R/RBaseX.R
|
50e1c83104e6dda0af0a6f61bb178ccdf11eaf4c
|
[] |
no_license
|
BenEngbers/RBaseX
|
727538fab9a10ad72496c2e2eb1d3d6fe81f8be0
|
64a3de8bd3d0e31cb9789a8bf30fa89a50f4729c
|
refs/heads/master
| 2022-11-09T03:55:29.343796
| 2022-11-05T00:25:37
| 2022-11-05T00:25:37
| 243,994,987
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,509
|
r
|
RBaseX.R
|
#' @title RBaseX
#' @docType package
#' @name RBaseX
#' @description 'BaseX' is a robust, high-performance XML database engine and a highly compliant XQuery 3.1 processor
#'   with full support of the W3C Update and Full Text extensions.
#'
#' @importFrom magrittr %>% %<>%
#' @import dplyr
#' @import utils
#' @import R6
#' @import RCurl
#' @import stringr
#' @import tibble
#' @importFrom data.table rbindlist
#' @importFrom openssl md5
#'
#' @details 'RBaseX' was developed using R6. For most of the public methods in the R6-classes, wrapper-functions
#'   are created. The differences in performance between R6-methods and wrapper-functions are minimal and
#'   slightly in advantage of the R6-version.
#'
#'   It is easy to use the R6-calls instead of the wrapper-functions.
#'   The only important difference is that in order to execute a query, you have to call ExecuteQuery()
#'   on a queryObject.
#'
#' @examples
#' \dontrun{
#' Session <- BasexClient$new("localhost", 1984L, username = "<username>", password = "<password>")
#' Session$Execute("Check test")
#' Session$Execute("delete /")
#' # Add resource
#' Session$Add("test.xml", "<root/>")
#'
#' # Bindings -----
#' query_txt <- "declare variable $name external; for $i in 1 to 3 return element { $name } { $i }"
#' query_obj <- Session$Query(query_txt)
#' query_obj$queryObject$Bind("$name", "number")
#' print(query_obj$queryObject$ExecuteQuery())
#' }
# Declare the magrittr/dplyr pronoun "." as a known global so that
# R CMD check does not emit a "no visible binding" NOTE for pipelines.
globalVariables(c("."))
# roxygen2 sentinel: generates package-level documentation for RBaseX.
"_PACKAGE"
|
444f5814b163dd5d85c72e749d839a57625472d2
|
4e90257f2a8644c7fa3b7f2312c72edd2f2b0e06
|
/project/tidytuesday/tidytuesday_10-19.R
|
55dc1175545350858643a737f9ad85ef201a1fb3
|
[
"CC-BY-4.0"
] |
permissive
|
sjspielman/datascience_for_biologists
|
292b9f0c17a1abf28ebba4e088c53ec0aeea2b70
|
46caf48ba3e0ab2d3e8bdfa222004f0db2b9839e
|
refs/heads/master
| 2023-01-09T15:37:41.490683
| 2022-12-27T01:00:15
| 2022-12-27T01:00:15
| 230,664,262
| 27
| 12
| null | 2022-05-24T23:12:22
| 2019-12-28T20:39:45
|
HTML
|
UTF-8
|
R
| false
| false
| 253
|
r
|
tidytuesday_10-19.R
|
## TidyTuesday 2021-10-19: giant pumpkin weigh-off data.
library(tidyverse)

# Pull the raw pumpkin records straight from the TidyTuesday repository.
pumpkins <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-10-19/pumpkins.csv')

# Ensure the weight column is numeric for downstream use.
pump_num <- pumpkins %>%
  mutate(weight_lbs = as.numeric(weight_lbs))

# NOTE(review): no plot object is created before this call; ggsave()
# saves the last displayed plot — confirm a plot was intended here.
ggsave("whatever.png")
|
d13b820eb6036bea8884431c5f91ee320f120137
|
d6302bdd07645e0da8ad4430a261d3ebe2149435
|
/man/clusterSummary.Rd
|
7ab4eaf155eb10b8f639cdf842ff400a296aa137
|
[] |
no_license
|
cran/RclusTool
|
3a8fec24edeaedee42ef0f255f32dfc4be107dfe
|
7ed428f6c896889a9b291a279e1e82f8f6d9cd3b
|
refs/heads/master
| 2022-09-04T15:47:33.547991
| 2022-08-29T07:40:08
| 2022-08-29T07:40:08
| 236,879,246
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,421
|
rd
|
clusterSummary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sampleClustering.R
\name{clusterSummary}
\alias{clusterSummary}
\title{Clusters summaries computation}
\usage{
clusterSummary(
data.sample,
label,
features.to.keep = colnames(data.sample$features[["preprocessed"]]$x),
summary.functions = c(Min = "min", Max = "max", Sum = "sum", Average = "mean", SD =
"sd")
)
}
\arguments{
\item{data.sample}{list containing features, profiles and clustering results.}
\item{label}{vector of labels.}
\item{features.to.keep}{vector of features names on which the summaries are computed.}
\item{summary.functions}{vector of functions names for the summaries computation. Could be 'Min', 'Max', 'Sum', 'Average', 'SD'.}
}
\value{
out data.frame containing the clusters summaries.
}
\description{
Save clusters summaries results in a csv file.
}
\details{
clusterSummary computes the clusters summaries (min, max, sum, average, sd) from a clustering result.
}
\examples{
dat <- rbind(matrix(rnorm(100, mean = 0, sd = 0.3), ncol = 2),
matrix(rnorm(100, mean = 2, sd = 0.3), ncol = 2),
matrix(rnorm(100, mean = 4, sd = 0.3), ncol = 2))
tf1 <- tempfile()
write.table(dat, tf1, sep=",", dec=".")
x <- importSample(file.features=tf1)
res <- KmeansQuick(x$features$initial$x, K=3)
labels <- formatLabelSample(res$cluster, x)
cluster.summary <- clusterSummary(x, labels)
}
|
c00e5daa91085fa3698eb461b241448dc5ade944
|
0368ef1544151da85ebb4f52d1d366abe51ca82a
|
/inst/examples/im.convert-ex.R
|
80d208a3ca3fc4db2f692cbebb86f936b5d73a36
|
[] |
no_license
|
yihui/animation
|
6ccd25a6e71c34cbabbe4f7151e5d7d80646b092
|
30cb6f3859383c251d8d91cb59822341e420d351
|
refs/heads/main
| 2023-04-08T00:12:33.697435
| 2023-03-24T15:41:48
| 2023-03-24T15:41:48
| 1,086,080
| 182
| 65
| null | 2022-08-22T15:39:12
| 2010-11-16T19:13:38
|
R
|
UTF-8
|
R
| false
| false
| 555
|
r
|
im.convert-ex.R
|
## Generate a sequence of Brownian-motion frames as numbered PNGs,
## then assemble them into animated GIFs in three different ways.
owd <- setwd(tempdir())
oopt <- ani.options(interval = 0.05, nmax = 20)
png('bm%03d.png')
brownian.motion(pch = 21, cex = 5, col = 'red', bg = 'yellow',
                main = 'Demonstration of Brownian Motion')
dev.off()

## 1. pass the filenames as a wildcard pattern to ImageMagick
im.convert('bm*.png', output = 'bm-animation1.gif')
## 2. same, but via GraphicsMagick
gm.convert('bm*.png', output = 'bm-animation2.gif')
## 3. pass an explicit vector of filenames
bm.files <- sprintf('bm%03d.png', 1:20)
im.convert(files = bm.files, output = 'bm-animation3.gif')

# Restore animation options and the original working directory.
ani.options(oopt)
setwd(owd)
|
b352511e45693ed247ca01326ce672ecb557605f
|
7f22f51805020814cb1140323df1423d9d913a39
|
/R/extractAudioFeatures.R
|
0c960cf7d81a10c627a6a982320eef8a7ba4378a
|
[] |
no_license
|
jhinds/communication
|
44da7c61a80e8a319fa1d947cdabf84413c39d08
|
3619bcd7ecdc2679a4c6d1fc81684abb3a380d47
|
refs/heads/master
| 2023-03-12T01:52:09.686031
| 2021-02-25T08:20:02
| 2021-02-25T08:20:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,347
|
r
|
extractAudioFeatures.R
|
#' Extract frame-level acoustic features from wav files
#'
#' @param wav.dir Directory of wav files for featurization
#' @param wav.fnames If non-NULL, an explicit vector of wav files for featurization (overrides wav.dir)
#' @param windowSize Size of window in milliseconds
#' @param windowShift Amount to shift window in milliseconds
#' @param windowType Window type
#' @param derivatives Include no (0), first (1), or first and second (2) derivatives of features
#' @param verbose Verbose printing
#' @param recursive Recursively traverse directory for wav files
#'
#' @return An object of class preppedAudio, which consists of a list
#'   of `data`, `files`, and `control`. `data` is a list with
#'   elements corresponding to audio features for each of the input
#'   wav files, where each element is the audio features for the
#'   respective wav file. `files` contains metadata about each wav
#'   file for which audio features were extracted. `control` records
#'   arguments passed to extractAudioFeatures().
#'
#' @examples
#' \dontrun{
#' wav.fnames = list.files(file.path('PATH/TO/WAV/FILES'),
#'                         pattern = 'wav$',
#'                         recursive = TRUE,
#'                         full.names = TRUE
#'                         )
#' audio <- extractAudioFeatures(wav.fnames = wav.fnames,
#'                               derivatives = 0
#'                               )
#' }
#'
#' @export
#'
#' @import wrassp
#' @import tuneR
#' @import signal
#'
#'
extractAudioFeatures <- function(
  wav.dir = getwd(),
  wav.fnames = NULL,
  windowSize = 25,
  windowShift = 12.5,
  windowType = 'HAMMING',
  derivatives = 2,
  verbose = 1,
  recursive = FALSE
) {
  # Validate the window type against what wrassp supports.
  if (!windowType %in% AsspWindowTypes()){
    stop('supported window types are ', paste(AsspWindowTypes(), collapse=', '))
  }
  if (windowType != 'HAMMING') {
    warning('only window type "HAMMING" supported for MFCC and TEO calculations; ignoring "windowType" for those features')
  }
  ## general parameters for feature extraction
  par <- list(windowSize = windowSize,
              windowShift = windowShift,
              window = windowType,
              toFile = FALSE
              )
  ## feature-specific parameters for feature extraction
  preemphasis = .97 # pre-emphasis filter
  numFormants = 3 # for formants/bandwidths
  gender = 'u' # for setting min/max on F0; we may need to eliminate this to stop F0 from zeroing out
  order = 1 # for autocorrelation
  numcep = 13 # number of cepstra for MFCC
  ## files
  wav.dir <- gsub('/?$', '/', wav.dir) # add trailing '/' if missing
  if (is.null(wav.fnames)){
    if (recursive){
      wav.fnames <- list.files(wav.dir, recursive = TRUE, full.names = TRUE)
      # NOTE(review): pattern ".wav" has an unescaped dot and is unanchored,
      # so it also matches e.g. "xwavy" or ".wav.bak" — consider "\\.wav$".
      wav.fnames <- wav.fnames[grepl(".wav", wav.fnames)]
    } else {
      wav.fnames <- Sys.glob(paste(wav.dir,'*.wav', sep = ''))
    }
  }
  # Derive display names by stripping the directory and the extension.
  out.names <- sub("(.*\\/)([^.]+)(\\.[[:alnum:]]+$)", "\\2", wav.fnames)
  out.list <- vector('list', length(out.names))
  names(out.list) <- out.names
  if (length(out.names) == 0){
    stop('no .wav files found in ', wav.dir)
  }
  # Per-file metadata collected during the loop below.
  description <- data.frame(row.names = out.names)
  description$filename <- NA
  description$duration <- NA
  description$sampling.rate <- NA
  description$nsamples <- NA
  ## go go gadget
  for (i in 1:length(out.list)){
    if (verbose>=1){
      cat('extracting features from', out.names[i], '\n')
    }
    fname <- wav.fnames[i]
    par$listOfFiles <- fname
    out <- list()
    ## origin file properties
    au <- read.AsspDataObj(fname)
    description$filename[i] <- fname
    description$duration[i] <- dur.AsspDataObj(au)
    description$sampling.rate[i] <- rate.AsspDataObj(au)
    description$nsamples[i] <- numRecs.AsspDataObj(au)
    ## formants
    if (verbose >= 2){
      cat('  extracting formants\n')
    }
    fmBwVals <- do.call(forest, c(par, numFormants = numFormants, preemphasis=preemphasis, estimate=TRUE))
    out$formants <- fmBwVals$fm
    colnames(out$formants) <- paste('formant', 1:numFormants, sep='')
    ## formant bandwidths
    if (verbose >= 2){
      cat('  extracting formant bandwidths\n')
    }
    out$bandwidths <- fmBwVals$bw
    colnames(out$bandwidths) <- paste(colnames(out$formants), '_bw', sep='')
    rm(fmBwVals); gc()
    ## fundamental frequency and pitch
    if (verbose >= 2){
      cat('  extracting frequency and pitch\n')
    }
    out$f0_ksv <- do.call(ksvF0,
                          args=c(par[-match(c('windowSize', 'window'),
                                            names(par))], # not used by ksvF0
                                 gender=gender) # ksvF0-specific args
                          )$F0
    out$f0_mhs <- do.call(mhsF0,
                          args=c(par[-match(c('windowSize', 'window'), names(par))], # not used by mhsF0
                                 gender=gender) # mhsF0-specific args
                          )$pitch
    ## energy
    if (verbose >= 2){
      cat('  extracting energy\n')
    }
    out$energy_dB <- do.call(rmsana, par)$rms
    ## zero crossing rate
    if (verbose >= 2){
      cat('  extracting zero-crossing rate\n')
    }
    out$zcr <- do.call(zcrana, par[-match(c('window'), names(par))])$zcr
    ## 1st order autocorrelation (higher orders are almost identical)
    if (verbose >= 2){
      cat('  extracting autocorrelation\n')
    }
    out$autocorrelation <- do.call(acfana, c(par, analysisOrder=order))$acf
    out$autocorrelation <- out$autocorrelation[, 1:order, drop=FALSE] # keep as matrix if order==1
    ## Teager energy operator, aggregated to frame level
    ## (first need frame ids for each window to align)
    if (verbose >= 2){
      cat('  extracting Teager energy operator\n')
    }
    # Convert the ms window parameters to sample counts for this file's rate.
    windowSizeSamples <- floor(windowSize / 1000 * description$sampling.rate[i])
    windowShiftSamples <- floor(windowShift / 1000 * description$sampling.rate[i])
    windowCount <- nrow(out[[1]])
    windowStarts <- (0:(windowCount-1)) * windowShiftSamples + 1 # first of windowShiftSamples in window
    # Sample-level TEO: x[n]^2 - x[n-1]*x[n+1], padded with 0 at both ends.
    teo_continuous <- c(0, # deriv can't be calculated for first frame...
                        au$audio[2:(description$nsamples[i]-1)]^2 -
                          au$audio[1:(description$nsamples[i]-2)]*au$audio[3:description$nsamples[i]],
                        0) # ... or last frame
    ## out$teo = sapply(windowStarts, function(windowStart){
    ##   weighted.mean(teo_continuous[windowStart + 0:(windowSizeSamples-1)]^2,
    ##                 w = hamming(windowSizeSamples))
    ## })
    out$teo = sapply(windowStarts, function(windowStart){
      mean(teo_continuous[windowStart + 0:(windowSizeSamples-1)]^2)
    })
    out$teo = as.matrix(log(out$teo, 10))
    rm(teo_continuous); gc()
    ## Mel-frequency cepstral coefficients
    if (verbose >= 2){
      cat('  extracting Mel-frequency cepstral coefficients\n')
    }
    # Recover the bit depth from wrassp's track format (e.g. "INT16"), if present.
    bitdepth <- NA
    if (!is.null(attr(au, 'trackFormats'))){
      bitdepth <- as.numeric(gsub('^INT(\\d+)$', '\\1',
                                  attr(au, 'trackFormats')))
    }
    if (!is.na(bitdepth)){
      au.wave <- Wave(au$audio[,1],
                      samp.rate=description$sampling.rate[i],
                      bit = bitdepth)
    } else { # Wave will assume default bit depth == 16, with warning
      au.wave <- Wave(au$audio[,1],
                      samp.rate=description$sampling.rate[i])
    }
    ## melfcc trims first/last frames; put them back in for now
    if (windowCount > 3){
      out$mfcc <-
        rbind(
          rep(NA, numcep),
          melfcc(au.wave,
                 sr = description$sampling.rate[i],
                 numcep = numcep,
                 preemph = -preemphasis,
                 wintime = par$windowSize/1000,
                 hoptime = par$windowShift/1000),
          rep(NA, numcep)
        )
    } else {
      warning(sprintf('  audio file %s.wav too short: cannot extract Mel-frequency cepstral coefficients, substituting NAs', out.names[i]))
      out$mfcc <- matrix(NA, nrow = windowCount, ncol = numcep)
    }
    ## discrete fourier transform (probably not needed)
    ## out$DFT_spectrum <- do.call(dftSpectrum, par[-match(c('windowSize'), names(par))])
    ## discard partial frames from wrassp
    # Align every feature matrix to the shortest frame count, and make sure
    # each has column names (single-column features get the feature's name).
    nframes <- sapply(out, nrow)
    windowCount <- min(sapply(out, nrow))
    for (feature in 1:length(out)){
      if (nrow(out[[feature]]) > windowCount){
        out[[feature]] <- out[[feature]][1:windowCount, , drop=FALSE]
      }
      if (is.null(colnames(out[[feature]]))){
        if (ncol(out[[feature]]) == 1){
          colnames(out[[feature]]) <- names(out)[feature]
        } else {
          colnames(out[[feature]]) <- paste(names(out)[feature], 1:ncol(out[[feature]]), sep='')
        }
      }
    }
    ## interactions
    if (verbose >= 2){
      cat('  calculating interactions and derivatives\n')
    }
    out[['energyXzcr']] <- out$energy_dB * out$zcr
    colnames(out[['energyXzcr']]) <- 'energyXzcr'
    out[['teoXf0']] <- out$teo * out$f0_ksv
    colnames(out[['teoXf0']]) <- 'teoXf0'
    ## first/second derivatives
    out <- do.call(cbind, out)
    if (!derivatives %in% 0:2){
      warning('"derivatives" must be in 0:2, ignoring')
      derivatives = 0
    }
    # d1: first difference of each feature column (NA in row 1).
    if (derivatives >= 1){
      d1 <- out
      d1[1,] <- NA
      if (nrow(d1) > 1){
        d1[2:nrow(d1),] <- out[2:nrow(d1),] - out[1:(nrow(d1)-1),]
      }
      colnames(d1) <- paste(colnames(out), '_d1', sep='')
    }
    # d2: first difference of d1 (NA in rows 1-2).
    if (derivatives == 2){
      d2 <- d1
      if (nrow(d2) > 1){
        d2[2,] <- NA
      }
      if (nrow(d2) > 2){
        d2[3:nrow(d2),] <- d1[3:nrow(d2),] - d1[2:(nrow(d1)-1),]
      }
      colnames(d2) <- paste(colnames(out), '_d2', sep='')
    }
    out <- cbind(out, if (derivatives >= 1) d1, if (derivatives == 2) d2)
    ## NA out first/last frames for which mfcc can't be calculated
    out[c(1, nrow(out)),] <- NA
    ## NA out initial frames for which derivatives can't be calculated
    if (derivatives > 0){
      out[1:(derivatives + 1),] <- NA # extra frame due to missing mfccs
    }
    attr(out, 'derivatives') = derivatives
    attr(out, 'timestamps') = 0:(windowCount-1) * windowShift
    out.list[[i]] <- out
    rm(au, fname, windowSizeSamples, windowShiftSamples, windowCount, windowStarts, out)
    if (derivatives >= 1)
      rm(d1)
    if (derivatives == 2)
      rm(d2)
  }
  # Bundle features, file metadata, and the control arguments.
  out.combined <-
    list(data = out.list,
         files = description,
         control = list(windowSize = windowSize,
                        windowShift = windowShift,
                        windowType = windowType,
                        derivatives = derivatives))
  class(out.combined) <- "preppedAudio"
  return(out.combined)
}
## S3 print method for objects returned by extractAudioFeatures():
## reports how many files were prepped and how many feature columns
## the first file's feature matrix carries.
print.preppedAudio <- function(x, ...) {
  n_files <- nrow(x$files)
  n_features <- dim(x$data[[1]])[2]
  cat(paste0(as.character(n_files),
             ' prepped audio files with ',
             n_features,
             ' features.\n'))
}
|
7d4ea6c3e2eac4113a1829e02af0717a9f27af91
|
cda60c696922a3b20f688b41c51b95f92e24813f
|
/workflow_fresh/plot.r
|
876d653744fe7b9f0a0430c9d655e850c2c960ca
|
[] |
no_license
|
cmkobel/gBGC
|
bdd2df9fb7b0c8155cf80d239b9bb928c63212b2
|
7c10850f323df3caa8d784ec93e9759cc96b2d1c
|
refs/heads/master
| 2021-01-14T20:28:24.151670
| 2020-06-13T14:00:21
| 2020-06-13T14:00:21
| 242,748,258
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,435
|
r
|
plot.r
|
## Setup and data import for the gBGC plotting workflow.
library(tidyverse)
library(zoo)
library(gganimate)
library(ggpmisc)
#library(ggpubr)
library(cowplot)
setwd("c:/users/carl/urecomb/lokal")
# Default figure dimensions (inches) for ggsave calls below.
height = 5; width = 8
# ClonalFrameML files
CF_raw = read_delim("G:/gBGC/carl/workflow_fresh/output/CF_collected.tab",
                    "\t", escape_double = FALSE, trim_ws = TRUE,
                    col_names = c('genospecies', 'unitig', 'file', 'parameter', 'post_mean', 'post_var', 'a_post', 'b_post')) %>%
  mutate(gene = str_sub(file, 4, str_length(file)-7)) %>% select(-file)
#CF = CF_raw %>% pivot_wider(names_from = parameter, values_from = c(post_mean, post_var, a_post, b_post))
# Keep only the "R/theta" rows from the ClonalFrameML output.
CF = CF_raw %>% filter(parameter == "R/theta") %>% select(-parameter)
# GC3 files
GC_raw = read_delim("G:/gBGC/carl/workflow_fresh/output/GC3_correct.tab",
                    "\t", escape_double = FALSE, trim_ws = TRUE) %>%
  mutate(sample = str_sub(header, 6),
         gene = str_sub(file, 4, str_length(file)-13)) %>%
  select(-header, -file)
# Per-gene mean GC (overall and per codon position), plus sample count.
GC3 = GC_raw %>% group_by(genospecies, unitig, gene) %>%
  summarize(mean_GC = mean(GC),
            mean_GC1 = mean(GC1),
            mean_GC2 = mean(GC2),
            mean_GC3 = mean(GC3),
            n_samples_GC3 = length(GC3)) %>%
  ungroup %>%
  rename(GC = mean_GC,
         GC1 = mean_GC1,
         GC2 = mean_GC2,
         GC3 = mean_GC3)
# Same per-gene means without the sample count column.
GC_all = GC_raw %>% group_by(genospecies, unitig, gene) %>%
  summarize(GC = mean(GC),
            GC1 = mean(GC1),
            GC2 = mean(GC2),
            GC3 = mean(GC3)) %>%
  ungroup
## Distribution plots of GC content per codon position.
# Shared x-axis limits for the GC histograms.
x_min = 0.20
x_max = .90
# Visualize GC1, 2, 3 distributions
A1 = GC_all %>% #pivot_longer(starts_with("GC"), names_to = "positions", values_to = "proportion GC") %>%
  #filter(positions != "GC") %>%
  ggplot(aes(GC1)) +
  geom_histogram(fill = "red") +
  #geom_histogram(aes(mean_GC1), alpha = 0.4, fill = "red") +
  #geom_histogram(aes(mean_GC2), alpha = 0.4, fill = "green") +
  #geom_histogram(aes(mean_GC3), alpha = 0.4, fill = "blue")
  #facet_grid(.~positions) +
  theme_light() +
  labs(x = "") +
  coord_flip() +
  theme(legend.position = "none") +
  xlim(c(x_min, x_max))
A1
A2 = GC_all %>% #pivot_longer(starts_with("GC"), names_to = "positions", values_to = "proportion GC") %>%
  #filter(positions != "GC") %>%
  ggplot(aes(GC2)) +
  geom_histogram(fill = "green") +
  #geom_histogram(aes(mean_GC1), alpha = 0.4, fill = "red") +
  #geom_histogram(aes(mean_GC2), alpha = 0.4, fill = "green") +
  #geom_histogram(aes(mean_GC3), alpha = 0.4, fill = "blue")
  #facet_grid(.~positions) +
  theme_light() +
  labs(x = "") +
  coord_flip() +
  theme(legend.position = "none") +
  xlim(c(x_min, x_max))
A2
A3 = GC_all %>% #pivot_longer(starts_with("GC"), names_to = "positions", values_to = "proportion GC") %>%
  #filter(positions != "GC") %>%
  ggplot(aes(GC3)) +
  geom_histogram(fill = "blue") +
  #geom_histogram(aes(mean_GC1), alpha = 0.4, fill = "red") +
  #geom_histogram(aes(mean_GC2), alpha = 0.4, fill = "green") +
  #geom_histogram(aes(mean_GC3), alpha = 0.4, fill = "blue")
  #facet_grid(.~positions) +
  theme_light() +
  labs(x = "") +
  coord_flip() +
  theme(legend.position = "none") +
  xlim(c(x_min, x_max))
A3
# Combined dodged histogram of GC1/GC2/GC3 side by side.
AA = GC_all %>% pivot_longer(starts_with("GC"), names_to = "position", values_to = "proportion GC") %>%# View
  #mutate(position = factor(position, levels=c("GC", "GC3", "GC1", "GC2"))) %>%
  filter(position != "GC") %>%
  ggplot(aes(`proportion GC`, fill = position)) +
  geom_histogram(position = "dodge") +
  facet_grid(.~position) +
  theme_light() +
  labs(x = "") +
  #theme(axis.text.x = element_text(angle = -90, vjust = .4)) +
  coord_flip() +
  #theme(legend.position = "none") +
  theme(
    strip.background = element_blank(),
    strip.text.x = element_blank()
  ) +
  theme(axis.text.x = element_text(angle = -90, vjust = .3)) +
  xlim(c(x_min, x_max))
AA
# Scatter of overall GC vs per-position GC with linear fits.
B = GC_all %>% pivot_longer(c(GC1, GC2, GC3), names_to = "positions", values_to = "GCn") %>%
  ggplot(aes(GC, GCn, color = positions)) +
  geom_point(alpha = 0.4) +
  geom_smooth(method = "lm") +
  theme_light() +
  theme(legend.position = "none") +
  ylim(c(x_min, x_max)) +
  theme(axis.text.x = element_text(angle = -90, vjust = .5))
B
#ggarrange(B, ggarrange(A1, A2, A3, ncol = 3), ncol = 2)
#ggarrange(B, AA, ncol = 2, labels = c("A", "B"))
# NOTE(review): `BB` and `A` are not defined anywhere above — this call
# will fail. Likely `B` and `AA` were intended; confirm with the author.
plot_grid(BB, A, labels = c("A", "B"), ncol = 2, align = "h", axis = "bt")
#ggsave("final_plots/Y.png", height = height, width = width)
# extract statistics from the GC3 measurement
# Gene counts per genospecies x unitig (interactive inspection via View).
GC_raw %>% group_by(genospecies, unitig) %>%
  summarize(n_genes = length(unique(gene))) %>%
  spread(unitig, n_genes) %>% View
## Investigate distribution of GC3
# Overlay a scaled normal density on the empirical GC3 histogram.
d_mean = GC3$GC3 %>% mean
d_sd = GC3$GC3 %>% sd
d_seq = seq(0, 1, 0.01)
# NOTE(review): 203.41 appears to be a hand-tuned scaling factor to match
# the histogram counts — confirm where it comes from.
d_d = dnorm(d_seq, mean = d_mean, sd = d_sd)*203.41
# NOTE(review): the two geom_histogram layers below are identical —
# probably a copy-paste leftover; one layer should suffice.
ggplot() +
  geom_histogram(
    data = GC3,
    mapping = aes(GC3),
    bins = 100) +
  geom_histogram(
    data = GC3,
    mapping = aes(GC3),
    bins = 100) +
  geom_line(
    data = tibble(x = d_seq, y = d_d),
    mapping = aes(x,y), linetype = "dashed") +
  geom_area(
    data = tibble(x = d_seq, y = d_d),
    mapping = aes(x,y), linetype = "dashed", fill = "blue", alpha = 0.15) +
  theme_light()
## PHI files
# Collect the per-unitig PHI recombination-test result tables into one
# wide tibble (one row per gene, one column per test method).
gather_PHI = function() {
  # NOTE(review): `alpha` is assigned but never used in this function.
  alpha = 0.5
  phi_files = list.files(path="c:/users/carl/urecomb/lokal/exports/export7_cf_all_chromids", pattern="*phi_results.tab", full.names=TRUE, recursive=T)
  phi_data = tibble()
  i = 1
  file = phi_files[1]
  for (file in phi_files) {
    # genospecies/unitig are encoded at fixed character positions of the
    # result filename — presumably stable across exports; verify if paths change.
    import = read_delim(file, "\t", escape_double = FALSE, trim_ws = TRUE, na = c("--")) %>%
      mutate(genospecies = str_sub(basename(file), 24, 24),
             unitig = str_sub(basename(file), 8,8),
             method = paste(method, detail)) %>%
      select(-detail, -genome)# %>% filter(method == "PHI (Permutation):")
    print(paste(i, file, dim(import)[1]))
    phi_data = bind_rows(phi_data, import)
    rm(import)
    i = i + 1
  }
  # Pivot one-row-per-method into one-column-per-method and give the
  # p-value columns readable names.
  phi_data %>%
    # mutate(method = paste(method, detail),
    #        unitig = str_sub(genome, 8, 8),
    #        genospecies = str_sub(genome, 24, 24)) %>%
    # select(-detail) %>%
    # group_by(method, unitig, genospecies) %>%
    # #alpha/length(pvalue), T, F), n_genes = length(pvalue))
    spread(method, pvalue) %>%
    rename(infsites = `infsites `,
           p_maxchisq = 'Max Chi^2:',
           p_nss = 'NSS NA:',
           p_phi_normal = 'PHI (Normal):',
           p_phi_permut = 'PHI (Permutation):')
}
## Assemble the combined analysis table: PHI results + gene annotations.
phi_data = gather_PHI() %>% select(genospecies, unitig, infsites, gene, p_phi_permut) %>% mutate(gene = paste0("group", gene)) %>% mutate(unitig = as.numeric(unitig))
# Gene coordinates; mid = gene midpoint, length = gene span.
gff_data <- read_csv("C:/Users/carl/Desktop/xmfa2mfa/3206-3.gff") %>% select(-X1) %>% mutate(mid = start + ((end-start)/2), length = end-start)
gff2_data <- read_delim("C:/Users/carl/Desktop/xmfa2mfa/Gene_function_pop_gene_data.csv", delim = ';')
# Before I delete gff2_data, I want to investigate a few things.
gff2_data %>% pull
# Attach putative gene functions to the coordinate table.
gff_data = left_join(gff_data, gff2_data %>% select(`Gene group`, `Putative function`) %>% rename(gene_group = `Gene group`)) %>% rename(gene = gene_group)
rm(gff2_data)
### Join all data
data = inner_join(GC3, CF) %>% inner_join(phi_data) %>% inner_join(gff_data)
#saveRDS(data, "main_correctGC3_gcall.rds")
# Reload the previously cached joined table from disk.
data = readRDS("main_correctGC3_gcall.rds")
### informative sites information
### Maybe informative sites can explain R^2
data %>% group_by(genospecies, unitig) %>%
  ggplot(aes(infsites)) + geom_histogram() +
  facet_grid(genospecies ~ unitig)
# Total informative sites per genospecies x unitig.
infsites_info = data %>% group_by(genospecies, unitig) %>%
  summarize(sum_infsites = sum(infsites)) %>%
  mutate(p = paste(genospecies, unitig))
infsites_info %>%
  ggplot(aes(p, sum_infsites, fill = genospecies)) +
  geom_col()
infsites_info %>% filter(unitig == 0)
# Groups: genospecies [5]
# genospecies unitig sum_infsites p
# <chr> <dbl> <dbl> <chr>
# 1 A 0 145718 A 0
# 2 B 0 41606 B 0
# 3 C 0 178092 C 0
# 4 D 0 13112 D 0
# 5 E 0 51992 E 0
### Create a plot that shows the distribution PHI
data %>% ggplot(aes(p_phi_permut)) +
  geom_histogram() +
  facet_grid(plasmid ~ genospecies)
# Create a plot that shows the distribution of CFML
data %>% ggplot(aes(post_mean)) +
  geom_histogram() +
  facet_grid(plasmid ~ genospecies)
### Create a plot that shows how PHI and CF correlate
data %>% ggplot(aes(log(post_mean), -p_phi_permut)) +
  geom_point(alpha = 0.5) +
  facet_grid(genospecies ~ plasmid) +
  geom_smooth()
ggsave("g:/gBGC/carl/log/50_A.png")
### Create a plot emulates what lassale did with 20 bins.
### Here it is grouped by plasmids
# Count Bonferroni-significant recombinant genes per GC3 vigintile.
data %>% ungroup %>% select(genospecies, plasmid, gene, p_phi_permut, GC3) %>%
  group_by(genospecies, plasmid) %>%
  mutate(sig_rec = if_else(p_phi_permut < 0.05/length(p_phi_permut), 1, 0),
         GC3_bin = cut_number(GC3, 20)) %>%
  group_by(GC3_bin, add = T) %>%
  mutate(n_sig_rec = sum(sig_rec),
         mean_GC3 = mean(GC3)) %>%
  ggplot(aes(mean_GC3, n_sig_rec)) +
  geom_point() +
  facet_grid(plasmid ~ genospecies, scales = "free") +
  geom_smooth(method = "lm") +
  stat_poly_eq(formula = y ~ x,
               aes(label = paste(..rr.label.., sep = "~~~")),
               parse = TRUE)
### This one is the same, but it is grouped by unitig.
data %>% ungroup %>%
  filter(unitig == 0) %>%
  select(genospecies, unitig, gene, p_phi_permut, GC3) %>%
  group_by(genospecies, unitig) %>%
  mutate(sig_rec = if_else(p_phi_permut < 0.05/length(p_phi_permut), 1, 0),
         GC3_bin = cut_number(GC3, 20)) %>%
  group_by(GC3_bin, add = T) %>%
  mutate(n_sig_rec = sum(sig_rec),
         mean_GC3 = mean(GC3)) %>%
  ggplot(aes(mean_GC3, n_sig_rec)) +
  geom_point() +
  facet_wrap(~ genospecies, scales = "free") +
  geom_smooth(method = "lm") +
  stat_poly_eq(formula = y ~ x,
               aes(label = paste(..rr.label.., sep = "~~~")),
               parse = TRUE)
ggsave("g:/gBGC/carl/log/51_B_.png")
### Now, create a Lassalle-ish plot, but with CF instead of PHI.
# Median ClonalFrameML R/theta per GC3 vigintile, per genospecies.
data %>% ungroup %>%
  filter(unitig == 0) %>%
  #filter(plasmid %in% c("3206-3_scaf_1_chromosome-01", "3206-3_scaf_2_chromosome-02", "3206-3_scaf_3_chromosome-00")) %>%
  select(genospecies, unitig, gene, post_mean, GC3) %>%
  group_by(genospecies, unitig) %>%
  mutate(#sig_rec = if_else(p_phi_permut < 0.05/length(p_phi_permut), 1, 0),
    GC3_bin = cut_number(GC3, 20)) %>%
  group_by(GC3_bin, add = T) %>%
  mutate(#n_sig_rec = sum(sig_rec),
    median_post_mean = median(post_mean),
    mean_GC3 = mean(GC3)) %>%
  ggplot(aes(mean_GC3, median_post_mean)) +
  geom_point() +
  facet_wrap(~ genospecies, scales = "free") +
  geom_smooth(method = "lm") +
  stat_poly_eq(formula = y ~ x,
               aes(label = paste(..rr.label.., sep = "~~~")),
               parse = TRUE)
ggsave("g:/gBGC/carl/log/52_B_CF.png")
### Regarding CF, we need to see the data without binning
data %>% ungroup %>%
  filter(unitig == 0) %>%
  select(genospecies, unitig, gene, post_mean, GC3) %>%
  mutate(post_mean = log(post_mean)) %>%
  ggplot(aes(GC3, post_mean)) +
  geom_point() +
  facet_wrap(~genospecies) +
  geom_smooth()
ggsave("g:/gBGC/carl/log/53_C_CF.png")
### Let's isolate the most extreme values of recombination, and look at GC3
A = data %>% ungroup %>%
  filter(unitig == 0) %>%
  group_by(genospecies) %>%
  filter(post_mean > quantile(post_mean, .95)) %>%
  ggplot(aes(mid, post_mean, color = GC3)) +
  geom_point()
B = data %>% ungroup %>%
  filter(unitig == 0) %>%
  group_by(genospecies) %>%
  filter(post_mean > quantile(post_mean, .95)) %>%
  ggplot(aes(mid, GC3)) +
  geom_point()
# NOTE(review): ggarrange comes from ggpubr, whose library() call is
# commented out at the top of this script — this line will fail unless
# ggpubr is attached.
ggarrange(A, B, ncol = 1)
# Flag the top-N% most recombining genes and show them along the genome.
top_percent = 1
data %>% ungroup %>%
  filter(unitig == 0) %>%
  group_by(genospecies) %>%
  mutate(`recombination class` = if_else(post_mean > quantile(post_mean, 1-(top_percent/100)), paste0("top ", top_percent,"%"), "rest")) %>%
  ggplot(aes(mid, GC3, color = `recombination class`)) +
  geom_point() +
  facet_wrap(~genospecies)+
  labs(x = 'position')
ggsave("g:/gBGC/carl/log/54_position_top10.png")
top_percent = 1
data %>% ungroup %>%
  filter(unitig == 0) %>%
  group_by(genospecies) %>%
  mutate(`recombination class` = if_else(post_mean > quantile(post_mean, 1-(top_percent/100)), paste0("top ", top_percent,"%"), "rest")) %>%
  ggplot(aes(GC3, `recombination class`, fill = `recombination class`)) +
  geom_boxplot() +
  #facet_grid(.~genospecies)
  facet_grid(genospecies~.)
ggsave("g:/gBGC/carl/log/55_boxplot_top10.png")
### Create data that is used for the shiny app
# First we have to create the sliding window.
#import gene map
gene_map <- read_delim("C:/Users/carl/urecomb/lokal/exonerate/gene_map.tsv",
                       "\t", escape_double = FALSE, trim_ws = TRUE) %>%
  rename(genospecies = sample_genospecies,
         unitig = sample_unitig,
         gene = sample_gene) %>% rowid_to_column() %>% group_by(rowid) %>% mutate(SM3_mid = mean(c(SM3_start, SM3_end)))
data = inner_join(data, gene_map)
# if the unitigs agree, we can remove one of them
data %>% transmute(vs = paste(unitig, SM3_unitig)) %>% table
# 0 0 1 1 2 2 3 3
# 12535 1301 1176 511
data = data %>% select(-SM3_unitig)
# How have the genes jumped with the SM3 reference (sanity check)
data %>% mutate(diff = mid - SM3_mid) %>%
  ggplot(aes(diff)) +
  geom_histogram(bins = 100) +
  facet_grid(unitig ~ genospecies) +
  labs(caption = "# bins = 100") +
  theme(axis.text.x = element_text(angle = 90, vjust = .5))
ggsave("Enquire_Maria.png", height = 8, width = 10)
#test:
data_anim = data %>%
  select(genospecies, unitig, gene, GC3, post_mean, post_var, plasmid, SM3_mid, length) %>%
  rename(mid = SM3_mid)
sel_gs = 'C'
#for (roll_width in c(100, 250, 500, 750, 1000)) {
# NOTE(review): the first `widths` assignment is immediately overwritten
# by the second — presumably a kept-around experiment.
widths = c(1, 100, 1000)
widths = c(1, 5, seq(10, 500, 5))
# Rolling medians of post_mean and GC3 for every window width; results
# from all widths are stacked into one long table.
data_anim_binds = tibble(); for (roll_width in widths) {
  print(roll_width)
  data_anim_binds = bind_rows(data_anim %>%
                                arrange(mid) %>%
                                group_by(genospecies, plasmid) %>%
                                mutate(roll_post_mean = rollapply(post_mean, roll_width, median, fill = NA)) %>% #View
                                mutate(roll_GC3 = rollapply(GC3, roll_width, median, fill = NA)) %>%
                                mutate(roll_width = roll_width) %>%
                                drop_na(),
                              data_anim_binds)
}
saveRDS(data_anim_binds, 'C:/Users/carl/Documents/test2/data_anim_binds_6_median_GC.rds')
# Render one frame of the rolling-window view for a genospecies/width pair.
# Reads data_anim_binds from the enclosing environment.
render_frame <- function(sel_gs, sel_roll_width) {
  data_anim_binds %>%
    filter(genospecies == sel_gs) %>%
    filter(roll_width == sel_roll_width) %>%
    ggplot(aes(mid, roll_post_mean, color = roll_GC3)) +
    geom_point() +
    facet_grid(plasmid~., scales = "free") +
    scale_color_gradientn(colours = c('red1', 'grey', 'green4')) +
    labs(subtitle = paste0("Genospecies ", sel_gs, "\nrolling window width: ", sel_roll_width, " genes"))
}
# test it
render_frame('C', 100)
data_anim_binds %>% filter(genospecies == sel_gs) %>%
  filter(roll_width == 100) %>%
  ggplot(aes(mid, roll_post_mean, color = roll_GC3)) +
  geom_line() +
  facet_grid(plasmid~., scales = "free") +
  scale_color_gradientn(colours = c('red1', 'grey', 'green4'))
## PHI files
# NOTE(review): this REDEFINES gather_PHI() from earlier in the script
# (different search path and filename parsing); only this later version
# is in effect when the script is sourced top to bottom.
gather_PHI = function() {
  # NOTE(review): `alpha` is assigned but never used in this function.
  alpha = 0.5
  phi_files = list.files(path=".", pattern="*phi_results.tab", full.names=TRUE, recursive=T)
  phi_data = tibble()
  i = 1
  for (file in phi_files) {
    import = read_delim(file, "\t", escape_double = FALSE, trim_ws = TRUE, na = c("--"))
    print(paste(i, file, dim(import)[1]))
    phi_data = bind_rows(phi_data, import)
    rm(import)
    i = i + 1
  }
  # Here genospecies/unitig come from fixed positions of the `genome`
  # column rather than the filename; then widen to one column per method.
  phi_data %>%
    mutate(method = paste(method, detail),
           unitig = str_sub(genome, 8, 8),
           genospecies = str_sub(genome, 24, 24)) %>%
    select(-detail) %>%
    group_by(method, unitig, genospecies) %>%
    #alpha/length(pvalue), T, F), n_genes = length(pvalue))
    spread(method, pvalue) %>%
    rename(p_maxchisq = 'Max Chi^2:',
           p_nss = 'NSS NA:',
           p_phi_normal = 'PHI (Normal):',
           p_phi_permut = 'PHI (Permutation):')
}
phi_data = gather_PHI()
# Check distribution of p-values from PHI
phi_data %>% pivot_longer(starts_with("p_")) %>%
  ggplot(aes(value)) +
  facet_grid(name~genospecies) +
  geom_histogram()
#################
phi_data %>% ggplot(aes(p_phi_permut)) + geom_histogram()
saveRDS(phi_data, "PHI_final.rds")
write_tsv(phi_data, "PHI_final.tsv")
# Check distribution of p-values from PHI
# (NOTE(review): duplicate of the histogram block above.)
phi_data %>% pivot_longer(starts_with("p_")) %>%
  ggplot(aes(value)) +
  facet_grid(name~genospecies) +
  geom_histogram()
|
5cf3e1665b284b0374e5815332905f6f91ae635a
|
37eddbfbf657b02849ba56bd3aaba1654fbca04e
|
/code/intermediates.R
|
1f90a703c322725732751981b26b89ac0511d809
|
[] |
no_license
|
jGaboardi/pmedmize
|
f370dca31dae6428b512750c3cfac1cc3cfe3513
|
93828b1c5b1c6610e514a7f7bfb46b49add820e2
|
refs/heads/master
| 2023-01-02T02:02:28.538709
| 2020-10-27T13:41:42
| 2020-10-27T13:41:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,359
|
r
|
intermediates.R
|
"
Definitions for intermediate variables to build individual-level
P-MEDM constraints.
"
assign_householder_item_level <- function(pums, v){
  "
  Helper function. Assigns the item level of the
  householder to all members of the household
  (for building household-level intermediates).
  "
  # Take the first person's value in each household (assumed to be the
  # householder) and repeat it for every member of that household.
  # NOTE(review): split() orders groups by sorted SERIAL, so the unlisted
  # result lines up with pums rows only if pums is sorted by SERIAL —
  # confirm the upstream data is sorted that way.
  v <- lapply(split(v, pums$SERIAL), function(s){
    rep(s[1], length(s))
  })
  factor(unlist(v))
}
ager <- function(pums){
  "
  Person: Age
  "
  # Detailed person-level age bands; returns one indicator column per band.
  brks <- c(0, 5, 10, 15, 18, 20, 21, 22, 25, 30, 35, 40, 45, 50, 55,
            60, 62, 65, 67, 70, 75, 80, 85, Inf)
  n_bands <- length(brks) - 1  # 23 bands
  lbls <- paste(brks[seq_len(n_bands - 1)],
                brks[seq_len(n_bands - 1) + 1] - 1, sep = ' - ')
  lbls <- c(lbls, '85+')
  v <- cut(as.numeric(pums$AGE), breaks = brks, labels = lbls,
           include.lowest = TRUE, right = FALSE)
  model.matrix(~v - 1)
}
agecitizenr <- function(pums){
  "
  Person: Age (Citizenship Status definitions)
  "
  # Coarse age bands used in citizenship tables: under 5, 5-17, 18+.
  v <- cut(as.numeric(pums$AGE),
           breaks = c(-Inf, 5, 18, Inf),
           labels = c('5under', '5-17', '18+'),
           include.lowest = TRUE,
           right = FALSE)
  model.matrix(~v - 1)
}
agehhincr <- function(pums){
  "
  Household: Householder Age (Income Definitions)
  "
  # Age bands 0-24, 25-44, 45-64, 65+ as used in household income tables.
  age.brks.hhinc=c(0,25,45,65,Inf)
  age.hhinc.labels=c(paste(age.brks.hhinc[1:3],c(age.brks.hhinc[2:4])-1,sep='-'),'65+')
  v <- cut(as.numeric(pums$AGE),
    breaks = age.brks.hhinc,
    include.lowest = TRUE,
    right = FALSE,
    labels = age.hhinc.labels)
  # Broadcast the householder's band to all household members.
  v <- assign_householder_item_level(pums, v)
  model.matrix(~v - 1)
}
agepovr <- function(pums){
  "
  Person: Age (Poverty Status definitions)
  "
  # Age bands used with poverty-status tables (note single-year bands
  # at ages 5 and 15).
  brks <- c(0, 5, 6, 12, 15, 16, 18, 25, 35, 45, 55, 65, 75, Inf)
  lbls <- c(paste(head(brks, -2), brks[2:13] - 1, sep = '-'), '75+')
  v <- cut(as.numeric(pums$AGE), breaks = brks, labels = lbls,
           right = FALSE, include.lowest = TRUE)
  model.matrix(~v - 1)
}
agetenr <- function(pums){
  "
  Household: Householder Age (Tenure Definitions)
  "
  # Age bands used in housing tenure tables.
  age.brks.tenure <- c(0,15,25,35,45,55,60,65,75,85,Inf)
  age.tenure.labels<-c(paste(age.brks.tenure[1:9],c(age.brks.tenure[2:10])-1,sep='-'),'85+')
  v <- cut(as.numeric(pums$AGE),
    breaks = age.brks.tenure,
    include.lowest = TRUE,
    right = FALSE,
    labels = age.tenure.labels)
  # Broadcast the householder's band to all household members.
  v <- assign_householder_item_level(pums, v)
  model.matrix(~v - 1)
}
builtr <- function(pums){
  "
  Household: Dwelling Built Year
  "
  # Lookup table mapping BUILTYR2 codes to year-range labels.
  # NOTE(review): `data_path` is a global defined elsewhere — confirm scope.
  built_key <- read.csv(file.path(data_path, 'PUMS_BUILTYR2.csv'))
  v <- factor(built_key$label[match(pums$BUILTYR2,built_key$code)],
    levels = unique(built_key$label))
  model.matrix(~v - 1)[,-1] # omit `gq` column (placeholder)
}
citizenr <- function(pums){
  "
  Person: Citizen Status
  1: US citizen, 2: naturalized, 3: non citizen
  "
  # Arithmetic recode of IPUMS CITIZEN: <2 -> 1, ==2 -> 2, >2 -> 3.
  code <- 1 + (pums$CITIZEN >= 2) + (pums$CITIZEN > 2)
  v <- factor(code)
  model.matrix(~v - 1)
}
eldr <- function(pums){
  "
  Household: Presence of People Age 60 and Over
  "
  # 1 if any member of the household is aged 60+, else 0, repeated for
  # every member of that household.
  # NOTE(review): output row order follows unique(SERIAL) group order; it
  # matches pums rows only when household rows are contiguous — confirm
  # the upstream sort.
  v <- factor(unlist(sapply(unique(pums$SERIAL), function(s){
    as = pums$AGE[pums$SERIAL == s]
    elders = ifelse(any(as >= 60),1,0)
    rep(elders,length(as))
  })))
  model.matrix(~v - 1)
}
famr <- function(pums){
  "
  Person: In Family Household
  "
  # 1 = family household, 2 = nonfamily household (not alone),
  # 3 = lives alone, 4 = group quarters.
  # Assumes the householder is the first record of each household and that
  # household rows are contiguous in pums (unique(SERIAL) preserves order).
  # (Removed unused local `fs` = FAMSIZE lookup; it was never referenced.)
  v <- unlist(sapply(unique(pums$SERIAL),function(s){
    # If multiple members of the household, discard the head (coded 1)
    # If only one member of the household, recode the head as 'living alone'
    # (1 -> 99)
    fr=pums$RELATE[pums$SERIAL==s]
    if(length(fr)>1){
      fr=fr[-1]
    }else{
      fr=99
    }
    # If some or all other household members are related to head, family
    # household (1)
    # If no other household members are related to head, nonfamily household,
    # not living alone (2)
    # If head is only household member (coded 99), lives alone (3)
    famtype=ifelse(min(fr)<11,1,
                   ifelse(min(fr)>=11 & max(fr)<99,2,3))
    if(max(fr)<99){
      rep(famtype,length(fr)+1)  # +1 puts the head back in
    }else{
      famtype
    }
  }))
  v[pums$GQ > 1] <- 4 # code group quarters pop as 4
  v <- factor(v)
  model.matrix(~v - 1)
}
hheadr <- function(pums){
  "
  Person: Head of Household (yes/no)
  "
  # Head = detailed relationship code 101 and not living in group quarters.
  is_head <- pums$RELATED == 101 & pums$GQ < 2
  v <- factor(as.integer(is_head))
  model.matrix(~v - 1)
}
hhincr <- function(pums){
  "
  Household: Income
  "
  # Income brackets in thousands of dollars; labels like '0-9.9k',
  # top-coded at '200k+'.
  brks_k <- c(0, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 75, 100, 125, 150, 200, Inf)
  lbls <- c(paste0(brks_k[1:15], '-', brks_k[2:16] - 0.1, 'k'), '200k+')
  # safeguard - treat negative income as 0 income
  income <- as.numeric(pums$HHINCOME)
  income[income < 0] <- 0
  v <- cut(income, breaks = 1000 * brks_k, labels = lbls,
           include.lowest = TRUE, right = FALSE)
  model.matrix(~v - 1)
}
hheadsexr <- function(pums){
  "
  Person: Sex of Household Head
  "
  # For each person, look up the SEX of the first record of their household
  # (assumed to be the householder). match() finds the first occurrence of
  # each SERIAL, replacing the original O(n^2) per-row sapply scan.
  head_sex <- pums$SEX[match(pums$SERIAL, pums$SERIAL)]
  # Fix: specify levels explicitly. factor(x, labels = c('Male','Female'))
  # errors when only one sex occurs among householders, because labels must
  # match the number of observed levels.
  v <- factor(head_sex, levels = c(1, 2), labels = c('Male', 'Female'))
  model.matrix(~v - 1)
}
hhracer <- function(pums){
  "
  Household: Race of Householder
  "
  # RACE codes cut into the nine ACS race categories, then the
  # householder's category is broadcast to all household members.
  race.brks <- c(-Inf,2,3,4,5,6,7,8,9,Inf)
  race.labels <-c('White alone',
    'Black or African American alone',
    'American Indian and Alaska Native alone',
    'Asian alone',
    'Native Hawaiian and Other Pacific Islander alone',
    'Some other race alone',
    'Two or more races',
    'Two races including Some other race',
    'Two races excluding Some other race, and three or more races')
  v <- cut(pums$RACE,
    breaks=race.brks,
    labels=race.labels,
    include.lowest=TRUE,
    right=FALSE)
  v <- assign_householder_item_level(pums, v)
  model.matrix(~v - 1)
}
hhhispanr <- function(pums){
  "
  Household: Hispanic/Latino Ethnicity of Householder
  "
  # Any positive HISPAN code counts as Hispanic/Latino; the householder's
  # value is broadcast to all household members.
  v <- factor(ifelse(pums$HISPAN>0,'Hispanic/Latino','Not Hispanic/Latino'))
  v <- assign_householder_item_level(pums, v)
  model.matrix(~v - 1)
}
hhsizr <- function(pums){
  # Household: number of members per household, top-coded at '7+'.
  # NOTE(review): split() orders households by sorted SERIAL; the unlisted
  # sizes align with pums rows only if pums is sorted by SERIAL — confirm.
  hhsize <- lapply(split(pums, pums$SERIAL), function(s){
    rep(nrow(s), nrow(s))
  })
  hhsize <- unlist(hhsize)
  v <- factor(ifelse(hhsize >= 7, '7+', hhsize),
    levels=c(as.character(seq(1, 6, by = 1)), '7+'))
  model.matrix(~v - 1)
}
hispanr <- function(pums){
  "
  Person: Hispanic/Latino Ethnicity
  "
  # Any positive HISPAN code counts as Hispanic/Latino; index into a
  # two-element label vector (NA propagates as NA).
  v <- c('Not Hispanic/Latino', 'Hispanic/Latino')[(pums$HISPAN > 0) + 1]
  model.matrix(~v - 1)
}
householdr <- function(pums){
  "
  Person: In Household (vs. Group Quarters)
  "
  # GQ codes below 3 are regular households; 3 and above are group quarters.
  in_household <- as.integer(pums$GQ < 3)
  v <- factor(in_household)
  model.matrix(~v - 1)
}
langr <- function(pums){
  "
  Person: Language Spoken at Home
  1: English, 2: Spanish, 3: Other, 0: NA (age under 5)
  "
  lang <- pums$LANGUAGE
  code <- ifelse(lang == 0, 0,           # not asked (age under 5)
          ifelse(lang == 1, 1,           # English
          ifelse(lang == 12, 2, 3)))     # Spanish; everything else = other
  v <- factor(code)
  model.matrix(~v - 1)
}
marstr <- function(pums){
  "
  Person: Marital Status of Household Head
  "
  # 1 if the householder is coded "married, spouse present" (MARST == 1),
  # else 0, repeated for every household member.
  # NOTE(review): assumes the head is the first record of each household
  # and that household rows are contiguous — confirm upstream sort.
  v <- factor(unlist(sapply(unique(pums$SERIAL),function(s){
    sm=pums$MARST[pums$SERIAL==s]
    head_spouse_present = sm[1]
    mar_hh=ifelse(head_spouse_present==1,1,0)
    rep(mar_hh,length(sm))
  })))
  model.matrix(~v - 1)
}
minr <- function(pums){
  "
  Household: Presence of People Under Age 18
  "
  # 1 if any member of the household is under 18, else 0, repeated for
  # every member of that household.
  # NOTE(review): like eldr(), alignment with pums rows assumes household
  # rows are contiguous — confirm upstream sort.
  v <- factor(unlist(sapply(unique(pums$SERIAL), function(s){
    as = pums$AGE[pums$SERIAL == s]
    minors = ifelse(any(as < 18), 1, 0)
    rep(minors, length(as))
  })))
  model.matrix(~v - 1)
}
povr <- function(pums){
  "
  Person: Poverty Status
  "
  # POVERTY is income as a percentage of the poverty threshold;
  # 0 codes an undetermined status.
  pov <- pums$POVERTY
  v <- factor(
    ifelse(pov >= 100, 'Above_Pov',
    ifelse(pov > 0, 'Below_Pov', 'Undetermined'))
  )
  model.matrix(~v - 1)
}
racer <- function(pums){
  "
  Person: Race
  "
  # RACE codes mapped onto the nine ACS race categories; one cut point at
  # each successive code value.
  race.labels <- c('White alone',
                   'Black or African American alone',
                   'American Indian and Alaska Native alone',
                   'Asian alone',
                   'Native Hawaiian and Other Pacific Islander alone',
                   'Some other race alone',
                   'Two or more races',
                   'Two races including Some other race',
                   'Two races excluding Some other race, and three or more races')
  v <- cut(pums$RACE,
           breaks = c(-Inf, seq(2, 9), Inf),
           labels = race.labels,
           include.lowest = TRUE,
           right = FALSE)
  model.matrix(~v - 1)
}
relater <- function(pums){
  "
  Person: Relation to head of household
  "
  # RELATE codes under 11 are relatives of the householder (incl. the head).
  v <- factor(as.integer(pums$RELATE < 11))
  model.matrix(~v - 1)
}
relatedr <- function(pums){
  "
  Person: Detailed relation to head of household
  "
  # metadata: lookup table mapping RELATED codes to labels.
  # NOTE(review): `data_path` is a global defined elsewhere — confirm scope.
  related <- read.csv(file.path(data_path, 'PUMS_RELATED.csv'), stringsAsFactors = F)[,c('code','label')]
  v <- factor(related$label[match(pums$RELATED,related$code)],
    levels=related$label[!is.na(related$code)])
  model.matrix(~v - 1)
}
sexr <- function(pums){
  "
  Person: Sex
  "
  # SEX code 1 = male; any other code = female.
  v <- c('Female', 'Male')[(pums$SEX == 1) + 1]
  model.matrix(~v - 1)
}
speakengr <- function(pums){
  "
  Person: English Ability
  1: speaks only English, 2: speaks English 'very well',
  3: less than 'very well', 0: NA or blank (age under 5)
  "
  spk <- pums$SPEAKENG
  code <- ifelse(spk == 0, 0,         # blank / not asked (age under 5)
          ifelse(spk == 3, 1,         # speaks only English
          ifelse(spk == 4, 2, 3)))    # 'very well'; else less than 'very well'
  v <- factor(code)
  model.matrix(~v - 1)
}
tenr <- function(pums){
  "
  Household: Tenure
  "
  # OWNERSHP 1 = owned or being bought; everything else treated as rented.
  v <- factor(c('Rent', 'Own')[(pums$OWNERSHP == 1) + 1])
  model.matrix(~v - 1)
}
unitsr <- function(pums){
  "
  Household: Units in Structure
  "
  # Lookup table mapping UNITSSTR codes to structure-size labels.
  # NOTE(review): `data_path` is a global defined elsewhere — confirm scope.
  units_key <- read.csv(file.path(data_path, 'PUMS_UNITSSTR.csv'))
  v <- factor(units_key$label[match(pums$UNITSSTR, units_key$code)],
    levels = units_key$label)
  model.matrix(~v - 1)[,-1] # omit `gq` column (placeholder)
}
#####
# Template/placeholder for defining a new intermediate variable; not used
# in production (returns a degenerate design matrix).
temp <- function(pums){
  "
  "
  v <- NA
  model.matrix(~v - 1)
}
|
d92fa232a006e36f44f81a79c672dae3f9243307
|
e69d3476863aa2f73cdf3787b6f078618f133bbb
|
/week_2/week_2/RCrawler_example/CaseStudies/Case6PttGossiping/test.R
|
fe777cd3aff65fad299dab78cd8eeec6c2053cc1
|
[] |
no_license
|
pumpkinlinlin/CSX_RProject_Spring_2018
|
08d714d8ed9318c2c4aafefeba5ce5a5416a92f5
|
ef3d5abe1ba3351c0a9daef7a71fb3ceb1c725b5
|
refs/heads/master
| 2020-04-24T07:21:47.641885
| 2018-10-01T09:45:53
| 2018-10-01T09:45:53
| 171,796,950
| 1
| 0
| null | 2019-02-21T03:57:13
| 2019-02-21T03:57:13
| null |
UTF-8
|
R
| false
| false
| 474
|
r
|
test.R
|
# Scrape the PTT "Gossiping" board: collect list-page URLs, extract post
# URLs from them, then fetch post metadata and push (comment) data.
rm(list=ls(all.names = TRUE))
source("packIntoFunction.R")  # defines getListPageUrls/getPostUrls/getPostData
listPageUrls = getListPageUrls("Gossiping")[1:5]
listPageUrls
postUrls = unlist(lapply(listPageUrls,getPostUrls))
postUrls
getPostData("https://www.ptt.cc/bbs/Gossiping/M.1431338763.A.1BF.html")
getPostData(postUrls[2])
postData = lapply(postUrls[3:5],getPostData)
# Combine per-post metadata rows and per-post push/comment rows.
postDf = data.frame(do.call(rbind,lapply(postData,function(xx) xx$postData)))
pushDf = do.call(rbind,lapply(postData,function(xx) xx$pushDf))
|
aa5d0e7e67a0bbe310ee986b34dfc0a40b813f11
|
a845ad855b81eec06afe792f5f18a82a8e1c89d8
|
/Week 10/R/SpatialExerciseWickers.R
|
7081df297b9746458bae0a7d4187701a60417a71
|
[] |
no_license
|
mwickers1/Statistical-Machine-Learning
|
a0542d7e8951ba636ef5b48de25deeb6b7a60b42
|
7af2138a4aaece6e28974132eb3e829e2e31ae09
|
refs/heads/master
| 2020-08-08T01:47:10.596237
| 2019-12-11T16:34:41
| 2019-12-11T16:34:41
| 213,664,455
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 589
|
r
|
SpatialExerciseWickers.R
|
library(dplyr)
library(spatstat)
# Load SF 311 service cases and the city polygon, build a spatstat point
# pattern over the bounding box, and plot it with a simulation envelope.
data <- read.csv('Data/311_Cases.csv', quote = "\"", comment = "", stringsAsFactors = F)
shapes <- read.csv('Data/sf_polygon.csv', stringsAsFactors = F)
data <- data %>%
  filter(Latitude != 0)  # drop records with missing coords coded as 0
dataPE <- data %>%
  filter(Category == 'Parking Enforcement')
window <- owin(xrange = c(min(shapes$x), max(shapes$x)), yrange = c(min(shapes$y), max(shapes$y)))
pp <- ppp(data$Longitude, data$Latitude, window = window, poly = 'p')
plot(pp)
plot(envelope(pp))  # Monte Carlo envelope for a CSR test (can be slow)
source('R/writekml.r')
# NOTE(review): dataPE has columns Longitude/Latitude, but "long"/"lat" are
# passed here — confirm the column names write.kml expects.
write.kml (dataPE, "long", "lat", outfile = "myfile.kml", icon.num = 1)
|
898fb9eef14c268e7ed736281688d35c87dd3e2c
|
bce7dad4675f5edd62b2e74431a2dfc41d028f7a
|
/R/ex/rjmcmc.R
|
9501022a0a33648475ef28f99615dd7f545183eb
|
[] |
no_license
|
volkerschmid/bayeskurs
|
fb06d0feab461dbd4fbbb1323a8f22cd9210c98e
|
601246bc2fe4a4850dc1550e7d92e1cac90277c0
|
refs/heads/main
| 2023-07-12T16:15:14.185395
| 2021-08-19T18:01:51
| 2021-08-19T18:01:51
| 398,031,370
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,104
|
r
|
rjmcmc.R
|
g.predict <- function(Mod, X) {
  # Evaluate the polynomial regression model encoded in Mod at points X.
  # Mod = c(logLik, model order j, beta0, beta1, beta2); coefficient slots
  # above order j are ignored.
  order_j <- Mod[2]
  coefs <- Mod[3:5]
  preds <- 0 * X + coefs[1]          # intercept (0*X keeps length/NA pattern)
  if (order_j >= 2) preds <- preds + coefs[2] * X
  if (order_j >= 3) preds <- preds + coefs[3] * X^2
  preds
}
g.perterb <- function(M=c(-Inf, 3, 0, 0, 0), Qsd=c(0, 0, 0.1, 0.01, 0.001), LB = c(0, 1, -10, -2, -1 ), UB = c(0, 1, 10, 2, 1 ) , data=data.frame(1:100, rnorm(100)))
{
  # Within-model move: random-walk Metropolis-Hastings update of each of the
  # j active polynomial coefficients, visited in random order.
  #   M     = c(logLik, model order j, beta0, beta1, beta2)
  #   Qsd   = per-slot proposal standard deviations
  #   LB/UB = per-slot bounds (uniform prior support)
  # Returns the (possibly updated) model vector M.
  # unpacking the parameters
  LL = M[1]
  j = M[2]
  x = data[,1]
  y = data[,2]
  ORDER = sample(3:(3+j-1), j)   # visit active coefficient slots 3..3+j-1
  for (i in ORDER)
  {
    M.prime = M                                              # proposal model
    M.prime[i] = M.prime[i] + rnorm(1, mean = 0, sd= Qsd[i]) # random-walk step
    P = g.predict(M.prime, x)                                # predicted values
    LL.prime = sum(dnorm(y-P, mean= 0, sd=1, log=TRUE))      # log-likelihood (unit-variance errors)
    M.prime[1] = LL.prime                                    # save LL
    r = runif(1)
    MH = exp(LL.prime - LL)   # acceptance ratio vs. the CURRENT state
    if ((r <= MH) && (M.prime[i] >= LB[i]) && (M.prime[i] <= UB[i])) {
      M = M.prime
      # Fix: refresh LL so later coefficients in this sweep are compared
      # against the accepted state, not the stale starting likelihood.
      LL = LL.prime
    }
  }
  return(M)
}
g.birth<-function(M=c(-Inf, 1, 0, 0, 0), Qsd=c(0, 0, 0.1, 0.01, 0.001), LB = c(0, 1, -10, -2, -1 ), UB = c(0, 1, 10, 2, 1 ) , data=data.frame(1:100, rnorm(100)))
{
  # Birth move: try to grow the model order by one (j=1 -> 2 or j=2 -> 3),
  # drawing the new coefficient from its uniform prior.
  # NOTE(review): the acceptance ratio uses only the likelihood ratio;
  # this is valid here only because the proposal equals the (uniform)
  # prior for the new coefficient, so those terms cancel — confirm.
  # unpacking the parameters
  LL = M[1]
  j = M[2]
  x = data[,1]
  y = data[,2]
  if (j == 1)
  {
    M.prime = M # make the proposal model
    M.prime[2] = 2
    M.prime[4] = runif(1, min = LB[4], max = UB[4]) # propose from prior
    P = g.predict(M.prime, x) # get predicted values
    LL.prime = sum(dnorm(y-P, mean= 0, sd=1, log=T)) # compute loglikihood
    M.prime[1] = LL.prime # save LL
    r = runif(1) # random uniform
    MH = exp(LL.prime - LL) # Metropolis-hasting acceptance probability value
    if (r <= MH) {M = M.prime} # if accepted
  }
  if (j == 2)
  {
    M.prime = M # make the proposal model
    M.prime[2] = 3
    M.prime[5] = runif(1, min = LB[5], max = UB[5]) # propose from prior
    P = g.predict(M.prime, x) # get predicted values
    LL.prime = sum(dnorm(y-P, mean= 0, sd=1, log=T)) # compute loglikihood
    M.prime[1] = LL.prime # save LL
    r = runif(1) # random uniform
    MH = exp(LL.prime - LL) # Metropolis-hasting acceptance probability value
    if (r <= MH) {M = M.prime} # if accepted
  }
  return(M)
}
g.death<-function(M=c(-Inf, 2, 1, 0.5, 0.0), Qsd=c(0, 0, 0.1, 0.01, 0.001), LB = c(0, 1, -10, -2, -1 ), UB = c(0, 1, 10, 2, 1 ) , data=data.frame(1:100, rnorm(100)))
{
  # Death move: try to shrink the model order by one (j=3 -> 2 or j=2 -> 1),
  # zeroing the highest-order coefficient. Mirror image of g.birth().
  # unpacking the parameters
  LL = M[1]
  j = M[2]
  x = data[,1]
  y = data[,2]
  if (j == 3)
  {
    M.prime = M # make the proposal model
    M.prime[2] = 2
    M.prime[5] = 0 # kill the quadratic coefficient
    P = g.predict(M.prime, x) # get predicted values
    LL.prime = sum(dnorm(y-P, mean= 0, sd=1, log=T)) # compute loglikihood
    M.prime[1] = LL.prime # save LL
    r = runif(1) # random uniform
    MH = exp(LL.prime - LL) # Metropolis-hasting acceptance probability value
    if (r <= MH) {M = M.prime} # if accepted
  }
  if (j == 2)
  {
    M.prime = M # make the proposal model
    M.prime[2] = 1
    M.prime[4] = 0 # kill the linear coefficient
    P = g.predict(M.prime, x) # get predicted values
    LL.prime = sum(dnorm(y-P, mean= 0, sd=1, log=T)) # compute loglikihood
    M.prime[1] = LL.prime # save LL
    r = runif(1) # random uniform
    MH = exp(LL.prime - LL) # Metropolis-hasting acceptance probability value
    if (r <= MH) {M = M.prime} # if accepted
  }
  return(M)
}
g.explore <- function(old, d) {
  # One RJMCMC step: choose uniformly among a within-model perturbation,
  # a birth (grow the polynomial order) and a death (shrink it), and apply
  # it to the current model `old` using data `d`.
  qsd <- c(0, 0, 0.1, 0.01, 0.001)
  lb <- c(0, 1, -10, -2, -1)
  ub <- c(0, 1, 10, 2, 1)
  move.type = sample(1:3, 1)  # the type of move i.e., perterb, birth, death
  mover <- switch(move.type, g.perterb, g.birth, g.death)
  mover(M = old, Qsd = qsd, LB = lb, UB = ub, data = d)
}
g.rjMCMC<-function(Ndat = 100, Nsamp = 25000, BURN = 1000)
{
  # Demo driver: simulate linear data, run the reversible-jump sampler,
  # report the posterior over model orders, and plot marginal posteriors
  # of the three coefficients (true values as dashed blue lines).
  #+ (1:Ndat)*0.75
  beta0 = 3
  beta1 = 0.1
  beta2 = 0
  data = data.frame(x = 1:Ndat, y = beta0 +rnorm(Ndat)+ (1:Ndat)*beta1 ) # the simulated data
  plot(data[,1], data[,2], xlab="x", ylab="y", main = "Simulated Data")
  lines(1:Ndat,beta0 + (1:Ndat)*beta1, col="blue", lwd=3 )
  points(data[,1], data[,2])
  Mod.old = c(-Inf, 1, 4, 0, 0)  # start from an intercept-only model
  for(i in 1:BURN) # the burn in
  {
    Mod.old = g.explore(old = Mod.old, d = data)
  }
  print(Mod.old)
  # NOTE(review): REC grows by rbind each iteration — O(Nsamp^2); fine for
  # a demo but preallocating a matrix would be faster.
  REC = Mod.old
  for(i in 1:(Nsamp-1)) # the sampling phase
  {
    Mod.old = g.explore(old = Mod.old, d = data)
    REC = rbind(REC, Mod.old)
    rownames(REC) = NULL
  }
  print(table(REC[,2]))  # posterior frequency of each model order
  x = 16
  par(mar = c(4,4,1,1), oma = c(1,1,1,1))
  layout(mat = matrix(c(1, 2, 3), nrow=1, ncol=3, byrow=T) )
  REC = rbind(REC, c(0, 3, 0,0,0)) # just to make the ploting easier
  H1 = hist(REC[,3],breaks = seq(-10, 10, length.out = 1001), plot=F)
  H2 = hist(REC[REC[,2] >= 2 ,4], breaks = seq(-2, 2, length.out = 1001), plot=F)
  H3 = hist(REC[REC[,2] >= 3 ,5], breaks = seq(-1, 1, length.out = 1001), plot=F)
  plot(H1$mids, H1$den, type="n", xlab="Beta 0", ylab= "P(Beta 0)",xaxs="i", yaxs="i")
  polygon(x=c(H1$mids[1], H1$mids, H1$mids[length(H1$mids)] ), y=c(0, H1$den, 0), col="grey", border=F )
  abline( v = beta0, col="blue", lwd=2, lty=2 )
  plot(H2$mids, H2$den, type="n", xlab="Beta 1", ylab= "P(Beta 1)",xaxs="i", yaxs="i")
  polygon(x=c(H2$mids[1], H2$mids, H2$mids[length(H2$mids)] ), y=c(0, H2$den, 0), col="grey", border=F )
  abline( v = beta1, col="blue", lwd=2, lty=2 )
  plot(H3$mids, H3$den, type="n", xlab="Beta 2", ylab= "P(Beta 2)",xaxs="i", yaxs="i")
  polygon(x=c(H3$mids[1], H3$mids, H3$mids[length(H3$mids)] ), y=c(0, H3$den, 0), col="grey", border=F )
  abline( v = beta2, col="blue", lwd=2, lty=2 )
}
g.rjMCMC(Ndat = 20)
|
fd86d50fed01f247f325039cb5319621e69b12a7
|
67de204b7f0550def8eea7d6ca605f43aed653fc
|
/app/lib/analysis/tests/normality/comment.R
|
581e37c48d5173ea5c46dee4e7fbedb84b60e708
|
[] |
no_license
|
andymeneely/sira-nlp
|
b1b1bb8a783adac6a69001565d49d8357a4dd8c5
|
b027a5d7407043b6541e2aa02704a7239f109485
|
refs/heads/master
| 2021-01-11T05:29:16.209735
| 2017-12-09T17:13:19
| 2017-12-09T17:13:19
| 69,055,241
| 1
| 1
| null | 2017-06-19T18:42:12
| 2016-09-23T19:36:51
|
Python
|
UTF-8
|
R
| false
| false
| 926
|
r
|
comment.R
|
# Initialize Boilerplate ----
source("boilerplate.R")
source("data/comment.R")
## Test: Continuous-values Metrics ====
# Run a normality test on every variant of every continuous comment metric
# and collect the outcomes into one data frame. GetCommentMetric(),
# GetNormality() and the COMMENT.* lookup tables come from the sourced files.
metrics <- names(COMMENT.CV.METRIC.VARIANTS)
test.outcomes <- data.frame()
for (i in 1:length(metrics)) {
  metric <- metrics[i]
  metric.label <- COMMENT.METRIC.LABELS[[metric]]
  cat("[", i, "/", length(metrics), "] ", metric.label, "\n", sep = "")  # progress
  dataset <- GetCommentMetric(metric)
  variants <- unlist(COMMENT.CV.METRIC.VARIANTS[[metric]])
  for (j in 1:length(variants)) {
    metric.variant <- variants[j]
    variant.label <- COMMENT.METRIC.LABELS[[metric.variant]]
    cat("  [", j, "/", length(variants), "] ", variant.label, "\n", sep = "")
    test.outcome <- GetNormality(dataset[[metric.variant]])
    test.outcome <- data.frame("metric" = variant.label, test.outcome)
    rownames(test.outcome) <- c()  # drop row names before accumulating
    test.outcomes <- rbind(test.outcomes, test.outcome)
  }
}
print(test.outcomes)
|
ab3cfa11c47285af02dd231a67d83cd325d37c01
|
dbdd0281bfdd1fa07542054d4b5c2dbf9fba83f6
|
/R/unmarked/man/formatMult.Rd
|
fb10d86a98a45ab0494bb154744f6c4c8f12f596
|
[] |
no_license
|
kwinner/latentcountmodels
|
0eaeff2750877129da9f1f9d2b0ae9c3af9c32e0
|
d4400cc28453dbf1c4e00e9da7a8ffe880518584
|
refs/heads/master
| 2020-09-11T07:56:15.155998
| 2019-11-14T14:30:38
| 2019-11-14T14:30:38
| 221,996,225
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 836
|
rd
|
formatMult.Rd
|
\name{formatMult}
\alias{formatMult}
\title{Create unmarkedMultFrame from Long Format Data Frame}
\usage{formatMult(df.in)}
\description{This convenience function converts multi-year data in long format to an unmarkedMultFrame object. See Details for more information.}
\details{\code{df.in} is a data frame with columns formatted as follows:
Column 1 = year number \cr
Column 2 = site name or number \cr
Column 3 = julian date or chronological sample number during year \cr
Column 4 = observations (y) \cr
Column 5 -- Final Column = covariates
Note that if the data is already in wide format, it may be easier to create an unmarkedMultFrame object
directly with a call to \code{\link{unmarkedMultFrame}}.}
\value{unmarkedMultFrame object}
\arguments{\item{df.in}{a data.frame appropriately formatted (see Details).}}
|
b53529e99c5266cf0a555119e90a441f46a7e1f6
|
f81cb7ae4215f20e6537f79bfb1bb96a23019f0f
|
/Codes/peak_wordcloud_1.R
|
6c5ec10f3cdf29dc5dbe6674f6bf99ee8dcf7b5f
|
[] |
no_license
|
Sonull/Analysis-of-Tweets-made-during-NBA-2018-Finals
|
53c92efee39502b3b0888408734bb1e6a09ea59b
|
e9a5179cb45ab2704003f71915fefe46745ec5d6
|
refs/heads/master
| 2021-02-26T02:51:12.235768
| 2020-05-10T17:44:00
| 2020-05-10T17:44:00
| 245,489,873
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,375
|
r
|
peak_wordcloud_1.R
|
library(readr)
library(ggplot2)
library(tidyr)
library(dplyr)
library(tidyverse)
library(scales)
library(tm)
library(SnowballC)
library(wordcloud)
library(tidytext)
library(reshape2)
library(gridExtra)
library(corrplot)
library(ggmap)
library(igraph)
library(leaflet)
library(knitr)
library(htmlwidgets)
library(htmltools)
library(jsonlite)
library(yaml)
library(base64enc)
#install.packages("devtools")
library(devtools)
#devtools::install_github("lchiffon/wordcloud2", force = TRUE)
library(wordcloud2)
#library(KoNLP)
# Load the "peak" tweet data set.
# NOTE(review): hard-coded absolute path — adjust for your machine.
setwd("/Users/leeyeji/Desktop/MSBA_20Winter/Customer&Social Analytics /final project/tweets-during-cavaliers-vs-warriors/")
peak <- read.csv("PEAK.csv")
#peak <- read_csv("/Users/leeyeji/Desktop/MSBA_20Winter/Customer&Social Analytics /final project/tweets-during-cavaliers-vs-warriors/PEAK.csv")
# Most frequent terms
frequentTerms <- function(text){
  # Build a cleaned tm corpus from `text` and return a data frame
  # (word, freq) sorted most-frequent first.
  # NOTE(review): relies on cleanCorpus(), defined later in this script —
  # fine at call time, but do not call frequentTerms() before sourcing it.
  s.cor <- Corpus(VectorSource(text))
  s.cor.cl <- cleanCorpus(s.cor)
  s.tdm <- TermDocumentMatrix(s.cor.cl)
  s.tdm <- removeSparseTerms(s.tdm, 0.999)  # drop very sparse terms
  m <- as.matrix(s.tdm)
  word_freqs <- sort(rowSums(m), decreasing=TRUE)
  dm <- data.frame(word=names(word_freqs), freq=word_freqs)
  return(dm)
}
# Text transformations
# Strip URL-like tokens ("http" followed by alphanumerics).
removeURL <- function(x) gsub("http[[:alnum:]]*", "", x)
cleanCorpus <- function(corpus){
  # Standard tm cleaning pipeline: punctuation, whitespace, lowercase,
  # URLs, stopwords (English + Spanish + chat contractions), numbers.
  corpus.tmp <- tm_map(corpus, removePunctuation)
  corpus.tmp <- tm_map(corpus.tmp, stripWhitespace)
  corpus.tmp <- tm_map(corpus.tmp, content_transformer(tolower))
  corpus.tmp <- tm_map(corpus.tmp, content_transformer(removeURL))
  v_stopwords <- c(stopwords("english"), stopwords("spanish"),
    "thats","weve","hes","theres","ive", "im","will","can","cant",
    "dont","youve","us","youre","youll","theyre","whats","didnt")
  corpus.tmp <- tm_map(corpus.tmp, removeWords, v_stopwords)
  corpus.tmp <- tm_map(corpus.tmp, removeNumbers)
  return(corpus.tmp)
}
# peak Tweets
# NOTE(review): filters peak_time == "0" despite the name "peak" — confirm
# which code marks peak-period tweets.
peak_tweets <- peak %>%
  filter(peak_time=="0")
# Wordcloud
peak_wc <- frequentTerms(peak_tweets$text)
head(peak_wc)
wordcloud(peak_wc$word, peak_wc$freq, min.freq=30, colors=brewer.pal(8,"Dark2"))
#export
write.csv(peak_wc,"/Users/leeyeji/Desktop/MSBA_20Winter/Customer&Social Analytics /final project/tweets-during-cavaliers-vs-warriors/final_pick.csv", row.names = FALSE)
#from here please see python jupyter notebook for the further visualizaiton.
|
86a047a998a1800b9f1d4964b02766ab5ea5661f
|
de7ffad3cdc6923f1e98f21887b9fb433403875e
|
/PEpCenteias/man/dengue2013.Rd
|
90a7b6367b6b2f814a8e61a5ae3e25dc40036b44
|
[] |
no_license
|
acgabriel3/EpCenteias
|
cc2c45d15fe8dc1f0dd4b5ebfa4c8dc769c1f010
|
9b122bdd4da25ad902b3c08248a2a4617e3557a5
|
refs/heads/master
| 2020-04-06T23:43:42.046543
| 2020-02-10T20:13:21
| 2020-02-10T20:13:21
| 157,879,323
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 393
|
rd
|
dengue2013.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dados.R
\docType{data}
\name{dengue2013}
\alias{dengue2013}
\title{dengue2013}
\format{Um data frame com 788 linhas e 43 variaveis:}
\source{
MS (Ministério da Saúde)
}
\usage{
dengue2013
}
\description{
dados coletados acerca da situacao epidemiologica brasileira acerca dos dados de dengue
}
\keyword{datasets}
|
57b680c0936bdad7c76f7255109283e39bb25ec1
|
1dd6a9098c42ae910ff9d59b932bfe5a33261995
|
/statistical_testing.R
|
b789f6ac7b0f753e5b83ab7d54334e7ea3ea9351
|
[] |
no_license
|
nanjunbaka/prettyderby
|
4c7ccd9891d741809c8ef098a8abbc855c9fd69b
|
80c2141a573809a5d031defce1ac52ba6e86ed0b
|
refs/heads/master
| 2023-07-01T18:16:13.601815
| 2021-07-31T10:00:55
| 2021-07-31T10:00:55
| null | 0
| 0
| null | null | null | null |
SHIFT_JIS
|
R
| false
| false
| 1,246
|
r
|
statistical_testing.R
|
#visualize 2nd error
# Plot the probability of a Type II error for a one-sided test of a
# proportion (H0: p = p_0) as a function of the true p, for several n.
rm(list=ls())  # NOTE(review): clearing the workspace inside a script is discouraged
z_alpha <- 1.282 # upper alpha% point of the standard normal (alpha = Type I error probability)
p_0 <- 0.05
nlist <- c(10,100,1000)
for(n in nlist){
  A <- function(x) {sqrt(p_0*(1-p_0)/(x*(1-x)))}
  B <- function(x) {(x-p_0)/sqrt(x*(1-x)/n)}
  p_2nderr <- function(x) {pnorm(z_alpha*A(x)-B(x))}  # Type II error prob. at true p = x
  if(n==10){
    plot(p_2nderr,0,1,ylim=c(0,1),ann=F,lwd=2,lty=1)
    par(new=T)
  }
  if(n==100){
    plot(p_2nderr,0,1,ylim=c(0,1),ann=F,lwd=2,lty=2)
    par(new=T)
  }
  if(n==1000){
    plot(p_2nderr,0,1,ylim=c(0,1),lwd=2,lty=3,xlab="p")
  }
}
#calculate sample size
# Sample size needed to detect p_0 + delta_0 with the chosen alpha/beta.
rm(list=ls())
z_alpha <- 1.282 # upper alpha% point of the standard normal (alpha = Type I error probability)
z_beta <- 1.282 # upper beta% point of the standard normal (beta = Type II error probability)
p_0 <- 0.05
delta_0 <- 0.05
p_thr <- p_0 + delta_0
A <- function(x) {sqrt(p_0*(1-p_0)/(x*(1-x)))}
n <- p_thr*(1-p_thr)/((p_thr-p_0)^2)*(z_alpha*A(p_thr)+z_beta)^2 # sample size
#calculate test statistic
# z statistic for the observed failure rate against p_0.
rm(list=ls())
d <- read.csv("training_supercreek.csv",encoding="UTF-8")
n <- nrow(d)
n_fail <- sum(d$flag_failure)
p_hat <- n_fail/n
p_0 <- 0.05
u_0 <-(p_hat-p_0)/sqrt(p_0*(1-p_0)/n)
|
52bcee921c7f32be28a07db297335f0655b5faab
|
671097044dcd383018e5b332aa0eff90bb61894a
|
/R/run_dauer_stan.R
|
bd8bd6bba949ed1fc6b8be3c8a1cd02a76003613
|
[] |
no_license
|
mikeod38/dauergutTest
|
37106579174b4a1dcbb65c606dd543a7004db177
|
d694dbf16835ad2147c40fe11d1f4b1682fdd6d5
|
refs/heads/master
| 2021-01-23T14:15:37.912388
| 2017-09-07T04:09:03
| 2017-09-07T04:09:03
| 102,681,186
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,638
|
r
|
run_dauer_stan.R
|
#' run_dauer_stan
#'
#' Function runs a stan glmm for dauer data
#'
#' @param df input dataset. Requires a "genotype" column. See type for data column types
#' @param type "dauer" (default),"dauer-grouped". For dauer data, needs to have raw counts, with "dauer" column and "n" column.
#' For grouped data, must have group.id column - usually interaction(genotype,condition)
#'
#' @export
#' @examples df %>% run_dauer_stan(parameters)
run_dauer_stan <- function(df, type, parameters) {
  # Fit a Bayesian binomial GLMM (rstanarm) to dauer-formation counts.
  #   df         : data with dauer, n, genotype, day, strainDate, plateID
  #                columns (plus group.id for type = "dauer-grouped")
  #   type       : missing -> genotype model; "dauer-grouped" -> cell-means
  #                model on group.id; anything else is an error
  #   parameters : optional list(chains, cores, seed, iter); defaults below
  # Returns the fitted stanreg model object.
  rstan::rstan_options(auto_write = TRUE)
  options(mc.cores = parallel::detectCores()) # Run on multiple cores
  if (missing(parameters)) {
    # (Removed the no-op `parameters = parameters` else branch.)
    parameters = list(chains = 3, cores = 4, seed = 2000, iter = 6000,
                      control = list(adapt_delta = 0.99))
  }
  if (missing(type)) {
    mod <- rstanarm::stan_glmer(data = df,
      formula = cbind(dauer, (n-dauer)) ~ genotype + (1|day) + (1|strainDate) + (1|plateID),
      family = binomial(link = "logit"),
      chains = parameters$chains,
      cores = parameters$cores,
      seed = parameters$seed,
      iter = parameters$iter,
      control = list(adapt_delta = 0.99))
  } else if (type == "dauer-grouped") {
    mod <- rstanarm::stan_glmer(formula = cbind(dauer, (n-dauer)) ~ 0 + group.id + (1|day) + (1|strainDate) + (1|plateID),
      data = df,
      family = binomial(link = "logit"),
      chains = parameters$chains,
      cores = parameters$cores,
      seed = parameters$seed,
      iter = parameters$iter,
      control = list(adapt_delta = 0.99))
  } else {
    # Fix: fail loudly instead of print() followed by return(mod), which
    # previously raised "object 'mod' not found".
    stop("invalid type, write full model", call. = FALSE)
  }
  # Fix: single return point; previously return(mod) only executed in the
  # non-missing-type branch, making the missing-type return invisible.
  return(mod)
}
|
0484b91f33b7d6d579bf1587f6bab557fa612d4b
|
76e8cb6c69438cde64462fe27d9aa71bc7488712
|
/Procesamientodatos/hist_app_simple.R
|
83d9641cf5f484968c7a141ba005deb70d01d26b
|
[
"MIT"
] |
permissive
|
diazshejo/Friday_IA
|
978cd6d81649d0c1cb6415e725d6143e38140015
|
2d6cd2f54fda7b2935eaacc84df2cf4277eac762
|
refs/heads/master
| 2020-04-01T03:50:34.027085
| 2018-10-13T12:44:01
| 2018-10-13T12:44:01
| 152,837,661
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 125
|
r
|
hist_app_simple.R
|
# Launch the "hist" Shiny app locally on port 8086 and open it in a browser.
library(shiny)
port <- 8086
runApp(appDir = "hist", port = port, launch.browser = TRUE, quiet = FALSE, host = '127.0.0.1')
|
a6db13f2fd84f0fb4261835f7e8f5abe37066926
|
046d9aaaef9a4f5e607c583c1e876083569d116d
|
/lib/rate_plot.r
|
e0a1dfc4dba8a7dff72078849e5d7bf5cc5a4f3e
|
[] |
no_license
|
JamesWong6VA/goals-of-care
|
fde220fa244ad21e8b7fa4c184df97212a95c671
|
e1e160e6f1784103b6592829540e116a061c512a
|
refs/heads/master
| 2021-09-06T10:47:49.203974
| 2018-01-05T19:54:49
| 2018-01-05T19:54:49
| 120,348,504
| 0
| 0
| null | 2018-02-05T19:05:15
| 2018-02-05T19:05:15
| null |
UTF-8
|
R
| false
| false
| 3,383
|
r
|
rate_plot.r
|
library(tidyr)
library(dplyr, warn.conflicts = FALSE)
library(ggplot2)
library(ggthemes)
library(scales)
# Generate the plot for a rate type performance metric
# This script contains two utility functions:
# * Transform data for plotting convenience
# * Produce plot
# Heuristic for the smallest count worth printing on the chart, given a
# vector of counts: one tenth of the total, rounded down.
lower_print_lim <- function(x) {
  total <- sum(x)
  floor(total / 10)
}
# Given data frame with "identifier", "numerator" and "misses" columns,
# return data frame transformed for plotting
make_rate_plot_data <- function(input_data){
# Convert multiple columns into two columns of key-value pairs
# where the keys are the column titles and the values are those of the former columns
# For example, the row:
#
# location timepoint numerator misses denominator
# 556 2019 Q3 30 10 40
#
# Will become two rows after the gather operation with the parameters used in this script.
#
# location timepoint denominator event count
# 556 2015 Q1 40 misses 10
# 556 2015 Q1 40 numerator 30
gathered <- gather(input_data, key="event", value="count", misses, numerator)
# Convert key column (title of former columns) to a factor with the specified order of values
gathered$event = factor(gathered$event, levels = c("misses","numerator"))
# Calculate the max digit per ID that should be plotted to avoid overplotting when the height
# of the bar is less than that of the plotted numeral.
count_limits <- gathered %>%
group_by(id) %>%
summarise(limit = lower_print_lim(max(denominator)))
# Create a column count_label with NA for counts that are less than the count limit for the id.
plot_data <- gathered %>%
left_join(count_limits, by="id") %>%
mutate(
count_label = case_when(
count > limit ~ count,
count <= limit ~ as.numeric(NA)
)
)
return(plot_data)
}
generate_rate_plot <- function(plot_data, plot_title = "", y_label = "", line_label="", stack_labels=c("") ){
  # Stacked bar chart of misses/numerator per timepoint with the
  # denominator overlaid as a point-and-line series.
  #   plot_data    : output of make_rate_plot_data()
  #   plot_title   : chart title
  #   y_label      : y-axis label
  #   line_label   : legend label for the denominator line
  #   stack_labels : legend labels for the misses/numerator stacks
  # Returns a ggplot object.
  # Manually selected colors from Viridis Palette
  viridis_colors = c(denominator="#440154FF", "#414487FF", numerator="#2A788EFF", "#22A884FF", misses = "#7AD151FF", "#FDE725FF")
  # Specify the plot using grammar of graphics
  plot <-
    ggplot(plot_data, aes(x = timepoint, y = count, group = event)) +
    geom_col(aes(fill = event)) +
    geom_text(size = 4,
      aes(label = count_label),  # NA labels (tiny counts) are not drawn
      position = position_stack(vjust = 0.5)) +
    geom_point(aes(y = denominator, color = "denominator")) +
    geom_line(data = plot_data, aes(
      x = as.numeric(timepoint),  # numeric x so the line connects factor levels
      y = denominator,
      color = "denominator"
    )) +
    labs(title = plot_title, x = " ", y = y_label) +
    scale_y_continuous(breaks=pretty_breaks()) +
    scale_colour_manual(
      values = viridis_colors,
      breaks = c("denominator"),
      labels = c(line_label)
    ) +
    scale_fill_manual(
      values = viridis_colors,
      breaks = c("misses", "numerator"),
      labels = stack_labels
    ) +
    theme(
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank(),
      panel.border = element_blank(),
      panel.background = element_blank(),
      legend.title = element_blank()
    ) +
    guides(colour = guide_legend(order = 1))
  return(plot)
}
|
009cf2a96c3497ba63ecff025c031c4f36e9d71f
|
e74f214e229556aa6395a3f4da75e0d97291748b
|
/mepstrends/hc_pmed/json/code/r/totPOP__TC1name__ind__.r
|
dd7f79f2097127bcd46262cd3c98ed7be5d27d55
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain"
] |
permissive
|
HHS-AHRQ/MEPS-summary-tables
|
a0bc5069eeab97da04446283ff3a6133f97df595
|
c705b91c2e5918447cb738848d7ea6b0c6481507
|
refs/heads/master
| 2023-06-10T14:56:34.711734
| 2021-04-01T14:16:57
| 2021-04-01T14:16:57
| 115,756,417
| 11
| 6
| null | 2018-02-14T14:32:46
| 2017-12-29T21:52:22
|
SAS
|
UTF-8
|
R
| false
| false
| 1,353
|
r
|
totPOP__TC1name__ind__.r
|
# Template script: tokens such as .year., .yy., .RX. and .Multum. are
# substituted by the MEPS table-generation tooling before this code is run
# (they are syntactically valid R names, so the file still parses as-is).
# Purpose: survey-weighted total number of persons with a prescribed-medicine
# purchase, by therapeutic class (TC1), from the MEPS RX event file.

# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)

install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)

# Treat "lonely" PSUs (a single sampling unit in a stratum) as certainty
# units instead of erroring during variance estimation.
options(survey.lonely.psu="adjust")

year <- .year.

# Load RX file
# NOTE(review): read_sas() is provided by the 'haven' package, which is not
# in package_names above -- confirm it is loaded in the runtime environment.
RX <- read_sas("C:/MEPS/.RX..sas7bdat")

# Early panels carry year-suffixed design variables and a differently named
# person weight; normalise them to the modern column names.
if(year <= 2001) RX <- RX %>% mutate(VARPSU = VARPSU.yy., VARSTR = VARSTR.yy.)
if(year <= 1998) RX <- RX %>% rename(PERWT.yy.F = WTDPER.yy.)

# For 1996-2013, merge with RX Multum Lexicon Addendum files
# (drop the original TC*/PREGCAT/RXDRGNAM columns so the addendum versions
# are used instead).
if(year <= 2013) {
  Multum <- read_sas("C:/MEPS/.Multum..sas7bdat")
  RX <- RX %>%
    select(-starts_with("TC"), -one_of("PREGCAT", "RXDRGNAM")) %>%
    left_join(Multum, by = c("DUPERSID", "RXRECIDX"))
}

# Merge with therapeutic class names ('tc1_names')
# ('tc1_names' is assumed to be supplied by the surrounding pipeline.)
RX <- RX %>%
  left_join(tc1_names, by = "TC1") %>%
  mutate(count = 1)

# Collapse to one row per person x therapeutic class, keeping the survey
# design variables (strata, PSU, person weight) needed for svydesign().
TC1pers <- RX %>%
  group_by(DUPERSID, VARSTR, VARPSU, PERWT.yy.F, TC1name) %>%
  summarise(n_RX = sum(count), RXXP.yy.X = sum(RXXP.yy.X)) %>%
  mutate(count = 1) %>%
  ungroup

# Complex survey design: PSUs nested within strata, person-level weights.
TC1dsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = TC1pers,
  nest = TRUE
)

# Weighted total of persons (count == 1 per person/class) by class name.
results <- svyby(~count, by = ~TC1name, FUN = svytotal, design = TC1dsgn)
print(results)
|
2b1a3678417371a364c7ed5ae21eaaa84e47f93b
|
0e76443b6de1312c8d3988d2538263db0cd7385b
|
/分析及画图/0. 文献_书籍代码/数量生态学/数量生态学-R语言应用 数据和代码/赖江山/NumEcolR_scripts/chap3_web_PC.R
|
83713027061749592d4b50ca3f68d374464b322d
|
[] |
no_license
|
mrzhangqjankun/R-code-for-myself
|
0c34c9ed90016c18f149948f84503643f0f893b7
|
56f387b2e3b56f8ee4e8d83fcb1afda3d79088de
|
refs/heads/master
| 2022-12-30T08:56:58.880007
| 2020-10-23T03:20:17
| 2020-10-23T03:20:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,046
|
r
|
chap3_web_PC.R
|
### CHAPTER 3: ASSOCIATION MEASURES ###
# Companion script for the Doubs river fish data set: builds Q-mode and
# R-mode (dis)similarity matrices and visualises them with coldiss().
# NOTE(review): assumes DoubsSpe.csv / DoubsEnv.csv / DoubsSpa.csv and the
# helper scripts coldiss.R / panelutils.R are in the working directory.
# NOTE(review): windows() opens a device on Windows only; use x11()/quartz()
# on other platforms.
# Load required libraries
library(ade4)
library(vegan) # should be loaded after ade4 to avoid some conflicts
library(gclus)
library(cluster)
library(FD)
# Import the data from CSV files
spe <- read.csv("DoubsSpe.csv", row.names=1)
env <- read.csv("DoubsEnv.csv", row.names=1)
spa <- read.csv("DoubsSpa.csv", row.names=1)
# Remove empty site 8
spe <- spe[-8,]
env <- env[-8,]
spa <- spa[-8,]
# Dissimilarity and distance measures for (semi-)quantitative data
# ****************************************************************
# Bray-Curtis dissimilarity matrix on raw species data
spe.db <- vegdist(spe) # Bray-Curtis dissimilarity (default)
head(spe.db)
# Bray-Curtis dissimilarity matrix on log-transformed abundances
spe.dbln <- vegdist(log1p(spe))
head(spe.dbln)
# Chord distance matrix
spe.norm <- decostand(spe, "nor")
spe.dc <- dist(spe.norm)
head(spe.dc)
# Hellinger distance matrix
spe.hel <- decostand(spe, "hel")
spe.dh <- dist(spe.hel)
head(spe.dh)
# Dissimilarity measures for binary data
# **************************************
# Jaccard dissimilarity matrix using function vegdist()
spe.dj <- vegdist(spe, "jac", binary=TRUE)
head(spe.dj)
# (square root inspected as well -- presumably to obtain a metric form)
head(sqrt(spe.dj))
# Jaccard dissimilarity matrix using function dist()
spe.dj2 <- dist(spe, "binary")
head(spe.dj2)
# Jaccard dissimilarity matrix using function dist.binary()
spe.dj3 <- dist.binary(spe, method=1)
head(spe.dj3)
# Sorensen dissimilarity matrix using function dist.binary()
spe.ds <- dist.binary(spe, method=5)
head(spe.ds)
# Sorensen dissimilarity matrix using function vegdist()
spe.ds2 <- vegdist(spe, binary=TRUE)
head(spe.ds2)
head(sqrt(spe.ds2))
# Ochiai dissimilarity matrix
spe.och <- dist.binary(spe, method=7)
head(spe.och)
# Graphical display of association matrices
# *****************************************
# The gclus package is required and may be called now, although
# it is called internally by coldiss()
library(gclus)
# Colour plots (also called heat maps, or trellis diagrams in the data
# analysis literature) using the coldiss() function
# ********************************************************************
# Source the coldiss() function
source("coldiss.R") # If necessary, add the path to the file
# Bray-Curtis dissimilarity matrix (on raw data)
# 4 colours with equal-length intervals (useful for comparisons)
windows(title="Bray-Curtis (raw data)",10,5)
coldiss(spe.db, byrank=FALSE, diag=TRUE)
# Same but on log-transformed data
windows(title="Bray-Curtis [ln(y+1) data]",10,5)
coldiss(spe.dbln, byrank=FALSE, diag=TRUE)
# Chord distance matrix
windows(title="Chord",10,5)
coldiss(spe.dc, byrank=FALSE, diag=TRUE)
# Hellinger distance matrix
windows(title="Hellinger",10,5)
coldiss(spe.dh, byrank=FALSE, diag=TRUE)
# Jaccard distance matrix
windows(title="Jaccard",10,5)
coldiss(spe.dj, byrank=FALSE, diag=TRUE)
# Simple matching dissimilarity
# (called the Sokal and Michener index in ade4)
spe.s1 <- dist.binary(spe, method=2)
windows(title="S1 on species data",10,5)
coldiss(spe.s1^2, byrank=FALSE, diag=TRUE)
# Remove the 'das' variable from the env dataset
env2 <- env[,-1]
# Euclidean distance matrix of the standardized env2 data frame
env.de <- dist(scale(env2))
windows(title="Environment",10,5)
coldiss(env.de, diag=TRUE)
# Hellinger distance matrix of the species data (equal-sized categories)
windows(title="Species",10,5)
coldiss(spe.dh, diag=TRUE)
# Euclidean distance matrix on spatial coordinates (2D)
spa.de <- dist(spa)
windows(title="x-y",10,5)
coldiss(spa.de, diag=TRUE)
# Euclidean distance matrix on distance from the source (1D)
das.df <- as.data.frame(env$das, row.names=rownames(env))
riv.de <- dist(das.df)
windows(title="Distance from source",10,5)
coldiss(riv.de, diag=TRUE)
# Compute five binary variables with 30 objects each. Each variable
# has a predefined number of 0 and 1
# NOTE(review): sample() is used without set.seed(), so var1/var5 (and every
# downstream matrix built from them) differ between runs.
# Variable 1: 10 x 1 and 20 x 0; the order is randomized
var1 <- sample(c(rep(1,10), rep(0,20)))
# Variable 2: 15 x 0 and 15 x 1, one block each
var2 <- c(rep(0,15), rep(1,15))
# Variable 3: alternation of 3 x 1 and 3 x 0 up to 30 objects
var3 <- rep(c(1,1,1,0,0,0),5)
# Variable 4: alternation of 5 x 1 and 10 x 0 up to 30 objects
var4 <- rep(c(rep(1,5), rep(0,10)), 2)
# Variable 5: 16 objects with randomized distribution of 7 x 1
# and 9 x 0, followed by 4 x 0 and 10 x 1
var5.1 <- sample(c(rep(1,7), rep(0,9)))
var5.2 <- c(rep(0,4), rep(1,10))
var5 <- c(var5.1, var5.2)
# Variables 1 to 5 are put into a data frame
dat <- data.frame(var1, var2, var3, var4, var5)
dim(dat)
# Computation of a matrix of simple matching coefficients
# (called Sokal and Michener index in ade4)
dat.s1 <- dist.binary(dat, method=2)
windows(title="S1 on fictitious data",10,5)
coldiss(dat.s1, diag=TRUE)
# Fictitious data for Gower (S15) index
# Random normal deviates with zero mean and unit standard deviation
var.g1 <- rnorm(30,0,1)
# Random uniform deviates from 0 to 5
var.g2 <- runif(30,0,5)
# Factor with 3 levels (10 objects each)
var.g3 <- gl(3,10)
# Factor with 2 levels, orthogonal to var.g3
var.g4 <- gl(2,5,30)
dat2 <- data.frame(var.g1,var.g2,var.g3,var.g4)
summary(dat2)
# Computation of a matrix of Gower dissimilarity using function daisy()
# Complete data matrix (4 variables)
dat2.S15 <- daisy(dat2, "gower")
range(dat2.S15)
windows(title="S15 on fictitious data - daisy",10,5)
coldiss(dat2.S15, diag=TRUE)
# Data matrix with the two orthogonal factors only
dat2partial.S15 <- daisy(dat2[,3:4], "gower")
windows(title="S15 on fictitious data, 2 factors - daisy",10,5)
coldiss(dat2partial.S15, diag=TRUE)
# What are the dissimilarity values in the dat2partial.S15 matrix?
levels(factor(dat2partial.S15))
# Computation of a matrix of Gower dissimilarity using function gowdis()
# of package FD
library(FD) # If not already loaded
?gowdis
dat2.S15.2 <- gowdis(dat2)
range(dat2.S15.2)
windows(title="S15 on fictitious data - gowdis",10,5)
coldiss(dat2.S15.2, diag=TRUE)
# Data matrix with the two orthogonal factors only
dat2partial.S15.2 <- gowdis(dat2[,3:4])
windows(title="S15 on fictitious data, 2 factors - gowdis",10,5)
coldiss(dat2partial.S15.2, diag=TRUE)
# What are the dissimilarity values in the dat2partial.S15.2 matrix?
levels(factor(dat2partial.S15.2))
# R-mode dissimilarity matrix
# ***************************
# (R-mode: associations among SPECIES, computed on the transposed table.)
# Transpose matrix of species abundances
spe.t <- t(spe)
# Chi-square pre-transformation followed by Euclidean distance
spe.t.chi <- decostand(spe.t, "chi.square")
spe.t.D16 <- dist(spe.t.chi)
windows(title="D16 on fish species (R-mode)",10,5)
coldiss(spe.t.D16, diag=TRUE)
# Jaccard index on fish presence-absence
spe.t.S7 <- vegdist(spe.t, "jaccard", binary=TRUE)
windows(title="S7 on fish species (R-mode)",10,5)
coldiss(spe.t.S7, diag=TRUE)
# Pearson r linear correlation among environmental variables
env.pearson <- cor(env) # default method = "pearson"
round(env.pearson, 2)
# Reorder the variables prior to plotting
env.o <- order.single(env.pearson)
# pairs: a function to plot a matrix of bivariate scatter diagrams
# and correlation coefficients.
# Correlations are given in the upper panel (with significance levels)
# (panel.cor and panel.hist come from the sourced panelutils.R)
source("panelutils.R") # If necessary give path
windows(title="Linear correlation matrix",10,10)
op <- par(mfrow=c(1,1), pty="s")
pairs(env[,env.o], lower.panel=panel.smooth, upper.panel=panel.cor,
      diag.panel=panel.hist, main="Pearson Correlation Matrix")
par(op)
# Kendall tau rank correlation among environmental variables
env.ken <- cor(env, method="kendall")
env.o <- order.single(env.ken)
windows(title="Rank correlation matrix",10,10)
op <- par(mfrow=c(1,1), pty="s")
pairs(env[,env.o], lower.panel=panel.smooth, upper.panel=panel.cor,
      method="kendall", diag.panel=panel.hist, main="Kendall Correlation Matrix")
par(op)
|
dc5f38de15d866d118617812b784be911a3298de
|
ef3c4696a0ca1ed2f5188c0be4374ef49fd92da8
|
/500494994 Extra Script.R
|
3032143735e85497cc38b99a127cf2608d51cff1
|
[] |
no_license
|
MHuang2001/Financial-Econometrics
|
041872b91dfcf9ce7164f374951eeddf37d32b29
|
32284287be2f41737d55f21a811d5ff515a832bb
|
refs/heads/main
| 2023-04-17T18:13:29.833200
| 2021-04-30T01:47:39
| 2021-04-30T01:47:39
| 362,995,732
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,348
|
r
|
500494994 Extra Script.R
|
# SID 500494994
# Scratch/"extra" analysis script.
# NOTE(review): this script relies on objects created elsewhere in the project
# (stockPriceReturnsData, stockPriceNames, averageMonthlyStockReturns,
# averageRF, AverageExcess, data, preparedData, preparedData2,
# trainingTimespan, simulations, portfolioValueXTS, ...); it is not runnable
# stand-alone.

# INPUT DATA
# Rank the individual stocks by their average monthly excess returns over the
# risk-free rate.
xsReturns <- stockPriceReturnsData
for (i in stockPriceNames) {
  # NOTE(review): the same excess-return series is written into EVERY column;
  # confirm this is intended rather than a per-stock computation.
  xsReturns[, i] <- averageMonthlyStockReturns - averageRF
}
xsReturns
averageExcess <- colMeans(xsReturns)
averageExcessStdDev <- StdDev(xsReturns)
sharperatio <- averageExcess/averageExcessStdDev
barplot(sort(sharperatio))

# Rank the individual stocks by their monthly excess-return standard deviations.
averageMonthlyStockReturnStdDev <- StdDev(stockPriceReturnsData[trainingTimespan])
barplot(sort(averageExcessStdDev), names.arg=colnames(averageExcessStdDev), main="SD Excess Return", xlab = "Stocks", las=1.5, cex.names=1)
barplot(sort(averageExcess), names.arg=colnames(averageExcess), main="Average Monthly Excess Return", xlab = "Stocks", las=1.5, cex.names=1)
barplot(sort(sharperatio), names.arg=colnames(sharperatio), main="Sharpe Ratio", xlab = "Stocks", las=1.5, cex.names=1)

# Colour palettes.
# BUG FIX: these were previously defined AFTER the first corrplot() call that
# uses col4, which errors in a fresh session; they are now defined up front.
col1 <- colorRampPalette(c("#7F0000", "red", "#FF7F00", "yellow", "white",
                           "cyan", "#007FFF", "blue", "#00007F"))
col2 <- colorRampPalette(c("#67001F", "#B2182B", "#D6604D", "#F4A582",
                           "#FDDBC7", "#FFFFFF", "#D1E5F0", "#92C5DE",
                           "#4393C3", "#2166AC", "#053061"))
col3 <- colorRampPalette(c("red", "white", "blue"))
col4 <- colorRampPalette(c("#7F0000", "red", "#FF7F00", "yellow", "#7FFF7F",
                           "cyan", "#007FFF", "blue", "#00007F"))
whiteblack <- c("white", "black")

# Correlation matrices for the two sample periods, and their difference.
(correlations <- cor(data))
corrplot(correlations, method = "number", col=col4(20))
title("Correlations 2000-2010", line = 3)
correlations2 <- cor(preparedData)
corrplot(correlations2, method = 'number', col = col4(20))
title("Correlations 2000-2020", line = 3)
corrdifference <- cor(preparedData) - cor(data)
corrplot(corrdifference, method = 'number', col = col4(20))
title("Correlations Difference", line = 3)
sd(data$rf)

# Stability over time of the distribution of excess returns.
barplot(AverageExcess)
AverageRF2 <- colMeans(data$rf10)
AverageExcess2 <- averageMonthlyStockReturns - AverageRF2
barplot(AverageExcess2)

# preparedData covers 2000-2020; data covers 2000-2010.
# How many missing values?
sum(is.na(preparedData2))

# QQ plots of monthly excess returns per stock.
qqnorm(xsReturns$CBA.AX) #1
qqline(xsReturns$CSL.AX, main = "Normal Q-Q Plot CSL.AX")
qqnorm(xsReturns$CSL.AX, main = "Normal Q-Q Plot CSL.AX Excess Returns")
qqnorm(xsReturns$BHP.AX)
qqnorm(xsReturns$SUN.AX)
qqnorm(xsReturns$QAN.AX)
qqnorm(xsReturns$WES.AX)
qqnorm(xsReturns$TLS.AX)
qqnorm(xsReturns$WOW.AX)
qqnorm(xsReturns$ANZ.AX)
qqnorm(xsReturns$NAB.AX)
qqline(xsReturns$WBC.AX, main = "Normal Q-Q Plot WBC.AX Excess Returns")

# Read data from spreadsheet in same folder as this R script and convert to an XTS.
# BUG FIX: the original read the workbook into rf10 but then built the xts
# from an object named `rf`, which is undefined at this point.
rf10 <- read_excel("30dayBBSW.xlsx")
rf10 <- xts(rf10$FIRMMBAB30D, order.by = as.Date(rf10$date))
rf10 <- rf10["20101101/20200630"]
plot(rf10)

# Histogram plus fitted normal curve.
hist(averageMonthlyStockReturns, freq=FALSE, col="gray", xlab="Average Monthly Stock Return", main="Average Monthly Stock Return Distribution (2000-2020)")
curve(dnorm(x, mean=mean(averageMonthlyStockReturns), sd=sd(averageMonthlyStockReturns)), add=TRUE, col="blue")
hist(averageExcess, freq=FALSE, col="gray", xlab="Average Monthly Excess Return", main="Average Monthly Excess Return Distribution (2000-2020)")
curve(dnorm(x, mean=mean(averageExcess), sd=sd(averageExcess)), add=TRUE, col="blue")
qqnorm(averageExcess)

# One-sample t test of the mean monthly return.
t.test(averageMonthlyStockReturns)
# Shapiro-Wilk test for normality.
shapiro.test(averageMonthlyStockReturns)
# NOTE(review): hist() on a whole data frame is likely an error; confirm the
# intended column.
hist(data)
AverageExcess
20191231  # leftover scratch value (prints when sourced)

# VAR ==========================================================
# Historical simulation of 1-month portfolio value.
portfolioWeights <- readRDS(file = "heldbackPortfolioWeights.rds")
varWeights <- tail(portfolioWeights, 1)
adjustedReturns <- readRDS(file = "allData.rds")
timespan <- "20100101/20191231"
historicalReturns <- adjustedReturns[timespan]
# Use the seq function to get the list of possible row indices. We will randomly draw indices next.
sample.space <- seq(from=1, to=nrow(historicalReturns))
# Randomly choose days from history using a uniform distribution across possible days in the available history.
# This gives us the indices in our simulation - use each one to get the list of rates of return for the various investments.
sampleIndices <- sample(sample.space,
                        size = simulations,
                        replace = TRUE,
                        prob = rep((1/length(sample.space)), length(sample.space))
)
# Preallocate the result vector (one simulated portfolio value per draw).
simulatedPortfolioValues <- numeric(simulations)
for (k in seq_along(sampleIndices)) {
  # Get the randomly chosen monthly returns.
  randomDrawOfHistoricalReturns <- historicalReturns[sampleIndices[k], ]
  # Note that the colProds function has been removed because we are not needing to compound daily returns to form a return over a longer period.
  # Note the matrix transpose used to multiply returns by weights.
  simulatedPortfolioValues[k] <- ((1+randomDrawOfHistoricalReturns/100) %*% t(varWeights)) * 10000000
}
hist(simulatedPortfolioValues)
# 1% quantile of simulated values = the 99% historical-simulation VaR level.
(historicalSimulationVaR <- quantile(x = simulatedPortfolioValues, probs = c(0.01)))

# Portfolio value over time.
plot(portfolioValueXTS)
|
b8574114bca41013b8654b34f297abfefaaac575
|
3286534741f7bf2a5bd19e3bb036bcaaf5320091
|
/str2vec.R
|
13c346b7ea304ff8e6e66598cabcdf6697dc75dd
|
[] |
no_license
|
junyzhou10/OptTrialDesign_ShinyApp
|
26c628fa22ee22f9db144abd1aa6d4a4efec0542
|
34841354bba730fdefd3b9881ed057debe4712ed
|
refs/heads/main
| 2023-05-31T07:50:15.773263
| 2021-06-20T10:07:29
| 2021-06-20T10:07:29
| 362,327,321
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 448
|
r
|
str2vec.R
|
#' Parse a selection string such as "1,3,5-7" into a numeric vector.
#'
#' Comma-separated tokens are taken as single numbers; tokens containing a
#' dash are expanded to the inclusive step-1 sequence between their two
#' endpoints. The empty string returns the scalar 0, preserving the app's
#' original "nothing selected" convention.
#'
#' @param input A single character string, e.g. "1,3-5,8".
#' @return A numeric vector (scalar 0 for empty input).
str2vec <- function(input){
  # Empty spec: keep the historical sentinel value 0.
  if (input == "") {
    return(0)
  }
  tokens <- unlist(strsplit(input, ','))
  # Expand each token once via lapply/unlist -- avoids growing a vector with
  # c() inside a loop and the repeated strsplit() calls of the original.
  expanded <- lapply(tokens, function(tok) {
    if (grepl('-', tok, fixed = TRUE)) {
      bounds <- as.numeric(unlist(strsplit(tok, '-')))
      seq(bounds[1], bounds[2], 1)
    } else {
      as.numeric(tok)
    }
  })
  unlist(expanded)
}
|
e7d33fd0970aadb0fd838bf5fc611906f728293a
|
41776e29d270961456f222772125c0046933be67
|
/Word_cloud_ggplot2.R
|
1366a9dfb6e02f18eba344bb469e281f6a8953c3
|
[] |
no_license
|
vishnusankar55/R
|
1f557d8fa800f942e078da87fe49b5fa1c8a5cb7
|
3ba38398b4fea72daba847db2e58a80935f18ee8
|
refs/heads/master
| 2022-12-12T07:07:07.809225
| 2020-09-05T05:05:58
| 2020-09-05T05:05:58
| 293,005,860
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,372
|
r
|
Word_cloud_ggplot2.R
|
########### Word Cloud #######
# Tutorial script in three parts:
#   1. word cloud from a CSV of documents (tm + wordcloud),
#   2. assorted ggplot2 chart demos,
#   3. word cloud of the "I Have a Dream" speech fetched from the web.
install.packages("tm")
install.packages("wordcloud")
install.packages("RColorBrewer")
library(tm)
library(wordcloud)
#data= file.choose()
# NOTE(review): hard-coded setwd() makes this script machine-specific.
setwd("C:/Users/Asus/Desktop/Priyanka/Simpli learn/June_July_2020/Day5_4July")
getwd()
data <- read.csv("Word_cloud_Day4.csv", header = TRUE, stringsAsFactors = FALSE)
str(data)
## read.csv() already returns a data frame; this no-op is kept from the demo.
data <- data.frame(data)
View(data)
# Top 6 entries in the data
head(data)
# Bottom 6 entries
tail(data)
# DataframeSource() requires exactly these two column names.
colnames(data) <- c("doc_id", "text")
View(data)
# Cleaning the data
# A corpus represents a collection of (data) texts, typically labeled with
# text annotations.
Word1 <- Corpus(DataframeSource(data))
head(Word1) ### head() is not meaningful on a corpus; kept from the demo
?Corpus
Word1[[1]][1]
# Normalise the text: lower-case, then strip numbers, stop words and
# punctuation, and collapse whitespace.
# BUG FIX: the working variable was previously named `Corpus`, masking the
# tm::Corpus() function and breaking the Corpus(VectorSource(...)) call in
# part 3 below; it is renamed to corpus_clean.
corpus_clean <- tm_map(Word1, content_transformer(tolower))
corpus_clean <- tm_map(corpus_clean, removeNumbers)
corpus_clean <- tm_map(corpus_clean, removeWords, stopwords("english"))
corpus_clean <- tm_map(corpus_clean, removePunctuation)
corpus_clean <- tm_map(corpus_clean, stripWhitespace)
corpus_clean[[1]][1]
# Creating a TDM
# A document-term matrix or term-document matrix is a mathematical
# matrix that describes the frequency of terms that occur in a collection of documents.
tdm <- TermDocumentMatrix(corpus_clean)
m <- as.matrix(tdm)
m
# Word frequencies, most frequent first.
v <- sort(rowSums(m), decreasing = TRUE)
d <- data.frame(word = names(v), freq = v)
View(d)
#par(mar=c(1,1,1,1))
# BUG FIX: a bare dev.off() errors when no graphics device is open; guard it.
if (dev.cur() > 1) dev.off()
wordcloud(words=d$word,freq=d$freq,min.freq=1,max.words=100,random.order=FALSE,
          rot.per=0.5,colors=brewer.pal(8,"Dark2"))
warnings()
d$word
d$freq
?wordcloud

######################### ggplot2 ###### example
install.packages("ggplot2")
library(ggplot2)
# Simple bar chart.
df <- data.frame(dose=c("D0.5", "D1", "D2"),
                 len=c(4.2, 10, 29.5))
View(df)
?ggplot2::geom_bar
p <- ggplot(df, aes(x=dose, y=len)) +
  geom_bar(stat="identity", color="blue", fill="white") + theme_minimal()
p
p + coord_flip()
#### Density Plot #######
df1 <- data.frame(
  sex=factor(rep(c("F", "M"), each=200)),
  weight=round(c(rnorm(200, mean=55, sd=5),
                 rnorm(200, mean=65, sd=5))))
View(df1)
p1 <- ggplot(df1, aes(x=weight)) + geom_density()
p1
# Add mean line
p1 + geom_vline(aes(xintercept=mean(weight)),
                color="blue", linetype="dashed", size=1)
### If we have 2 categories in the data
p3 <- ggplot(df1, aes(x=weight, color=sex)) +
  geom_density()
p3
#### Histogram ####
data("airquality")
View(airquality)
# BUG FIX: the original placed `+` at the START of a line, so the scale
# layers were never attached to the plot; also limits must be a 2-element
# range (the original `limits=c(175)` was invalid).
ggplot(airquality, aes(x = Ozone)) +
  geom_histogram(aes(y = ..count..),
                 binwidth = 5, colour = "black", fill = "blue") +
  scale_x_continuous(name = "Mean in ozone in \nparts per billion",
                     breaks = seq(0, 175, 25),
                     limits = c(0, 175)) +
  scale_y_continuous(name = "count")
############## Boxplot #######
data("airquality")
str(airquality)
# Recode month numbers (5-9) as labelled factor levels.
airquality$Month <- factor(airquality$Month , labels = c("May", "Jun", "Jul", "Aug","Sep"))
airquality$Month
# BUG FIX: same leading-`+` problem as the histogram above.
ggplot(airquality, aes(x = Month, y = Ozone)) +
  geom_boxplot(fill = "blue", colour = "black") +
  scale_y_continuous(name = "Mean ozone in \nparts per billion",
                     breaks = seq(0, 175, 25), limits = c(0, 175)) +
  scale_x_discrete(name = "Month") +
  ggtitle("Boxplot of mean ozone by month")

#### Practice : Example 2 : Wordcloud
install.packages("tm") # for text mining
install.packages("SnowballC") # for text stemming
install.packages("wordcloud") # word-cloud generator
install.packages("RColorBrewer") # color palettes
# Load
library("tm")
library("SnowballC")
library("wordcloud")
library("RColorBrewer")
# Read the text file from internet
filePath <- "http://www.sthda.com/sthda/RDoc/example-files/martin-luther-king-i-have-a-dream-speech.txt"
text <- readLines(filePath)
# Load the data as a corpus
docs <- Corpus(VectorSource(text))
inspect(docs)
## A transformation is performed using tm_map() to replace,
# for example, special characters from the text.
toSpace <- content_transformer(function (x , pattern ) gsub(pattern, " ", x))
docs <- tm_map(docs, toSpace, "/")
docs <- tm_map(docs, toSpace, "@")
docs <- tm_map(docs, toSpace, "\\|")
# Convert the text to lower case
docs <- tm_map(docs, content_transformer(tolower))
# Remove numbers
docs <- tm_map(docs, removeNumbers)
# Remove english common stopwords
docs <- tm_map(docs, removeWords, stopwords("english"))
# Remove your own stop words
# specify your stopwords as a character vector
docs <- tm_map(docs, removeWords, c("blabla1", "blabla2"))
# Remove punctuations
docs <- tm_map(docs, removePunctuation)
# Eliminate extra white spaces
docs <- tm_map(docs, stripWhitespace)
# Text stemming
# docs <- tm_map(docs, stemDocument)
# Document matrix is a table containing the frequency of the words. Column names are words and row names are documents.
# The function TermDocumentMatrix() from the text mining package can be used as follows:
dtm <- TermDocumentMatrix(docs)
m <- as.matrix(dtm)
v <- sort(rowSums(m), decreasing=TRUE)
d <- data.frame(word = names(v), freq=v)
head(d, 10)
#set.seed(1234)
# BUG FIX: wordcloud() has no `colorPalette` argument (it was silently
# swallowed by `...`); the intended argument is `colors`.
wordcloud(words = d$word, freq = d$freq, min.freq = 1,
          max.words=200, random.order=FALSE, rot.per=0.35,
          colors = "black")
# Kindly download the ggplot2 cheatsheet:
# https://github.com/rstudio/cheatsheets/blob/master/data-visualization-2.1.pdf
|
369495f0b2f1e2809b16223668056957252d27a8
|
8ac3b63c2fd288c9d56a7efaf8c3caf04333f6e7
|
/logging_in_r.R
|
3d4bff77c969a50fa2572011b7d94f5abc0e2e89
|
[] |
no_license
|
OlivierNDO/stat_funcs_r
|
e89c11cedd117659393711a470a66638393ad7b2
|
b7c6ab3771392292f30d28151c91952f233c3c5a
|
refs/heads/master
| 2020-03-06T21:57:09.043363
| 2020-02-24T14:15:20
| 2020-02-24T14:15:20
| 127,090,602
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 703
|
r
|
logging_in_r.R
|
# Minimal example: routing log messages to a text file with the 'logging'
# package, using a named logger plus a file handler.
# Script config
library(logging)
# NOTE(review): placeholder path -- replace with a real, existing directory
# (note the trailing slash: the file path is built with paste0 below).
config_log_folder = 'C:/.../.../.../.../'
config_log_file = 'abc_model.log'
config_logger_name = 'abc_model'
# Setting up logger
logging::basicConfig()
# Attach a file handler to the named logger so its records are appended to
# the configured log file.
logging::addHandler(logging::writeToFile,
                    logger = config_logger_name,
                    file = paste0(config_log_folder, config_log_file))
# Examples of messages to add in logging text file
# (messages are sent to a child logger, '<name>.module', of the configured
# logger).
logging::loginfo("hello world", logger = paste0(config_logger_name, '.module'))
logging::logwarn('this is a warning', logger = paste0(config_logger_name, '.module'))
logging::logerror('this is an error', logger = paste0(config_logger_name, '.module'))
# Remove the logging configuration set up above.
logging::logReset()
|
323a6a804fb254e2524de6ea620deb17513ab5eb
|
12ba6afc7836493cd03f51321d3fcb88db8feb4a
|
/R/rts_process.R
|
fca938fbef4f2cb88a4e40488645f078695537dc
|
[
"Apache-2.0"
] |
permissive
|
wangbo2020/CTPReutersComparison
|
ff159a6b6c99aa7d121fe4156b67d7799c66bb19
|
7ac7fea8baecd0fe6cd0a5b9bf7e1870f9ad3182
|
refs/heads/master
| 2021-01-16T18:18:38.437975
| 2014-10-20T15:08:33
| 2014-10-20T15:08:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,522
|
r
|
rts_process.R
|
# Consolidate per-instrument Reuters tick files (gzipped CSVs under
# data/Reuters/) into one wide CSV, data/Reuters/Reuters.csv, merging the
# Trade / Quote / Open Interest record types on (RIC, Date, Time).
# A per-file row/column log is written to data/Reuters/log.csv.
library(pipeR)
library(data.table)
library(dplyr)
library(foreach)
library(doParallel)
rts.files <- paste0("data/Reuters/", list.files("data/Reuters", "gz", recursive=TRUE) )
# check.names <- sapply(1:file.num, FUN=function(i){
#   data.i <- read.csv( rts.files[i] )
#   return( names(data.i) )
# })
#
# check.names <- t(check.names) %>>% data.frame()
# View( check.names )
# Write the output header once; data rows are appended file by file below.
header <- paste("RIC", "Date", "Time",
                "Trade.Price", "Trade.Volume", "Trade.MarketVWAP",
                "Quote.BidPrice", "Quote.BidSize", "Quote.AskPrice", "Quote.AskSize",
                "OI.Volume", sep=",")
writeLines(header, "data/Reuters/Reuters.csv")
# NOTE(review): %do% runs sequentially even though doParallel is attached;
# parallel execution would need registerDoParallel() and %dopar% (and the
# shared append below would then need rework).
log <- foreach(i=1:length(rts.files), .combine="rbind" ) %do% {
  data.i <- read.csv(rts.files[i]) %>>% data.table()
  setnames(data.i, old=names(data.i)[1:3], new=c("RIC", "Date", "Time"))
  # Split by record type and rename the value columns with a type prefix.
  data.trade <- filter(data.i, Type == "Trade") %>>%
    select( RIC, Date, Time, Price, Volume, Market.VWAP)
  setnames(data.trade, old=names(data.trade)[4:6],
           new=c("Trade.Price", "Trade.Volume", "Trade.MarketVWAP"))
  data.quote <- filter(data.i, Type == "Quote") %>>%
    select( RIC, Date, Time, Bid.Price, Bid.Size, Ask.Price, Ask.Size)
  setnames(data.quote, old=names(data.quote)[4:7],
           new=c("Quote.BidPrice", "Quote.BidSize", "Quote.AskPrice", "Quote.AskSize") )
  data.oi <- filter(data.i, Type == "Open Interest") %>>%
    select( RIC, Date, Time, Volume)
  setnames(data.oi, old="Volume", new="OI.Volume")
  # debug
  # Drop duplicate-timestamp OI rows (keeps the first); see Debug Note 1.
  data.oi <- filter(data.oi, !duplicated(data.oi$Time))
  # Full outer join of the three record types on (RIC, Date, Time).
  data.merge <- merge(data.trade, data.quote,
                      by=c("RIC", "Date", "Time"), all=TRUE) %>>%
    merge( data.oi, by=c("RIC", "Date", "Time"), all=TRUE)
  # message(rts.files[i])
  # message(paste0("i = ", i, ", ", nrow(data.merge),
  #               " lines of data has been written out."))
  # write.table (not write.csv) so append=TRUE is honoured; see note below.
  write.table(data.merge, "data/Reuters/Reuters.csv", sep=",",
              row.names=FALSE, col.names=FALSE, append=TRUE)
  # One log row per input file: name plus merged dimensions.
  log.i <- c(rts.files[i], nrow(data.merge), ncol(data.merge))
  return(log.i)
}
log <- data.frame(log)
names(log) <- c("Files", "nrow", "ncol")
write.csv(log, "data/Reuters/log.csv", row.names=FALSE)
## Cannot Append in write.csv
# http://stackoverflow.com/questions/7351049/write-csv-a-list-of-unequally-sized-data-frames
# That's a warning, not an error. You can't change append=FALSE with write.csv.
# ?write.csv says:
#
# Attempts to change ‘append’, ‘col.names’, ‘sep’, ‘dec’ or ‘qmethod’ are ignored,
# with a warning.
#
# Use write.table with sep="," instead.
## Bug
# task 93 failed - "Join results in 25221 rows; more than 25217 = max(nrow(x),nrow(i)). Check for duplicate key values in i, each of which join to the same group in x over and over again. If that's ok, try including `j` and dropping `by` (by-without-by) so that j runs for each group to avoid the large allocation. If you are sure you wish to proceed, rerun with allow.cartesian=TRUE. Otherwise, please search for this error message in the FAQ, Wiki, Stack Overflow and datatable-help for advice."
## Debug Note 1
# in file 93, data.oi has two lines with same Time
# data.oi[ duplicated(data.oi$Time), ]
#       RIC     Date         Time OI.Volume
# 1: CRSMF5 20141009 10:49:36.494    661720
# data.oi[ duplicated(data.oi$Time, fromLast=TRUE), ]
#       RIC     Date         Time OI.Volume
# 1: CRSMF5 20141009 10:49:36.494    661722
# Here I just omitted one line.
## Debug Note 2
# file 342 has same problem, but
|
1d63b54c52988352566759d8fee88ff83cbb7c60
|
6464efbccd76256c3fb97fa4e50efb5d480b7c8c
|
/paws/man/networkmanager_untag_resource.Rd
|
f9ef05b8a5c06508c2bf48c24a2d90ae2f4a179e
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
johnnytommy/paws
|
019b410ad8d4218199eb7349eb1844864bd45119
|
a371a5f2207b534cf60735e693c809bd33ce3ccf
|
refs/heads/master
| 2020-09-14T23:09:23.848860
| 2020-04-06T21:49:17
| 2020-04-06T21:49:17
| 223,286,996
| 1
| 0
|
NOASSERTION
| 2019-11-22T00:29:10
| 2019-11-21T23:56:19
| null |
UTF-8
|
R
| false
| true
| 666
|
rd
|
networkmanager_untag_resource.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/networkmanager_operations.R
\name{networkmanager_untag_resource}
\alias{networkmanager_untag_resource}
\title{Removes tags from a specified resource}
\usage{
networkmanager_untag_resource(ResourceArn, TagKeys)
}
\arguments{
\item{ResourceArn}{[required] The Amazon Resource Name (ARN) of the resource.}
\item{TagKeys}{[required] The tag keys to remove from the specified resource.}
}
\description{
Removes tags from a specified resource.
}
\section{Request syntax}{
\preformatted{svc$untag_resource(
ResourceArn = "string",
TagKeys = list(
"string"
)
)
}
}
\keyword{internal}
|
da7938016b84bb87f4de93a7374bf2f6b25e205b
|
d2f7e45413cbbfcea96617824908f954175198da
|
/plot5.R
|
f8a668c0f8ec05431c47370ce195db85c05f9955
|
[] |
no_license
|
cmarieelder/particulate_analysis
|
5d3c656e933a8d9affc4c89641205f618762d1e1
|
edab2bcf65d2eec2b21869e06f2632b367046dc4
|
refs/heads/master
| 2022-12-01T11:43:01.926958
| 2020-08-10T09:57:45
| 2020-08-10T09:57:45
| 284,366,903
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,570
|
r
|
plot5.R
|
# Author: Cynthia Elder
# Date: 8/2/2020
# Creates a plot to show how emissions from motor vehicle sources have changed
# from 1999–2008 in Baltimore City.
# Writes the chart to results/plot5.png.
# NOTE(review): depends on the sourced script creating NEI and SCC in the
# global environment, and on the dplyr/ggplot2/ggpmisc packages being
# installed.
plot5 <- function() {
  # Load the required packages -----------------------------------------------
  list_of_packages <- c("dplyr", "ggplot2", "ggpmisc")
  lapply(list_of_packages, library, character.only = TRUE)

  # Read data into global environment ----------------------------------------
  source("src/read_particulate_data.R")

  # Set hard-coded variables -------------------------------------------------
  BALTIMORE_FIPS <- "24510"

  # Get SCC data related to motor vehicles -----------------------------------
  # (matches e.g. "Highway Vehicle ..." entries in SCC.Level.Two)
  scc_vehicle <- subset(SCC, grepl("[Hh]ighway.*Vehicle", SCC.Level.Two))

  # Get Baltimore NEI data that matches the motor vehicle SCC identifiers ----
  nei_vehicle <- NEI %>%
    filter(fips == BALTIMORE_FIPS) %>%
    subset(SCC %in% scc_vehicle$SCC) %>%
    select(year, Emissions, type) %>%
    transform(year = factor(year))

  # Sum the emissions by year ------------------------------------------------
  nei_year <- with(nei_vehicle, tapply(Emissions, year, sum, na.rm = TRUE))
  nei_year_df <- data.frame(year = names(nei_year),
                            total_emissions = nei_year)

  # Create plot of year vs. total emissions ----------------------------------
  # Bars per year, an lm trend line, and the fitted equation/R^2 annotation.
  # NOTE(review): the `..eq.label..` dot-dot syntax is superseded by
  # after_stat() in newer ggplot2 releases -- confirm against the pinned
  # ggplot2/ggpmisc versions.
  title <- "Motor Vehicle PM2.5 Emissions in Baltimore City, MD (1999-2008)"
  nei_year_plot <-
    ggplot(data = nei_year_df, aes(x = year, y = total_emissions, group = 1,
                                   fill = year)) +
    geom_bar(stat = "identity", show.legend = FALSE) +
    geom_smooth(method = "lm", se = FALSE, show.legend = FALSE) +
    stat_poly_eq(formula = y ~ x, label.x = "right", label.y = 1,
                 aes(label = paste(..eq.label.., ..rr.label..,
                                   sep = "~~~")),
                 parse = TRUE) +
    labs(x = "Year", y = "PM2.5 Emissions (tons)", title = title) +
    # One manual fill colour per year level (1999, 2002, 2005, 2008).
    scale_fill_manual(values = c("brown3", "darkorange2", "darkgoldenrod1",
                                 "chartreuse4")) +
    # Rounded, comma-formatted totals printed inside the bars.
    geom_text(aes(label = format(round(total_emissions), big.mark = ",",
                                 scientific = FALSE)),
              vjust = 1.6, color = "white", size = 3.5)

  # Generate the plot as a PNG -----------------------------------------------
  png(file = "results/plot5.png")
  print(nei_year_plot)
  dev.off()
}
plot5()
|
b823c5d30244a6f5152c6e88fed5db32f2410e77
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gEmblemedIconGetEmblems.Rd
|
b64619e475f46d269464f25adfeeb51d4dbc0898
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 351
|
rd
|
gEmblemedIconGetEmblems.Rd
|
\alias{gEmblemedIconGetEmblems}
\name{gEmblemedIconGetEmblems}
\title{gEmblemedIconGetEmblems}
\description{Gets the list of emblems for the \code{icon}.}
\usage{gEmblemedIconGetEmblems(object)}
\arguments{\item{\verb{object}}{a \code{\link{GEmblemedIcon}}}}
\details{Since 2.18}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
c704d6c2fb5646500e5c957e71c49d1abdccd098
|
60fe11cdf42e4dbfa7c5b96c89e31f8001f06a75
|
/Plot2.R
|
098829ebbc214cbab9a734ea8d563ee77e4d1813
|
[] |
no_license
|
rajpaz/ExData_Plotting1
|
8e6daea1324b6b8588d4b334ee65545771c25619
|
0aa7fb46bdd8c92442a2c5d6636b9e90048e8a17
|
refs/heads/master
| 2021-01-22T00:59:26.855566
| 2016-01-11T00:41:13
| 2016-01-11T00:41:13
| 49,376,538
| 0
| 0
| null | 2016-01-10T17:09:26
| 2016-01-10T17:09:24
| null |
UTF-8
|
R
| false
| false
| 259
|
r
|
Plot2.R
|
# Plot 2: line chart of Global Active Power (kilowatts) for the February
# subset, written to Plot2.png at 480x480 pixels.
source("./getFebData.R")
# Load the data only once per session; getFebData() (defined in the sourced
# script) performs the expensive read.
if (!exists("febData")) {
  febData <- getFebData()
}
png("./Plot2.png", width = 480, height = 480)
plot(febData$date_time, febData$Global_active_power,
     ylab = "Global Active Power (kilowatts)", type = "l", xlab = "")
dev.off()
|
b95126c9c9e48715e85a5af9a47ff93221aa4461
|
1aee971dc7c8407cc9a50f59bde7b7b6998a6489
|
/tests/testthat/test-pin-dataframe.R
|
6834fb974a3e84327a371f11768b80cee3321981
|
[
"Apache-2.0"
] |
permissive
|
jduckles/pins
|
c9290eba050214db6823cfe0fbb426af09bf4c80
|
199b8b1b0ba3da3449158a60a17b7321fcd06ee5
|
refs/heads/master
| 2020-09-09T00:51:36.383883
| 2019-11-11T19:50:33
| 2019-11-11T19:50:33
| 221,294,526
| 0
| 0
|
Apache-2.0
| 2019-11-12T19:23:07
| 2019-11-12T19:23:06
| null |
UTF-8
|
R
| false
| false
| 402
|
r
|
test-pin-dataframe.R
|
context("pin dataframe")
# Round trip: pin() returns the stored representation of the data frame, and
# pin_get() must recover a data frame equal to the original iris.
test_that("can pin() data frame", {
  roundtrip <- pin(iris, "iris")
  expect_equal(as.data.frame(roundtrip), iris)
  expect_equal(as.data.frame(pin_get("iris")), iris)
})
# Default pin names derived from arbitrary strings: leading/trailing
# non-alphanumeric runs are stripped and inner runs collapse to "-".
test_that("can sanitize data frame names", {
  name <- "___sdf ds32___42342 dsf dsf dsfds____"
  expect_equal(
    pin_default_name(name, board_default()),
    "sdf-ds32-42342-dsf-dsf-dsfds"
  )
})
|
aa5bbdef85bcb568e5dd24ff2c0797c038a9583c
|
fedc48dd7f973fa7e44e4c7fba79b72c3c40c9f3
|
/FRED_GDP_COMPONENTS.R
|
bd12d58f454338fedd209748d28c52352f13e595
|
[] |
no_license
|
yanlesin/FRED_GRAPHS
|
b759f4b4a8a04ed894722d9ae8abbb382c488da2
|
d0ff985bae97462a94e7227e226d112d6ddfac78
|
refs/heads/master
| 2020-04-07T07:07:14.201349
| 2018-11-20T19:11:53
| 2018-11-20T19:11:53
| 158,164,438
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,737
|
r
|
FRED_GDP_COMPONENTS.R
|
# Components of GDP: compare their contributions from 1947 to 2018.
# Pulls real (chained 2012 $) GDP components from FRED, computes net
# exports, and renders a stacked dygraph with NBER recessions shaded.
library(fredr)
library(tidyverse)
library(dygraphs)
library(xts)
library(rvest)

# FRED Key ----------------------------------------------------------------
# fredr_set_key() sets a key as an environment variable for the fredr
# package. The key can also be stored in .Renviron as
# FRED_API_KEY = my_api_key (unquoted); usethis::edit_r_environ() edits the
# file safely. Reading .Renviron here makes the key available now.
readRenviron("~/.Renviron")

# Series info -------------------------------------------------------------
# Look up the FRED metadata entry for one series, keeping only the record
# that starts in 1947 and was updated in 2018 (same filters as before, now
# in one helper instead of five copies).
get_series_info <- function(series_id) {
  fredr_series_search_id(search_text = series_id, limit = 100) %>%
    filter(observation_start == '1947-01-01') %>%
    filter(last_updated >= '2018-01-01')
}
series_info_1 <- get_series_info("PCECC96")   # personal consumption
series_info_2 <- get_series_info("GCEC1")     # government expenditures
series_info_3 <- get_series_info("GPDIC1")    # private investment
series_info_4 <- get_series_info("EXPGSC1")   # exports
series_info_5 <- get_series_info("IMPGSC1")   # imports

# Data --------------------------------------------------------------------
# Fetch the observations for one series from 1947 onwards.
get_series <- function(series_id) {
  fredr(series_id = series_id, observation_start = as.Date("1947-01-01"))
}
PCECC96 <- get_series("PCECC96")
GCEC1   <- get_series("GCEC1")
GPDIC1  <- get_series("GPDIC1")
EXPGSC1 <- get_series("EXPGSC1")
IMPGSC1 <- get_series("IMPGSC1")

# Net export calc ---------------------------------------------------------
# Net exports = exports - imports, matched by date.
NET_EXPORT <- EXPGSC1 %>%
  left_join(IMPGSC1, by = 'date') %>%
  transmute(value = value.x - value.y, Date = date) %>%
  mutate(series_id = "NET_EXPORT") %>%
  select(Date, series_id, value)

# Combining series --------------------------------------------------------
GDP_COMPONENTS <- bind_rows(PCECC96, GCEC1, GPDIC1) %>%
  spread(series_id, value) %>%
  left_join(NET_EXPORT, by = c("date" = "Date")) %>%
  mutate(NET_EXPORT = value) %>%
  select(-series_id, -value)

# XTS object --------------------------------------------------------------
GDP_COMPONENTS_xts <- xts(GDP_COMPONENTS[,-1], order.by = GDP_COMPONENTS$date)

# Adding Recession Data ---------------------------------------------------
# Scrape the NBER recession peak/trough dates from the FRED help page.
url_Recession_Data <- "https://fredhelp.stlouisfed.org/fred/data/understanding-the-data/recession-bars/"
webpage <- read_html(url_Recession_Data)
Recession_Data_html <- html_nodes(webpage, 'p')
Recession_Data <- html_text(Recession_Data_html[3])
Recession_Data_df <- Recession_Data %>%
  str_split("\n", simplify = FALSE) %>%
  unlist() %>%
  as.data.frame() %>%
  transmute(PEAK = str_sub(`.`, 1, 10), TROUGH = str_sub(`.`, 13, 22)) %>%
  filter(PEAK != 'Peak, Trou') %>%            # drop the header row
  mutate(PEAK = as.Date(PEAK), TROUGH = as.Date(TROUGH)) %>%
  filter(TROUGH >= min(GDP_COMPONENTS$date))  # only recessions in range

# dygraph -----------------------------------------------------------------
dygraph_GDP <- dygraph(GDP_COMPONENTS_xts, main = "Components of GDP: Compare their contributions from 1947 to 2018") %>%
  dyRangeSelector() %>%
  dySeries("PCECC96", label = series_info_1$title) %>%
  dySeries("GCEC1", label = series_info_2$title) %>%
  dySeries("GPDIC1", label = series_info_3$title) %>%
  dySeries("NET_EXPORT", label = paste0(series_info_4$title," - ", series_info_5$title)) %>%
  # dySeries("NET_EXPORT", label = paste0(series_info_4$title," - ", str_to_title(series_info_5$title, locale = "EN"))) %>%
  dyOptions(fillGraph = TRUE, fillAlpha = 0.4, maxNumberWidth = 8, stackedGraph = TRUE) %>%
  dyAxis("y", label = "Billions of Chained 2012 Dollars", axisLabelWidth = 70) %>%
  dyLegend(show = "follow", hideOnMouseOut = TRUE, labelsSeparateLines = TRUE)
# Shade every scraped recession instead of hard-coding eleven dyShading
# calls; this stays correct if the source page lists a different number
# of recessions than the 11 previously assumed.
for (i in seq_len(nrow(Recession_Data_df))) {
  dygraph_GDP <- dyShading(dygraph_GDP,
                           from = Recession_Data_df$PEAK[i],
                           to = Recession_Data_df$TROUGH[i])
}
dygraph_GDP
|
f362b90ef76e489ae2c5454dbcacc3878d3ed46a
|
2e12507002a5c68f8fba51995879cbc07b5160ad
|
/project.R
|
c4a7246d657c033515dda5b7143a6b2b36c22f55
|
[] |
no_license
|
shobacherian/datamungingcoursera
|
a28843a3c29f0dcdbb9ade7cf0fa6f46c8089ecb
|
859bcaffb400c9d6b442895777a9fa431989deb1
|
refs/heads/master
| 2021-01-15T12:14:09.086744
| 2015-05-05T11:32:36
| 2015-05-05T11:32:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,830
|
r
|
project.R
|
# Project Work
# Simulate the Central Limit Theorem for an arbitrary distribution.
#
# rfun:  random generator (e.g. runif); dfun: matching density (e.g. dunif).
# ssize: size of each simulated sample.
# runs:  number of samples whose means are collected.
# title: currently unused; kept so existing callers remain valid.
# ...:   extra distribution parameters forwarded to both rfun and dfun.
#
# Side effects: draws a 1x3 panel (histogram + Q-Q of the scaled sample
# means, then the underlying distribution) via plotSet/plotterA/plotterB.
# Returns the vector of `runs` raw (unscaled) sample means.
simulatorCentral <- function(rfun, dfun, ssize=40, runs=1000, title="Course Project: Central Limit Theorem",...) {
  # vapply preallocates the result; the previous version grew `m` from
  # double(0) one element at a time inside the loop.
  m <- vapply(seq_len(runs), function(i) mean(rfun(ssize, ...)), numeric(1))
  ms <- scale(m)             # standardize for comparison with N(0, 1)
  plotSet()                  # set up the 1x3 plotting layout
  plotterA(ms)               # distribution of the scaled sample means
  plotterB(rfun, dfun, ...)  # the underlying distribution itself
  m
}
# Configure the current graphics device for a 1 x 3 panel layout with
# tightened margins; used by simulatorCentral before drawing its panels.
plotSet = function() {
par(mfrow = c(1,3));
par(mar = c(5,2,5,1) + 0.1);
}
# First two panels: histogram + empirical density of the (scaled) sample
# means with the standard normal curve overlaid, followed by a normal
# Q-Q plot of the same values.
plotterA <- function(ms) {
  hist(
    ms,
    probability = TRUE,
    main = "Sample Means",
    col = "light grey",
    border = "grey",
    ylim = c(0, 0.5)
  )
  lines(density(ms))                                        # empirical density
  curve(dnorm(x, 0, 1), -3, 3, add = TRUE, col = "blue", lwd = 2)  # N(0, 1)
  qqnorm(ms)                                                # Q-Q panel
}
# Third panel: a fresh sample of 1000 draws from the source distribution,
# shown as a histogram with its empirical density (dashed), the true
# density curve, and a rug of the individual draws.
plotterB <- function(rfun, dfun, ...) {
  draws <- rfun(1000, ...)
  hist(
    draws,
    prob = TRUE,
    main = "Original Distribution",
    col = "light gray",
    border = "gray"
  )
  lines(density(draws), lty = 2)                            # empirical density
  curve(dfun(x, ...), add = TRUE, lwd = 2, col = "firebrick")  # true density
  rug(draws)
}
# Demonstrations: the sampling distribution of the mean approaches
# normality regardless of the underlying distribution (uniform,
# exponential at several rates, Poisson, binomial).
simulatorCentral(runif, dunif);
simulatorCentral(rexp, dexp);
simulatorCentral(rexp, dexp, rate = 4);
simulatorCentral(rexp, dexp, rate = 0.2);
simulatorCentral(rpois, dpois, lambda = 7);
simulatorCentral(rbinom, dbinom, size = 10, prob = 0.3);
# NOTE(review): the disabled call below is also broken as written --
# `df=13` would partially match simulatorCentral's `dfun` argument
# rather than pass df to rt/dt. Left commented out.
#simulatorCentral(df=13, rt, dt);
|
38fc5b61ccf2d206168113270867429a07c2a93f
|
570abc2b93f05cbce92d95f6b9bffbe48708bb6c
|
/R_source/analise_dados.R
|
d1fbf9821440df3b11bbd0e33bc51c932dd492e6
|
[] |
no_license
|
Danhisco/artigo_mestrado
|
ed871edab0e89e28a5569668a670ea43d3f548e3
|
1ff761608ea7312c533ef216b51fd67fb8268b00
|
refs/heads/master
| 2023-02-03T00:15:42.720220
| 2023-01-30T02:13:25
| 2023-01-30T02:13:25
| 121,779,367
| 0
| 1
| null | 2018-11-01T11:05:29
| 2018-02-16T17:28:00
|
HTML
|
UTF-8
|
R
| false
| false
| 5,284
|
r
|
analise_dados.R
|
### Goal: build two data frames:
###   i) refID + range + U_est + richness + KS + tree_cover
###  ii) refID + range + abundance
##
require(plyr)
require(dgof)
require(magrittr)
require(reshape2)
require(dplyr)
source("~/Documents/dissertacao/R_source/sad_media.R")
## Reading and arranging the simulated data
# The first element of each list entry will be turned into a data frame
# with the species name and that species' frequency.
#load("SAD_sim_all_ranges.Rdata") # raw simulation data, N_simul = 30
load("/home/danilo/Documents/dissertacao/dados/simulacao/resultados/sim_6jun.Rdata") # list of two lists: raw simulation outputs, and a df with estimated speciation rates
## Reshape U_est wide -> long
U_est <- sim_6jun[[2]]
refID <- U_est %>% rownames %>% as.numeric
U_est %<>% melt
names(U_est) <- c("range","U_est")
U_est %<>% cbind(refID,.)
# Drop the three excluded reference sites.
exc_rows <- U_est[U_est$refID %in% c(847,831,889), ] %>% rownames %>% as.numeric
U_est <- U_est[-exc_rows, ]
U_est$refID %<>% factor # drop the unused factor levels
#U_est %>% head
## Compute the mean SAD for each range and each refID
sad_sim <- sim_6jun[[1]] # raw simulation output, not the df of estimated speciation rates
# compute the mean SADs and arrange them in a single format - same format as treeco
for(b in 1:(sad_sim %>% length) ){
for(c in 1:(sad_sim[[b]] %>% length) ){
sad_sim[[b]][[c]] %<>% sad_media
# NOTE(review): `sad_sim[[b[c]]]` below looks like a typo for
# `sad_sim[[b]][[c]]` -- confirm which names() source was intended.
sad_sim[[b]][[c]] <- data.frame(SAD = names(sad_sim[[b[c]]]), abundance = sad_sim[[b]][[c]], range = names(sad_sim)[b] )
}
}
# bind all SADs into a single data frame
sad_sim %<>% rbind.fill
names(sad_sim) <- c("refID","N","range")
#sad_sim %>% str
# NOTE(review): after rbind.fill above, sad_sim is already a data frame,
# so this loop iterates over its columns and appears to be dead/leftover
# code from an earlier per-range list layout.
# join all SADs into a single data frame per range
for(d in 1:(sad_sim %>% length) ){
sad_sim[[d]] %<>% rbind.fill
}
## KS test for each range value
# Reading and arranging the observed data
treeco <- read.table(file = "///home/danilo/treeco_paisagens_selecionadas.txt", header = T, sep = "\t", dec = ".")
SAD_obs <- read.csv(file = "///home/danilo/abundances-1.csv",header = TRUE, sep = "\t", dec = ".")
SAD_obs <- SAD_obs[,c("RefID","species.correct","N")]
names(SAD_obs) <- c("refID","sp","N")
SAD_obs %<>% filter(sp != "Mortas", N != 0)
# objects that will hold the test results
# NOTE(review): `ranges` and `refIDs` are only defined near the bottom of
# this script ("Computing the richness" section); this block only works if
# those lines are run first.
KS <- as.data.frame( matrix(ncol = ranges %>% length, nrow = refIDs %>% length) )
p_value <- as.data.frame( matrix(ncol = ranges %>% length, nrow = refIDs %>% length) )
# run the KS test and store statistic and p-value per (refID, range)
for(g in 1:dim(KS)[2] ){
for(h in 1:dim(KS)[1] ){
a <- sad_sim %>% filter(refID == refIDs[h], range == ranges[g]) %>% select(N)
b <- SAD_obs %>% filter(refID == refIDs[h]) %>% arrange(., desc(N)) %>% select(N)
teste_ks <- suppressWarnings( ks.test(x = a, y = b$N) ) # for some reason $N was needed on 'b' but not on 'a'
KS[h,g] <- teste_ks$statistic
p_value[h,g] <- teste_ks$p.value
}
}
# NOTE(review): the loop below references df_KS, df_SAD.obs.f and
# df_SAD.sim0, none of which are created in this script -- likely pasted
# from another analysis; it fails if executed as-is.
for(e in 1:dim(df_KS)[1]){
SAD.obs <- df_SAD.obs.f %>% filter(SiteCode %in% df_KS[e,1]) %>% arrange(.,desc(N)) %>% .$N
SAD.sim <- df_SAD.sim0 %>% filter(SiteCode %in% df_KS[e,1])
for(f in 1:length(levels(SAD.sim$sindrome) ) ){
sad.y <- SAD.sim %>% filter(sindrome %in% levels(SAD.sim$sindrome)[f]) %>% arrange(.,desc(N)) %>% .$N
teste_ks <- suppressWarnings( ks.test(x = SAD.obs, y = sad.y) )
df_KS[e,(f+1)] <- teste_ks$statistic
}
}
# melt KS and p_value wide -> long
KS %<>% melt
names(KS) <- c("fator","KS")
p_value %<>% melt
names(p_value) <- c("fator","p_value")
### Bind everything into a single data frame
df_resultados <- cbind(U_est, riqueza_df$riqueza, KS$KS, p_value$p_value)
names(df_resultados)[4:6] <- c("riqueza","KS","p_value")
### Fix the row order
df_resultados$range <- as.numeric( levels(df_resultados$range) )[df_resultados$range]
df_resultados %<>% arrange(range,refID)
df_resultados$range %<>% factor
### Add the observed richness as pseudo-rows with range = "obs"
S_obs <- data.frame(refID = refIDs, range = NA, U_est = NA,
riqueza = treeco %>% filter(refID %in% refIDs) %>% select(S),
KS = NA, p_value = NA )
names(S_obs)[4] <- "riqueza"
df_resultados <- rbind(df_resultados,S_obs)
levels(df_resultados$range)[8] <- "obs"
df_resultados[is.na(df_resultados$U_est),"range"] <- "obs"
### Add the vegetation cover
## open 'cobertura_vegetal' and perform the computation
reference <- df_resultados$refID %>% levels %>% as.numeric
df_resultados$tree_cover <- NA
df_resultados$tree_cover <- (df_mat_tri %>% filter(refID %in% reference) %>% .$cobertura)
## Save everything in a single data frame
save(df_resultados,sad_sim, file = "resultados_sim6jun.Rdata")
## Computing the richness
# Build a data frame and then melt it, as done with U_est
ranges <- sad_sim$range %>% levels
refIDs <- c(1067,178,246,2817,437,619,677,868,870,887,89,8,987) %>% sort # without sort, the observed richness ends up out of order
riqueza_df <- as.data.frame( matrix(ncol = ranges %>% length, nrow = refIDs %>% length) )
for(e in 1:dim(riqueza_df)[2]){
for(f in 1:dim(riqueza_df)[1]){
riqueza_df[f,e] <- sad_sim %>% filter(refID == refIDs[f], range == ranges[e]) %>% nrow
}
}
riqueza_df %<>% melt
names(riqueza_df) <- c("fator_igual_U_est","riqueza")
|
cbfe871c6dce91b4264956bdccaf58e656b34d5a
|
051880099402393c9249d41526a5ac162f822f8d
|
/tests/testthat.R
|
49b250972d642fb7f411309b2cb957367bd4a345
|
[
"MIT"
] |
permissive
|
bbTomas/rPraat
|
cd2b309e39e0ee784be4d83a980da60946f4c822
|
4c516e1309377e370c7d05245f6a396b6d4d4b03
|
refs/heads/master
| 2021-12-13T19:32:38.439214
| 2021-12-09T18:42:48
| 2021-12-09T18:42:48
| 54,803,225
| 21
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
testthat.R
|
# Standard testthat entry point: runs every test under tests/testthat/.
library(testthat)
library(rPraat)
test_check("rPraat")
|
487f0e51e25863cdf93885ef65205a97d7135715
|
01a614cc641ab1c650a874ee679b6cc2ab1f4c0e
|
/R/mo_engen_gr.R
|
1eace0f99f6a5b0f13d39d9d6a9ba6b068beafda
|
[] |
no_license
|
kfullzz/590_final
|
ce7c6b5bd7c320e0a94002bf4096f9ca4bbd0525
|
fd89cad80b265bd579f2cffdaaae1b9bedc5ea31
|
refs/heads/master
| 2021-01-23T01:07:56.117943
| 2017-05-02T22:48:00
| 2017-05-02T22:48:00
| 85,877,006
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 261
|
r
|
mo_engen_gr.R
|
# Scatter plot of y vs. x for one plant, faceted by z.
#
# df:    data frame holding the measurements.
# col:   name (or index) of the column identifying the plant.
# plant: value of `col` to keep.
# x, y:  names of the columns plotted on the x and y axes.
# z:     name of the column used for facetting.
# Returns a ggplot object; x axis breaks are fixed at 1..12
# (presumably months -- confirm against callers).
mo_engen_gr <- function(df, col, plant, x, y, z) {
  # %in% never yields NA, so rows where `col` is missing are dropped
  # instead of becoming all-NA rows, as the previous `==` subsetting
  # would produce when the column contains NAs.
  df_gr <- df[df[ , col] %in% plant, ]
  ggplot(df_gr, aes(df_gr[ , x], df_gr[ , y])) +
    geom_point() +
    xlab(x) +
    ylab(y) +
    scale_x_continuous(breaks = seq(1, 12, 1)) +
    facet_wrap(~ df_gr[ , z])
}
|
72116e7a7a35a6e7f39a9e932280bfec6914fd86
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/palm/inst/testfiles/pbc_distances/libFuzzer_pbc_distances/pbc_distances_valgrind_files/1612988480-test.R
|
de8e231d82e30f18f2f95febc9f789e39743e83b
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 272
|
r
|
1612988480-test.R
|
# Auto-generated fuzzer regression case: replays one recorded input
# (a 1x1 `lims` matrix and a 5x1 `points` matrix of extreme doubles)
# against the internal palm:::pbc_distances and prints the result's
# structure. Run under valgrind/libFuzzer harnesses to detect crashes.
testlist <- list(lims = structure(0, .Dim = c(1L, 1L)), points = structure(c(1.61035410759544e-317, 1.44131612505985e-83, 7.30643551760841e-309, 3.9934513075558e-305, 5.42219658867978e-312), .Dim = c(5L, 1L)))
result <- do.call(palm:::pbc_distances,testlist)
str(result)
|
36f0cbb835ad8c4e7869211809b59c0326ade1c7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/quanteda/examples/corpus_sample.Rd.R
|
054520e1bbe9ff37204708358c8e43fe9015e4d1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 640
|
r
|
corpus_sample.Rd.R
|
# Extracted example code for quanteda::corpus_sample
# (auto-generated from the package's Rd documentation).
library(quanteda)
### Name: corpus_sample
### Title: Randomly sample documents from a corpus
### Aliases: corpus_sample
### Keywords: corpus
### ** Examples
# sampling from a corpus
summary(corpus_sample(data_corpus_inaugural, 5))
summary(corpus_sample(data_corpus_inaugural, 10, replace = TRUE))
# sampling sentences within document
doccorpus <- corpus(c(one = "Sentence one. Sentence two. Third sentence.",
two = "First sentence, doc2. Second sentence, doc2."))
sentcorpus <- corpus_reshape(doccorpus, to = "sentences")
texts(sentcorpus)
texts(corpus_sample(sentcorpus, replace = TRUE, by = "document"))
|
2e7eff824415caf16d8e7b726fc6a9c69ca58154
|
1ebd3a1132c11fc4c8848381d5fcb7eb7d6dfebd
|
/man/SSVS.Rd
|
0401b34ad55c35e521a2f5d742eda0159fc92bab
|
[] |
no_license
|
SValv/BayesianEstimation
|
7e4caf5e717eb3f64e891d48433bac1caf1108c5
|
ffb871c1994cb4bf62247bec4bff696f745a50d0
|
refs/heads/main
| 2023-07-08T17:08:38.269286
| 2021-08-04T14:21:37
| 2021-08-04T14:21:37
| 380,739,816
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 601
|
rd
|
SSVS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/UserFunctions.R
\name{SSVS}
\alias{SSVS}
\title{Load a Matrix}
\usage{
SSVS(
y,
x,
nsave = 1000,
nburn = 1000,
tau0 = 0.01,
tau1 = 10,
S0 = 0.01,
PriorSemiScaling = F,
scaling = T
)
}
\arguments{
\item{y}{Path to the input file}
}
\value{
a Ssvs object
}
\description{
This function loads a file as a matrix. It assumes that the first column
contains the row names and that the subsequent columns are the sample
identifiers. Any rows with duplicated row names are dropped, keeping only
the first occurrence.
}
|
292b2f92e6dae3914832141f5228c92efbcea7dd
|
37794cfdab196879e67c3826bae27d44dc86d7f7
|
/Math/Poly.System.S5.Ht.Formulas.Derivation.R
|
d78c0f9d3685605c1d993f54e60569473593279b
|
[] |
no_license
|
discoleo/R
|
0bbd53a54af392ef53a6e24af85cec4f21133d17
|
e9db8008fb66fb4e6e17ff6f301babde0b2fc1ff
|
refs/heads/master
| 2023-09-05T00:43:32.381031
| 2023-08-31T23:03:27
| 2023-08-31T23:03:27
| 213,750,865
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,494
|
r
|
Poly.System.S5.Ht.Formulas.Derivation.R
|
########################
###
### Leonard Mada
### [the one and only]
###
### Polynomial Systems
### S5: Hetero-Symmetric
### Useful Formulas
###
### draft v.0.1j
### Derivations
# - Basic derivations;
# - Numerical approaches: basic intuition;
####################
### Helper Functions
### Solver Tools
source("Polynomials.Helper.Solvers.Num.R")
# - is loaded automatically in "Solvers.Num.R";
# source("Polynomials.Helper.R")
source("Polynomials.Helper.EP.R")
### Other
# Check whether two length-5 solution vectors are permutations of the
# same S5 solution, by comparing the cyclic invariant sum(s * s[shift]);
# per the original note, E11b differs between genuinely distinct solutions.
is.perm.S5 <- function(s1, s2, tol=1E-6) {
  if (length(s1) != 5 || length(s2) != 5) stop("Not solutions of S5!")
  shift <- c(3, 4, 5, 1, 2)
  invariant <- function(s) sum(s * s[shift])
  # round0() (from the sourced solver tools) zeroes differences below tol.
  round0(invariant(s1) - invariant(s2), tol=tol) == 0
}
# Find rows of the solution matrix `s` that are permutations of an earlier
# row (using is.perm.S5). Returns a matrix with 2 rows: each column is the
# pair (index of the earlier original, index of the later duplicate).
# For nr <= 1 an empty 0 x 2 array is returned instead.
which.perm.S5 = function(s, tol=1E-6, verbose=TRUE) {
nr = nrow(s);
if(nr <= 1) return(array(0, c(0, 2)));
# ID of permuted solutions;
id = numeric(0);
# Scan from the last row downwards; for each row, stop at its first match.
for(i1 in seq(nr, 2, by=-1)) {
for(i2 in seq(i1 - 1)) {
if(is.perm.S5(s[i1,], s[i2,])) {
id = c(id, i2, i1); break;
}
}
}
id = matrix(id, nrow=2);
if(ncol(id) == 0) {
if(verbose) cat("No duplicates!\n");
return(invisible(id));
}
return(id);
}
# Keep rows 1..7 of `x` (minus any rows listed in rm.rows), then append
# the conjugate of row `nrow` computed by as.conj().
# NOTE(review): assumes x has at least 7 rows; the exact semantics of
# as.conj depend on the sourced polynomial helpers -- confirm there.
as.conj.S5 = function(x, nrow, rm.rows=0) {
id = seq(7);
if(any(rm.rows > 0)) {
id = id[-rm.rows];
}
r = x[id,];
r = rbind(r, as.conj(x[nrow,]));
return(r);
}
# Flip the sign of the elements of x selected by id (first element by
# default); returns the modified copy.
neg <- function(x, id=1) {
  replace(x, id, -x[id])
}
# Compare specific coefficients of E11b;
# If n is given, restrict both polynomial tables to the rows whose `xn`
# column equals n (dropping that column), then return the difference
# p1 - p2 via diff.pm (from the sourced polynomial helpers). An empty
# result means the selected coefficients agree.
cmpE11b = function(p1, p2, n=NULL, xn="E11b") {
if( ! is.null(n)) {
id = match(xn, names(p1));
if( ! is.na(id)) {
p1 = p1[p1[,id] == n, - id, drop=FALSE];
}
id = match(xn, names(p2));
if( ! is.na(id)) {
p2 = p2[p2[,id] == n, - id, drop=FALSE];
}
}
pR = diff.pm(p1, p2);
return(pR);
}
#######################
#######################
### Base-System
### Numerical Solution:
# Helper Functions: for Baseline system
source("Poly.System.S5.Ht.Formulas.Derivation.HS0.R")
# Functions with the coefficients
source("Poly.System.S5.Ht.Formulas.Derivation.Coeffs.R")
### List of Initial values
# - is loaded automatically in file HS0.R;
# source("Poly.System.S5.Ht.Formulas.Derivation.x0.R")
###################
###################
# Note:
# - the double permutation (x1, x3) & (x4, x5) like c(x3, x2, x1, x5, x4),
# and equivalent permutations (e.g. (x1, x2) & (x3, x5)),
# are also a solution;
# - Basic solution moved to file:
# Poly.System.S5.Ht.Formulas.Derivation.SolS0.R;
###################
### Case 3:
# S = 1; E11a = 0;
### Cases 4 - 6:
# S = 1; E11a = 1;
# S = 1; E11a = -1;
# S = -1; E11a = 1;
### List of Initial values
# - moved to file:
# Poly.System.S5.Ht.Formulas.Derivation.x0.R;
### Examples
### Using given Path:
# - deriving solution for: R2 = c(-2,-1,0,0,2.5)
# using a path from R2 = c(-5/3,-1,0,0,2.5);
R2 = c(-2,-1,0,0,2.5)
path = lapply(c(-5/3, -1.8, -2), function(R1) c(R1, -1,0,0,2.5));
x0 = x0All$Vn1fn12f
x.all = solve.path(solve.S5HtMixed.Num, x0, path=path, debug=T)
x.all = x.all[-2,]
### Example plot.path:
tmp = plot.path.S5(c(5,3,1,0,2), c(1,3,1,0,2), "E3V131", steps=101)
tmp = plot.path.S5(c(-5,3,1,0,2), c(-1,3,1,0,2), "E3Vn131", steps=101)
### Ex 2:
R2 = c(-1,1,0,0,2)
x0 = x0All$Vn11
x.all = solve.all(solve.S5HtMixed.Num, x0, R=R2, debug=F)
poly.calc(apply(x.all, 1, function(x) sum(x * x[c(3,4,5,1,2)]))) * 27
### E3:
R2 = c(0,0,1,0,2)
path = lapply(c(1/3, 2/3, 1), function(x) c(0, 1 - x, x,0,2));
x0 = x0All$V01
x.all = solve.path(solve.S5HtMixed.Num, x0, path=path, debug=T)
which.perm.S5(x.all) # all roots OK;
x.all = polyS(R2, "E3V001")
x.all = polyS(R2, "E3V011")
R2 = c(0,0,0,1,2)
x.all = polyS(R2, "E4V0001")
max.pow.S(c("E4V1011", "E4Vn1011", "E4V2011", "E4Vn2011"), pow=0, FUN=f0, R=c(1,0,1,1,2), R2=c(3,0,1,1,2), skip.path=T)
max.pow.S(c("E4V1011", "E4V101n1", "E4V1011", "E4V101n1"), pow=0, FUN=f0, npos=4, R=c(1,0,1,1,2), R2=c(1,0,1,1.3,2), skip.path=T)
fn = function(R) {
Rn = R; Rn[1] = - Rn[1];
R4 = R; R4[4] = - R4[4]; R4n = R4; R4n[1] = - R4n[1];
(f0(R) - f0(Rn) - (f0(R4) - f0(R4n)))/4;
}
cc = list(c(1,0,1,1,2), c(2,0,1.1,1.3,2), c(1.3,0,0.9,1,2.1),
c(2.1,0,0.9,1.1,2.3), c(2.2,0,1,0.8,1.9), c(3,0,1.2,0.9,2.2), c(3.3,0,4/5,1.2,2.2));
tmp = sapply(cc, function(R) polyS(R, "E4V1011"));
c0 = c() / 2;
c1 = c() / 2;
c0 = (c0 - c1)/2;
fc = function(R) { S = R[1]; E3 = R[3]; E4 = R[4]; E5 = R[5];
c() / E5^2; }
m = sapply(cc, fc)
solve(t(m), c0 - sapply(cc, fn))
27*(E11a^7 + E11b^7)*E5^2 +
# x^6
- (E11a^6 + E11b^6)*(27*E5^2*S^2 - 4*E4^3 + 18*E3*E4*E5) - (E11a*E11b)^6 +
+ 81*E11a*E11b*(E11a^5 + E11b^5)*E5^2 + 9*(E11a*E11b)^3*(E11a^3 + E11b^3)*E5*S +
# x^5:
+ (E11a^5 + E11b^5)*(9*E5^2*S^4 - E4^3*S^2 + 36*E3*E4*E5*S^2 - 90*E3*E5^2*S - 3*E4^2*E5*S +
+ 4*E3^3*E5 - 225*E4*E5^2 - E3^2*E4^2) +
+ (E11a*E11b)*(E11a^4 + E11b^4)*(9*E3^2*E5*S) +
+ (E11a*E11b)^2*(E11a^3 + E11b^3)*(27*E5^2 - 18*E3*E5*S^2) +
- (E11a*E11b)^3*(E11a^2 + E11b^2)*(2*E5*S^3 + 15*E3*E5) +
- (E11a*E11b)^4*(E11a + E11b)*(3*E5*S + 2*E3^2) +
+ 4*(E11a*E11b)^5*E3*S +
# x^4: Note: 1 term from x^5 contributes as well;
+ (E11a^4 + E11b^4)*( - E5^2*S^6 - 16*E3*E4*E5*S^4 + 48*E3*E5^2*S^3 - 31*E4^2*E5*S^3 +
- 10*E3^3*E5*S^2 + 345*E4*E5^2*S^2 - 12*E3^2*E4^2*S^2 - 13*E3*E4^3*S + 5*E3^2*E4*E5*S +
+ 150*E3^2*E5^2 - 33*E4^4 + 155*E3*E4^2*E5) +
- (E11a*E11b)*(E11a^3 + E11b^3)*(21*E5^2*S^4 + 11*E3^3*E5 - 7*E3^2*E5*S^3 + 270*E3*E5^2*S) +
+ (E11a*E11b)^2*(E11a^2 + E11b^2)*(18*E5^2*S^2 - E3^4 + 4*E3*E5*S^4 + 36*E3^2*E5*S) +
- (E11a*E11b)^3*(E11a + E11b)*(10*E5^2 - 6*E3^3*S) +
+ (E11a*E11b)^4*(4*E5*S^3 + 20*E3*E5 - 6*E3^2*S^2) +
# x^3:
+ (E11a^3 + E11b^3)*(2*E3*E4*E5*S^6 - 6*E3*E5^2*S^5 + 16*E4^2*E5*S^5 +
+ 2*E3^3*E5*S^4 - 122*E4*E5^2*S^4 + 7*E3^2*E4^2*S^4 +
- 50*E5^3*S^3 + 27*E3*E4^3*S^3 - E3^2*E4*E5*S^3 +
+ 5*E3^2*E5^2*S^2 + 29*E4^4*S^2 - 189*E3*E4^2*E5*S^2 + 7*E3^4*E4*S^2 +
+ 15*E4^3*E5*S + 6*E3^4*E5*S + 27*E3^3*E4^2*S +
+ 375*E4^2*E5^2 + 29*E3^2*E4^3 - 105*E3^3*E4*E5) +
+ (E11a*E11b)*(E11a^2 + E11b^2)*(4*E5^2*S^6 - 2*E3^2*E5*S^5 + 68*E3*E5^2*S^3 +
- 27*E3^3*E5*S^2 + 2*E3^5*S - 375*E5^3*S + 275*E3^2*E5^2) +
+ (E11a*E11b)^2*(E11a + E11b)*(12*E5^2*S^4 - 3*E3^3*E5 +
+ 140*E3*E5^2*S - 6*E3^4*S^2 + 11*E3^2*E5*S^3) +
- (E11a*E11b)^3*(4*E3^4 + 68*E5^2*S^2 + 8*E3*E5*S^4 - 4*E3^3*S^3 + 26*E3^2*E5*S) +
# x^2:
+ (E11a^2 + E11b^2)*(- 2*E4^2*E5*S^7 - E3^2*E4^2*S^6 + 12*E4*E5^2*S^6 +
+ 14*E5^3*S^5 + 2*E3^2*E4*E5*S^5 - 14*E3*E4^3*S^5 +
- 21*E3^2*E5^2*S^4 - 17*E4^4*S^4 - 2*E3^4*E4*S^4 + 102*E3*E4^2*E5*S^4 +
+ 8*E3^4*E5*S^3 + 59*E4^3*E5*S^3 - 29*E3^3*E4^2*S^3 - 280*E3*E4*E5^2*S^3 +
- E3^6*S^2 + 500*E3*E5^3*S^2 - 500*E4^2*E5^2*S^2 - 29*E3^2*E4^3*S^2 + 87*E3^3*E4*E5*S^2 +
- 200*E3^3*E5^2*S + 625*E4*E5^3*S - 14*E3^5*E4*S + 115*E3^2*E4^2*E5*S - 11*E3*E4^4*S +
+ 12*E3^5*E5 - 5^5*E5^4 + 54*E4^5 - 17*E3^4*E4^2 - 250*E3^2*E4*E5^2 - 275*E3*E4^3*E5) +
+ (E11a*E11b)*(E11a + E11b)*(6*E3*E5^2*S^5 - 8*E3^3*E5*S^4 + 200*E5^3*S^3 + 2*E3^5*S^3 +
- 20*E3^2*E5^2*S^2 + 30*E3^4*E5*S + 5^4*E3*E5^3 - 2*E3^6) +
- (E11a*E11b)^2*(6*E5^2*S^6 - 4*E3^2*E5*S^5 + E3^4*S^4 + 60*E3*E5^2*S^3 + 24*E3^3*E5*S^2 +
+ 750*E5^3*S - 8*E3^5*S + 375*E3^2*E5^2) +
# x^1:
+ (E11a + E11b)*(2*E3*E4^3*S^7 + 7*E4^4*S^6 - 18*E3*E4^2*E5*S^6 +
- 62*E4^3*E5*S^5 + 6*E3^3*E4^2*S^5 + 86*E3*E4*E5^2*S^5 +
- 150*E3*E5^3*S^4 + 420*E4^2*E5^2*S^4 + 23*E3^2*E4^3*S^4 - 44*E3^3*E4*E5*S^4 +
+ 110*E3^3*E5^2*S^3 - 1250*E4*E5^3*S^3 - 3*E3*E4^4*S^3 - 108*E3^2*E4^2*E5*S^3 + 6*E3^5*E4*S^3 +
+ 5^5*E5^4*S^2 - 26*E3^5*E5*S^2 - 35*E4^5*S^2 + 23*E3^4*E4^2*S^2 +
+ 475*E3^2*E4*E5^2*S^2 + 185*E3*E4^3*E5*S^2 +
+ 2*E3^7*S - 1250*E3^2*E5^3*S + 100*E4^4*E5*S - 375*E3*E4^2*E5^2*S +
- 3*E3^3*E4^3*S - 40*E3^4*E4*E5*S +
+ 125*E3^4*E5^2 - 625*E4^3*E5^2 +
+ 7*E3^6*E4 + 175*E3^3*E4^2*E5 - 35*E3^2*E4^4 + 5^5*E3*E4*E5^3) +
- E11a*E11b*(28*E5^3*S^5 + 44*E3^2*E5^2*S^4 - 28*E3^4*E5*S^3 +
+ 4*E3^6*S^2 - 250*E3*E5^3*S^2 + 34*E3^5*E5 + 3*5^5*E5^4) +
# B0:
- E4^4*S^8 + 12*E4^3*E5*S^7 - 86*E4^2*E5^2*S^6 - 4*E3^2*E4^3*S^6 +
+ 300*E4*E5^3*S^5 - 2*E3*E4^4*S^5 + 44*E3^2*E4^2*E5*S^5 +
+ 10*E4^5*S^4 - 5^4*E5^4*S^4 - 6*E3^4*E4^2*S^4 - 220*E3^2*E4*E5^2*S^4 - 38*E3*E4^3*E5*S^4 +
+ 500*E3^2*E5^3*S^3 - 60*E4^4*E5*S^3 + 250*E3*E4^2*E5^2*S^3 - 4*E3^3*E4^3*S^3 + 52*E3^4*E4*E5*S^3 +
- 150*E3^4*E5^2*S^2 + 250*E4^3*E5^2*S^2 + 19*E3^2*E4^4*S^2 - 80*E3^3*E4^2*E5*S^2 +
- 4*E3^6*E4*S^2 - 1250*E3*E4*E5^3*S^2 +
+ 20*E3^6*E5*S - 2*E3^5*E4^2*S + 10*E3*E4^5*S - 150*E3^2*E4^3*E5*S + 500*E3^3*E4*E5^2*S +
- E3^8 - 25*E4^6 - 50*E3^5*E4*E5 + 10*E3^4*E4^3 - 625*E3^2*E4^2*E5^2 + 250*E3*E4^4*E5 # = 0
max.pow.S(c("E3V101", "E3Vn101", "E3V501", "E3Vn501"), pow=0, FUN=f0, skip.path=T, R=c(1.1,0,3/4,0,2), R2=c(5,0,3/4,0,2))
solve.coeff(c(1.1,0,3/4,0,2), c(5,0,3/4,0,2), c(-2967.058 + 4468.348, -1493365 + 1634008) / 2,
"c(E3^6*S/E5, E3^2*E5*S^3)", function(R) { Rn = R; Rn[1] = - Rn[1]; (f0(R) - f0(Rn))/2; })
# before f3 was updated;
solve.coeff(c(1,0,1,0,2.2), c(5,0,1,0,2.2), c(- 107.3636 - 119.1818, - 31793.18 - 33179.55) / 2,
"c(E3^4*S/E5, E3*S^5)", function(R) { Rn = R; Rn[1] = - Rn[1]; (f3(R) - f3(Rn))/2; })
solve.coeff(c(1,0,1,0,2), c(5,0,1,0,2), c(- 97 - 109, - 30485 - 31985) / 2,
"c(E3^4*S/E5, E3*S^5)", function(R) { Rn = R; Rn[1] = - Rn[1]; (f3(R) - f3(Rn))/2; })
### Simple Examples:
### S = 0
R2 = c(0,1,0,0,2)
x0 = x0All$V01;
x.all = solve.all(solve.S5HtMixed.Num, x0, R=R2, debug=F)
poly.calc(apply(x.all, 1, function(x) sum(x * x[c(3,4,5,1,2)]))) * 27
# round0(poly.calc(x.all)) * 27
-12473 - 37419*x - 12473*x^2 - 10*x^3 - 10*x^4 + 27*x^5 + 80.75*x^6 + 27*x^7
### E11a = 0
R2 = c(1,0,0,0,2)
-2500 + 12500*x - 12472*x^2 - 100*x^3 - 1*x^4 + 9*x^5 - 27*x^6 + 27*x^7
R2 = c(1,0,0,0,3/2)
-1406.25 + 7031.25*x - 7010.25*x^2 - 75*x^3 - 1*x^4 + 9*x^5 - 27*x^6 + 27*x^7
### Examples: S & E11a
R2 = c(1,1/3,0,0,2)
277.185 - 2*x - 12505.22*x^2 - 349.9733*x^3 - 6.351853*x^4 + 11.94444*x^5 + 0.1663215*x^6 + 27*x^7
R2 = c(1,-1/3,0,0,2)
-8048.84 + 25090.59*x - 12773*x^2 + 152.4053*x^3 + 8.401235*x^4 + 12.01852*x^5 - 54.16701*x^6 + 27*x^7
R2 = c(-1,1/3,0,0,2)
278.3705 + 2*x - 12494.56*x^2 + 350.0226*x^3 - 6.388888*x^4 + 12.05556*x^5 - 0.1670072*x^6 + 27*x^7
R2 = c(1,1,0,0,2)
-2564 - 25342*x - 13521*x^2 - 908.5*x^3 - 13.5*x^4 + 33.5*x^5 + 58.25*x^6 + 27*x^7
R2 = c(1,-1,0,0,2)
-27436 + 51262*x - 14399*x^2 + 721.5*x^3 + 51.5*x^4 + 35.5*x^5 - 112.75*x^6 + 27*x^7
R2 = c(-1,1,0,0,2)
-2420 - 24530*x - 11377*x^2 + 784.5*x^3 - 14.5*x^4 + 38.5*x^5 + 49.25*x^6 + 27*x^7
R2 = c(-1,-1,0,0,2)
-27692 + 48850*x - 10655*x^2 - 589.5*x^3 + 44.5*x^4 + 36.5*x^5 - 103.75*x^6 + 27*x^7
R2 = c(1,1,0,0,3)
-5725 - 56795*x - 29682*x^2 - 1334.667*x^3 - 13.66667*x^4 + 34.33333*x^5 + 56.88889*x^6 + 27*x^7
R2 = c(-1,1,0,0,3)
-5509 - 55577*x - 26466*x^2 + 1210.667*x^3 - 14.33333*x^4 + 37.66667*x^5 + 50.88889*x^6 + 27*x^7
### E3:
R2 = c(0,0,1,0,2)
-0.25 + 125*x - 12494*x^2 + 150*x^4 + 2*x^5 + 27*x^7
### E11a & E3:
R2 = c(0,1,1,0,2)
-12190.25 - 35792*x - 11594.25*x^2 + 255*x^3 + 143.75*x^4 + 21*x^5 + 80.75*x^6 + 27*x^7
### Other:
R2 = c(5,1/3,0,0,2)
x0 = x0All$V50f
x.all = solve.all(solve.S5HtMixed.Num, x0, R=R2, debug=F)
poly.calc(apply(x.all, 1, function(x) sum(x * x[c(3,4,5,1,2)]))) * 27
###
R2 = c(5/4, -1/3,0,0,2)
x0 = x0All$V10
x.all = solve.all(solve.S5HtMixed.Num, x0, R=R2, debug=F)
poly.calc(apply(x.all, 1, function(x) sum(x * x[c(3,4,5,1,2)]))) * 27
######################
######################
### Robust Derivation:
x = x.all[1,]; E3 = E4 = 0;
x1 = x[1]; x2 = x[2]; x3 = x[3]; x4 = x[4]; x5 = x[5];
s1 = x1 + x2; p1 = x1 * x2;
s2 = x3 + x4 + x5; e2 = (x3 + x4)*x5 + x3*x4; e3 = x3*x4*x5;
S = s1 + s2; E5 = p1*e3;
E11a = x1*x2 + x2*x3 + x3*x4 + x4*x5 + x5*x1;
E11b = x1*x3 + x2*x4 + x3*x5 + x4*x1 + x5*x2;
E2 = E11a + E11b;
###
### Transformed P[5] System:
s1 + s2 - S # = 0
#
s1*S - s1^2 + p1 + e2 - E2 # = 0
s1*e2 - s1*p1 + S*p1 + e3 - E3 # = 0
s1*e3 + p1*e2 - E4 # = 0
p1*e3 - E5 # = 0
x5^3 + s1*x5^2 + e2*x5 - e3 - S*x5^2 # = 0
#
s1*S - s1^2 + p1 + e2 - E2 # = 0
s1*p1*e2 - s1*p1^2 + S*p1^2 - p1*E3 + E5 # = 0
s1*E5 + p1^2*e2 - p1*E4 # = 0
p1*x5^3 + p1*s1*x5^2 + p1*e2*x5 - p1*S*x5^2 - E5 # = 0
p1 = toPoly.pm("s1*S - s1^2 + p1 + e2 - E2")
p2 = toPoly.pm("s1*p1*e2 - s1*p1^2 + S*p1^2 - p1*E3 + E5")
p3 = toPoly.pm("s1*E5 + p1^2*e2 - p1*E4")
p4 = toPoly.pm("p1*x5^3 + p1*s1*x5^2 + p1*e2*x5 - p1*S*x5^2 - E5")
pR1 = solve.lpm(p1, p4, p2, xn=c("p1", "e2"))
pR2 = solve.lpm(p1, p4, p3, xn=c("p1", "e2"))
pR1 = pR1[[2]]$Rez; pR1$coeff = - pR1$coeff;
pR2 = pR2[[2]]$Rez; pR2$coeff = - pR2$coeff;
table(pR2$s1)
tmp = gcd.pm(pR1, pR2, by="s1")
pR2 = diff.pm(pR2, mult.pm(pR1, toPoly.pm("x5^3")))
# Note: coeff a == 0!
x5^2*(S - x5)*(x5^5 - S*x5^4 + E2*x5^3 - E3*x5^2 + E4*x5 - E5)*s1^2 +
- x5^2*(S - x5)^2*(x5^5 - S*x5^4 + E2*x5^3 - E3*x5^2 + E4*x5 - E5)*s1 +
- E5^2 + 2*E4*E5*x5 - E4^2*x5^2 - E5*E2*S*x5^2 + E5*E2*x5^3 + E4*E2*S*x5^3 + E5*S^2*x5^3 - E4*E2*x5^4 +
- 2*E5*S*x5^4 - E4*S^2*x5^4 + E5*x5^5 + 2*E4*S*x5^5 + E2^2*S*x5^5 - E4*x5^6 - 2*E2*S^2*x5^6 +
+ 2*E2*S*x5^7 + S^3*x5^7 - 2*S^2*x5^8 + S*x5^9 - E2*S*x5^4*E3 - E2*x5^5*E3 + S^2*x5^5*E3 +
- x5^7*E3 + x5^4*E3^2 # = 0
|
993c7665c61391389de11889e862d81efb8838a3
|
90c5c8a79cb01f1a2475f01f8c0e4ba539492956
|
/Scripts/R_Scripts/Data_setup_scripts/add_chromatin_state_annotation_to_genotypes.R
|
445d986c5bb95ff755c1d38a4158075f03740c13
|
[] |
no_license
|
JacobBergstedt/MIMETH
|
626725179fb37adf3853adafd19ccf33c4c1623a
|
c475440ee5bb3389fae72f1684d270641884ce0a
|
refs/heads/main
| 2023-04-15T03:18:12.731765
| 2022-08-23T13:36:50
| 2022-08-23T13:36:50
| 527,968,587
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 593
|
r
|
add_chromatin_state_annotation_to_genotypes.R
|
# Annotate the LabEx MI imputed-genotype SNP map with Roadmap 15-state
# chromatin-state labels and save the updated map.
library(tidyverse)
library(glue)
library(vroom)
library(GenomicRanges)
source("./Scripts/R_scripts/Libraries/functions_for_annotation.R")
# Load the annotated SNP map; prefix chromosomes with "chr" so they match
# the coordinate naming used by the annotation helper.
map_labex <- readRDS("./Data/RData/Genotypes/LabExMI_imputation_1000x5699237_annotated_map_with_ancestral.rds") %>%
mutate(SNP_chr = paste0("chr", SNP_chr))
# add_15_state_annotation() comes from the sourced annotation library;
# presumably it returns the 15-state annotation keyed by SNP_ID so that
# inner_join can merge it back -- confirm in functions_for_annotation.R.
map_labex_roadmap <- map_labex %>%
add_15_state_annotation(chr_col = "SNP_chr", position_col = "SNP_position", genomic_feature = "SNP_ID")
map_labex <- inner_join(map_labex, map_labex_roadmap)
saveRDS(map_labex, "./Data/RData/Genotypes/LabExMI_imputation_1000x5699237_annotated_map.rds")
|
5118894202e519571c366cc669e9af0ec37ee9ee
|
94b199a2541bf48ecd5bdeacf3ac7f3cb96613fd
|
/man/MegaLMM_control.Rd
|
76432165c144b59d379e26d684318944c7973145
|
[
"MIT"
] |
permissive
|
deruncie/MegaLMM
|
e83c23851ef7dfdb0fbc9db702a854ea0fbd5fc0
|
e9e93c542949213597d3adb02c1bdf52ce4633ea
|
refs/heads/master
| 2023-06-09T10:28:46.851382
| 2023-05-15T18:53:30
| 2023-05-15T18:53:30
| 260,344,286
| 32
| 14
|
MIT
| 2022-12-07T20:07:24
| 2020-05-01T00:01:28
|
R
|
UTF-8
|
R
| false
| true
| 4,071
|
rd
|
MegaLMM_control.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MegaLMM_master.R
\name{MegaLMM_control}
\alias{MegaLMM_control}
\title{Set MegaLMM run parameters}
\usage{
MegaLMM_control(
which_sampler = list(Y = 1, F = 1),
run_sampler_times = 1,
scale_Y = c(T, F),
K = 20,
h2_divisions = 100,
h2_step_size = NULL,
drop0_tol = 1e-14,
K_eigen_tol = 1e-10,
burn = 100,
thin = 2,
max_NA_groups = Inf,
svd_K = TRUE,
verbose = TRUE,
save_current_state = TRUE,
diagonalize_ZtZ_Kinv = TRUE,
...
)
}
\arguments{
\item{which_sampler}{List with two elements (Y and F) specifying which sampling function
to use for the observations (Y) and factors (F). Each is a number in 1-4. 1-3 are block updators. 4 is a single-site updater.
MegaLMM uses 1-3 depending on data dimensions.
MegaBayesC uses 4 which updates each coefficient individually.}
\item{run_sampler_times}{For \code{which_sampler==4}, we can repeat the single-site sampler multiple times to help take larger steps each iteration.}
\item{scale_Y}{Should the Y values be centered and scaled? Recommended, except for simulated data.}
\item{K}{number of factors}
\item{h2_divisions}{A scalar or vector of length equal to the number of random effects. In MegaLMM, random
effects are parameterized as proportions of the total variance of all random effects plus residuals.
The prior on the variance components is discrete, spanning the interval [0,1) for each variance component proportion
with \code{h2_divisions} equally spaced values. If
\code{h2_divisions} is a scalar, the prior for each variance component has this number of divisions.
If a vector, the length should equal the number of variance components, in the order of the random effects specified in the model}
\item{h2_step_size}{Either NULL, or a scalar in the range (0,1].
If NULL, h2's will be sampled based on the marginal probability over all possible h2 vectors.
If a scalar, a Metropolis-Hastings update step will be used for each h2 vector.
The trial value will be selected uniformly from all possible h2 vectors within this Euclidean distance from the current vector.}
\item{drop0_tol}{A scalar giving a tolerance for the \code{drop0()} function that will be applied
to various symmetric (possibly) sparse matrices to try to fix numerical errors and increase sparsity.}
\item{K_eigen_tol}{A scalar giving the minimum eigenvalue of a K matrix allowed. During pre-processing,
eigenvalues of each K matrix will be calculated using \code{svd(K)}. Only eigenvectors of K with corresponding eigenvalues
greater than this value will be kept. If smaller eigenvalues exist, the model will be transformed
to reduce the rank of K, by multiplying Z by the remaining eigenvectors of K. This transformation
is undone before posterior samples are recorded, so posterior samples of \code{U_F} and \code{U_R} are
untransformed.}
\item{burn}{burnin length of the MCMC chain}
\item{thin}{thinning rate of the MCMC chain}
\item{max_NA_groups}{If 0, all NAs will be imputed during sampling. If Inf, all NAs will be marginalized over.
If in (0,Inf), up to this many groups of columns will be separately sampled.
The minimum number of NAs in each column not in one of these groups will be imputed.}
\item{svd_K}{If TRUE, the diagonalization of ZKZt for the first random effect is accomplished using this algorithm:
https://math.stackexchange.com/questions/67231/singular-value-decomposition-of-product-of-matrices which doesn't require forming ZKZt.
If FALSE, the SVD of ZKZt for the first random effect is calculated directly. TRUE is generally faster if the same genomes are repeated several times.}
\item{verbose}{should progress during initiation and sampling be printed?}
\item{save_current_state}{should the current state of the sampler be saved every time the function \code{sample_MegaLMM} is called?}
}
\description{
Function to create run_parameters list for initializing MegaLMM model
}
\seealso{
\code{\link{MegaLMM_init}}, \code{\link{sample_MegaLMM}}, \code{\link{print.MegaLMM_state}}
}
|
da889f2b2004f9af9592d40bda17126657d376be
|
806d4c20f16475c054050a483ddb5d0f9683d94a
|
/simsem/inst/tests/test_adjust-methods.R
|
8ca9a96bb485776997ef8605937baeeb37de273b
|
[] |
no_license
|
yczh/simsem
|
b0ffb75da611962164c9de2b855c154c8864c202
|
7262ebab6695178641424e4f30cd97b744799fce
|
refs/heads/master
| 2021-03-10T15:23:51.143229
| 2020-02-12T16:16:58
| 2020-02-12T16:16:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 929
|
r
|
test_adjust-methods.R
|
# Tests for the adjust() generic on SimMatrix and SimVector objects.
# Each test replaces one element of a sim object and checks that the
# replacement shows up in @param / @free without changing the class.
source("../../R/AllClass.R")
source("../../R/AllGenerics.R")
source("../../R/simMatrix.R")
source("../../R/simUnif.R")
source("../../R/simVector.R")
source("../../R/adjust-methods.R")

context("adjust-methods: SimMatrix")

# Simple-structure loading pattern: indicators 1-3 on factor 1, 4-6 on factor 2
free.pattern <- matrix(0, 6, 2)
free.pattern[1:3, 1] <- NA
free.pattern[4:6, 2] <- NA
lambda <- simMatrix(free.pattern, 0.7)
# NOTE: the object names u34/n01 below are referenced by string in adjust(),
# so they must not be renamed.
u34 <- simUnif(0.3, 0.4)

# Assigning a named distribution object records its name in @param
lambda.dist <- adjust(lambda, "u34", c(2, 1))
expect_that(lambda.dist@param[2, 1], matches("u34"))
expect_true(class(lambda.dist)[1] == "SimMatrix")

# Assigning the value 0 clears the parameter label
lambda.zero <- adjust(lambda, 0, c(2, 1))
expect_that(lambda.zero@param[2, 1], matches(""))

# Fixing a cell (fourth argument FALSE) stores the value and unsets @free
lambda.fixed <- adjust(lambda, 0.5, c(2, 2), FALSE)
expect_that(lambda.fixed@param[2, 2], matches("0.5"))
expect_true(is.na(lambda.fixed@free[2, 2]))

context("adjust-methods: SimVector")

# Factor means: both free, with starting values 5 and 2
alpha.free <- rep(NA, 2)
alpha.start <- c(5, 2)
alpha <- simVector(alpha.free, alpha.start)
n01 <- simUnif(0, 1)

alpha.dist <- adjust(alpha, "n01", 2)
expect_true(alpha.dist@param[2] == "n01")
expect_true(class(alpha.dist)[1] == "SimVector")
|
d2793cf2ef66394b2177c441672e8ddf313cecad
|
6b7eac94cab95036dfcb8f49f992524947aa40ca
|
/man/sympt_risk.Rd
|
95875be7961be8b4b774d62fc2de4a208aa7bd9d
|
[
"MIT"
] |
permissive
|
Urban-Analytics/rampuaR
|
d9e4a7b4acfbf06cccc0b25a68dfafebc1836256
|
4a73131228b872a517916e964ac732ff3b25d519
|
refs/heads/master
| 2023-01-14T11:27:10.922266
| 2020-11-24T15:20:49
| 2020-11-24T15:20:49
| 280,127,722
| 2
| 0
|
MIT
| 2020-11-05T10:31:09
| 2020-07-16T10:44:09
|
R
|
UTF-8
|
R
| false
| true
| 902
|
rd
|
sympt_risk.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/covid_status_functions.R
\name{sympt_risk}
\alias{sympt_risk}
\title{Calculating the risk of being symptomatic and the mortality rate based on age
and health risks}
\usage{
sympt_risk(
df,
overweight_sympt_mplier = 1.46,
cvd = NULL,
diabetes = NULL,
bloodpressure = NULL
)
}
\arguments{
\item{df}{The input list - the output from the create_input function}
\item{overweight_sympt_mplier}{The obesity risk multiplier for BMI > 40}
\item{cvd}{The cardiovascular disease mortality risk multiplier}
\item{diabetes}{The diabetes mortality risk multiplier}
\item{bloodpressure}{The bloodpressure/hypertension mortality risk multiplier}
}
\value{
A list of data to be used in the infection model -
with updated mortality risk
}
\description{
Calculating the risk of being symptomatic and the mortality rate based on age
and health risks
}
|
4c576b50e7aa0d2fdc635d41582cacff6c8b091a
|
145b352ce415133220b875ae59cc4cd849c57d7b
|
/Models_WolfRec_IPM.R
|
2c9831f0596741b6fce5a173fce5f215540c75b8
|
[] |
no_license
|
akeever2/PhD-WolfProject
|
c4a9f42564001e03f5372f3e27fae4650e1399ab
|
f59b818bd1110d705abac4edaae3a3f2ebac8b3d
|
refs/heads/master
| 2021-01-09T20:18:54.651390
| 2019-04-17T18:46:35
| 2019-04-17T18:46:35
| 60,861,089
| 0
| 0
| null | 2017-02-14T17:49:09
| 2016-06-10T16:14:20
|
R
|
UTF-8
|
R
| false
| false
| 111,567
|
r
|
Models_WolfRec_IPM.R
|
####################################################################I
# A priori models using IPM to estimate recruitment of wolves in Montana
# For more details regarding the IPM see the rscript IPM_WolfRec_V2
# For running models see the rscript Analyses_WolfRec_IPM
# Allison C. Keever
# akeever1122@gmail.com
# github.com/akeever2
# Montana Cooperative Wildlife Research Unit
# 2018
# Occupancy code adapted from Sarah B Bassing
# sarah.bassing@gmail.com
# github.com/SarahBassing
# Montana Cooperative Wildlife Research Unit
# August 2016
######################################################################I
#### Set WD ####
# Set the working directory I want to save files to
# NOTE(review): a hard-coded absolute setwd() makes the script machine-specific;
# consider relative paths (or the here package) for portability.
setwd("C:/Users/allison/Documents/Project/Dissertation/Recruitment/Results/ModelResultFiles")
#### M1; FIX R ~ pack size ####
sink("M1_GroupRecIPM.txt")
cat("
model {
############################################################
# 1. Priors
############################################################
## 1.1 Occupancy priors
# psi1 coefficient (occupancy in year 1)
B0.psi1 ~ dnorm(0,0.001)
# Priors for transition probabilities (survival and colonization)
for(k in 1:(nyears-1)){
B0.colo[k] ~ dnorm(0,0.001)
}#k
B0.phi ~ dnorm(0,0.001)
# Priors for detection probabilities
B0.p11 ~ dnorm(0,0.001)
B0.p10 ~ dnorm(0,0.001)
B0.b ~ dnorm(0,0.001)
# Priors for covariates
b.pc1.psi ~ dnorm(0,0.001)
b.recPC.psi ~ dnorm(0,0.001)
b.pc1.colo ~ dnorm(0,0.001)
b.recPC.colo ~ dnorm(0,0.001)
b.pc1.phi ~ dnorm(0,0.001)
b.area.p11 ~ dnorm(0,0.001)
b.huntdays.p11 ~ dnorm(0,0.001)
b.acv.p11 ~ dnorm(0,0.001)
b.map.p11 ~ dnorm(0,0.001)
b.nonfrrds.p11 ~ dnorm(0,0.001)
b.frrds.p11 ~ dnorm(0,0.001)
b.huntdays.p10 ~ dnorm(0,0.001)
b.nonfrrds.p10 ~ dnorm(0,0.001)
b.frrds.p10 ~ dnorm(0,0.001)
b.acv.p10 ~ dnorm(0,0.001)
## 1.2 Territory priors
## 1.3 Survival priors
# Random effect for year
for(k in 1:nyears){
eps.surv[k] ~ dnorm(0, tau.surv)
}
sigma.surv ~ dunif(0,100)
tau.surv <- pow(sigma.surv, -2)
var.surv <- pow(sigma.surv, 2)
for(p in 1:nperiods){
b.period.surv[p] ~ dnorm(0,0.001)
}
## 1.4 Group priors
# Initial group sizes
for(i in 1:ngroups){
G[i,1] ~ dpois(7)T(2,)
}
# Process error
tauy.group <- pow(sigma.group, -2)
sigma.group ~ dunif(0,100)
var.group <- pow(sigma.group, 2)
## 1.5 Recruitment priors
# Priors for beta coefficients
B0.gam ~ dnorm(0,0.001)
B1.gam ~ dnorm(0,0.001)
############################################################
# 2. Likelihoods
############################################################
#####################
# 2.1. Occupancy likelihood
# Adapted from Sarah B Bassing
# Montana Cooperative Wildlife Research Unit
# August 2016.
# This is a DYNAMIC FALSE-POSITVE MULTI-SEASON occupancy model
# Encounter histories include:
# 1 = no detection
# 2 = uncertain detection
# 3 = certain detection
####################
# Ecological process/submodel
# Define State (z) conditional on parameters- Nmbr sites occupied
for(i in 1:nsites){
logit(psi1[i]) <- B0.psi1 + b.pc1.psi * PC1[i] + b.recPC.psi * recPC[i,1]
z[i,1] ~ dbern(psi1[i])
for(k in 1:(nyears-1)){
logit(phi[i,k]) <- B0.phi + b.pc1.phi * PC1[i]
logit(colo[i,k]) <- B0.colo[k] + b.pc1.colo * PC1[i] + b.recPC.colo * recPC[i,k+1]
}#k
for(k in 2:nyears){
muZ[i,k] <- z[i,k-1] * phi[i,k-1] + (1-z[i,k-1]) * colo[i,k-1]
z[i,k] ~ dbern(muZ[i,k])
}#k
}#i
# Observation process/submodel
# z is either 0 or 1 (unoccupied or occupied)
# y (observation dependent on the state z) can be 0,1,2 (no obs, uncertain obs,
# certain obs) but JAGS's multinomial link function, dcat(), needs y to be 1,2,3
# Observation process: define observations [y,i,j,k,z]
# y|z has a probability of...
# Detection probabilities are site, occasion, and year specific
for(i in 1:nsites){
for (j in 1:noccs){
for(k in 1:nyears){
p[1,i,j,k,1] <- (1 - p10[i,j,k])
p[1,i,j,k,2] <- (1 - p11[i,j,k])
p[2,i,j,k,1] <- p10[i,j,k]
p[2,i,j,k,2] <- (1 - b[i,j,k]) * p11[i,j,k]
p[3,i,j,k,1] <- 0
p[3,i,j,k,2] <- b[i,j,k] * p11[i,j,k]
}#k
}#j
}#i
# Need mulitnomial link function for false positive detections: dcat function in JAGS
# p11 is normal detction, p10 is false-positive dection (i.e., detected but wrong),
# b is certain detection
# Observation model
for(i in 1:nsites){
for(j in 1:noccs){
for(k in 1:nyears){
logit(p11[i,j,k]) <- B0.p11 + b.area.p11 * area[i] + b.huntdays.p11 * huntdays[i,j,k] + b.nonfrrds.p11 * nonforrds[i] + b.frrds.p11 * forrds[i] + b.acv.p11 * acv[i,j,k] + b.map.p11 * mapppn[i,j,k]
logit(p10[i,j,k]) <- B0.p10 + b.acv.p10 * acv[i,j,k] + b.huntdays.p10 * huntdays[i,j,k] + b.nonfrrds.p10 * nonforrds[i] + b.frrds.p10 * forrds[i]
logit(b[i,j,k]) <- B0.b
y.occ[i,j,k] ~ dcat(p[,i,j,k,(z[i,k]+1)])
}#k
}#j
}#i
# Derived parameters
for(i in 1:nsites){
psi[i,1] <- psi1[i]
for (k in 2:nyears){
psi[i,k] <- psi[i,k-1] * phi[i,k-1] + (1 - psi[i,k-1]) * colo[i,k-1]
}#k
}#i
# Area occpupied indexed by year and region
for(k in 1:nyears){
A[k] <- sum(psi[,k] * area[])
}
#####################
# 2.2. Territory model
# Input includes area occupied (A) indexed by year (k) from occupancy model (2.1.)
# Area occupied is divided by territory size (T), which is currently set to 600
# km squared, but will be based on data from Rich et al. 2012 for territory size
# Output is number of packs (P) indexed by year (k)
####################
# Pull in data for the mean for territory size
T3 ~ dlnorm(6.22985815, 1/0.58728123)
# Estimate number of packs from area occupied (A) and territory size (T)
for(k in 1:nyears){
P[k] <- (A[k] / (T3 + 0.000001)) * T.overlap[k]
}
#####################
# 2.3. Survival likelihood
# Current model is
# Output is survival indexed by year (k)
####################
# Estimate the harzard.
# This part transforms the linear predictor (mu.surv)
# using the cloglog link and relates it to the data (event) for each
# observation
for(i in 1:nobs){
event[i] ~ dbern(mu.surv[i])
cloglog(mu.surv[i]) <- b.period.surv[Period[i]] + eps.surv[Year[i]]
}#i
# Predicted values
# Baseline hazard
for(k in 1:nyears){
for(p in 1:nperiods){
cloglog(mu.pred[p,k]) <- b.period.surv[p] + eps.surv[k]
hazard[p,k] <- -log(1 - mu.pred[p,k])
}#p
}#k
# Cumulative hazard and survival
for(k in 1:nyears){
base.H[1,k] <- hazard[1,k] * width.interval[1]
for(p in 2:nperiods){
base.H[p,k] <- base.H[p-1, k] + hazard[p,k] * width.interval[p]
}#p
}#k
for(k in 1:nyears){
for(p in 1:nperiods){
base.s[p,k] <- exp(-base.H[p,k])
}#p
annual.s[k] <- base.s[length(width.interval), k]
}#k
# Compute posterior predictive check statistics (Bayesian p-value GOF):
# compare a chi-square-style discrepancy on the observed events to the same
# discrepancy computed on data replicated from the fitted model.
for(i in 1:nobs){
  # Expected values #### NOTE-Maybe multiply by nobs instead of 1? ####
  event.expected[i] <- 1 * mu.surv[i]
  # Fit statistic for actual data; + 0.01 guards against division by a
  # near-zero expected value
  event.chi[i] <- pow((event[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
}
fit <- sum(event.chi[])
for(i in 1:nobs){
  # Replicate data for GOF
  event.rep[i] ~ dbern(mu.surv[i])
  # Fit statistics for replicate data
  event.chi.new[i] <- pow((event.rep[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
}
fit.new <- sum(event.chi.new[])
#####################
# 2.4. Group level counts likelihood
# Input data are group counts (y.group)
# Input estimates are survival (s) from survival model indexed by year (k) and
# recruitment (number of pups per pack, gamma) indexed by year and group (i)
# Output is mean estimate of group size (G) which are indexed by year and group
####################
# Ecological model/ system process
for(i in 1:ngroups){
for(k in 2:nyears){
g.mu[i,k] <- G[i,k-1] * annual.s[k-1] * (1 - em.group) + gamma[i,k-1]
G[i,k] ~ dnorm(g.mu[i,k], 1 / (g.mu[i,k] + 0.00001))T(0,25)
# pois2[i,k] ~ dnorm(g.mu[i,k], 1 / (g.mu[i,k] + 0.00001))T(2,25)
# pois1[i,k] ~ dnorm(g.mu[i,k], 1 / (g.mu[i,k] + 0.00001))T(y.group[i,k],25)
# G[i,k] <- (pois1[i,k] * indicator[i,k]) + (pois2[i,k] * (1 - indicator[i,k]))
}
}
# Observation proccess
for(i in 1:ngroups){
for(k in 1:nyears){
y.group[i,k] ~ dnorm(G[i,k], tauy.group)
}
}
# Derived parameters
for(k in 1:nyears){
G.mean[k] <- mean(G[,k] * annual.s[k] * (1 - em.group))
G.mean.high[k] <- mean(G[,k])
gamma.mean[k] <- mean(gamma[,k])
n.est[k] <- P[k] * G.mean[k]
}
for(k in 2:nyears){
pop.growth[k] <- n.est[k] / n.est[k-1]
}
#####################
# 2.5. Recruitment model
####################
# Generalized linear model with log link function for recruitment
for(i in 1:ngroups){
for(k in 1:nyears){
mu.gamma[i,k] <- exp(B0.gam + B1.gam * G[i,k])
gamma[i,k] ~ dpois(mu.gamma[i,k])
}
}
############################################################
# 3. Bugs requirements
############################################################
}", fill=TRUE)
sink()
#### M2; FIX R ~ pack size + ran year ####
sink("M2_GroupRecIPM.txt")
cat("
model {
############################################################
# 1. Priors
############################################################
## 1.1 Occupancy priors
# psi1 coefficient (occupancy in year 1)
B0.psi1 ~ dnorm(0,0.001)
# Priors for transition probabilities (survival and colonization)
for(k in 1:(nyears-1)){
B0.colo[k] ~ dnorm(0,0.001)
}#k
B0.phi ~ dnorm(0,0.001)
# Priors for detection probabilities
B0.p11 ~ dnorm(0,0.001)
B0.p10 ~ dnorm(0,0.001)
B0.b ~ dnorm(0,0.001)
# Priors for covariates
b.pc1.psi ~ dnorm(0,0.001)
b.recPC.psi ~ dnorm(0,0.001)
b.pc1.colo ~ dnorm(0,0.001)
b.recPC.colo ~ dnorm(0,0.001)
b.pc1.phi ~ dnorm(0,0.001)
b.area.p11 ~ dnorm(0,0.001)
b.huntdays.p11 ~ dnorm(0,0.001)
b.acv.p11 ~ dnorm(0,0.001)
b.map.p11 ~ dnorm(0,0.001)
b.nonfrrds.p11 ~ dnorm(0,0.001)
b.frrds.p11 ~ dnorm(0,0.001)
b.huntdays.p10 ~ dnorm(0,0.001)
b.nonfrrds.p10 ~ dnorm(0,0.001)
b.frrds.p10 ~ dnorm(0,0.001)
b.acv.p10 ~ dnorm(0,0.001)
## 1.2 Territory priors
## 1.3 Survival priors
# Random effect for year
for(k in 1:nyears){
eps.surv[k] ~ dnorm(0, tau.surv)
}
sigma.surv ~ dunif(0,100)
tau.surv <- pow(sigma.surv, -2)
var.surv <- pow(sigma.surv, 2)
for(p in 1:nperiods){
b.period.surv[p] ~ dnorm(0,0.001)
}
## 1.4 Group priors
# Initial group sizes
for(i in 1:ngroups){
G[i,1] ~ dpois(7)T(2,)
}
# Process error
tauy.group <- pow(sigma.group, -2)
sigma.group ~ dunif(0,100)
var.group <- pow(sigma.group, 2)
## 1.5 Recruitment priors
# Priors for beta coefficients
B0.gam ~ dnorm(0,0.001)
B1.gam ~ dnorm(0,0.001)
# Random effect for year
for(k in 1:(nyears-1)){
eps.gam[k] ~ dnorm(0, tau.gam)
}
sigma.gam ~ dunif(0,100)
tau.gam <- pow(sigma.gam, -2)
var.gam <- pow(sigma.gam, 2)
############################################################
# 2. Likelihoods
############################################################
#####################
# 2.1. Occupancy likelihood
# Adapted from Sarah B Bassing
# Montana Cooperative Wildlife Research Unit
# August 2016.
# This is a DYNAMIC FALSE-POSITVE MULTI-SEASON occupancy model
# Encounter histories include:
# 1 = no detection
# 2 = uncertain detection
# 3 = certain detection
####################
# Ecological process/submodel
# Define State (z) conditional on parameters- Nmbr sites occupied
for(i in 1:nsites){
logit(psi1[i]) <- B0.psi1 + b.pc1.psi * PC1[i] + b.recPC.psi * recPC[i,1]
z[i,1] ~ dbern(psi1[i])
for(k in 1:(nyears-1)){
logit(phi[i,k]) <- B0.phi + b.pc1.phi * PC1[i]
logit(colo[i,k]) <- B0.colo[k] + b.pc1.colo * PC1[i] + b.recPC.colo * recPC[i,k+1]
}#k
for(k in 2:nyears){
muZ[i,k] <- z[i,k-1] * phi[i,k-1] + (1-z[i,k-1]) * colo[i,k-1]
z[i,k] ~ dbern(muZ[i,k])
}#k
}#i
# Observation process/submodel
# z is either 0 or 1 (unoccupied or occupied)
# y (observation dependent on the state z) can be 0,1,2 (no obs, uncertain obs,
# certain obs) but JAGS's multinomial link function, dcat(), needs y to be 1,2,3
# Observation process: define observations [y,i,j,k,z]
# y|z has a probability of...
# Detection probabilities are site, occasion, and year specific
for(i in 1:nsites){
for (j in 1:noccs){
for(k in 1:nyears){
p[1,i,j,k,1] <- (1 - p10[i,j,k])
p[1,i,j,k,2] <- (1 - p11[i,j,k])
p[2,i,j,k,1] <- p10[i,j,k]
p[2,i,j,k,2] <- (1 - b[i,j,k]) * p11[i,j,k]
p[3,i,j,k,1] <- 0
p[3,i,j,k,2] <- b[i,j,k] * p11[i,j,k]
}#k
}#j
}#i
# Need mulitnomial link function for false positive detections: dcat function in JAGS
# p11 is normal detction, p10 is false-positive dection (i.e., detected but wrong),
# b is certain detection
# Observation model
for(i in 1:nsites){
for(j in 1:noccs){
for(k in 1:nyears){
logit(p11[i,j,k]) <- B0.p11 + b.area.p11 * area[i] + b.huntdays.p11 * huntdays[i,j,k] + b.nonfrrds.p11 * nonforrds[i] + b.frrds.p11 * forrds[i] + b.acv.p11 * acv[i,j,k] + b.map.p11 * mapppn[i,j,k]
logit(p10[i,j,k]) <- B0.p10 + b.acv.p10 * acv[i,j,k] + b.huntdays.p10 * huntdays[i,j,k] + b.nonfrrds.p10 * nonforrds[i] + b.frrds.p10 * forrds[i]
logit(b[i,j,k]) <- B0.b
y.occ[i,j,k] ~ dcat(p[,i,j,k,(z[i,k]+1)])
}#k
}#j
}#i
# Derived parameters
for(i in 1:nsites){
psi[i,1] <- psi1[i]
for (k in 2:nyears){
psi[i,k] <- psi[i,k-1] * phi[i,k-1] + (1 - psi[i,k-1]) * colo[i,k-1]
}#k
}#i
# Area occpupied indexed by year and region
for(k in 1:nyears){
A[k] <- sum(psi[,k] * area[])
}
#####################
# 2.2. Territory model
# Input includes area occupied (A) indexed by year (k) from occupancy model (2.1.)
# Area occupied is divided by territory size (T), which is currently set to 600
# km squared, but will be based on data from Rich et al. 2012 for territory size
# Output is number of packs (P) indexed by year (k)
####################
# Pull in data for the mean for territory size
T3 ~ dlnorm(6.22985815, 1/0.58728123)
# Estimate number of packs from area occupied (A) and territory size (T)
for(k in 1:nyears){
P[k] <- (A[k] / (T3 + 0.000001)) * T.overlap[k]
}
#####################
# 2.3. Survival likelihood
# Current model is
# Output is survival indexed by year (k)
####################
# Estimate the harzard.
# This part transforms the linear predictor (mu.surv)
# using the cloglog link and relates it to the data (event) for each
# observation
for(i in 1:nobs){
event[i] ~ dbern(mu.surv[i])
cloglog(mu.surv[i]) <- b.period.surv[Period[i]] + eps.surv[Year[i]]
}#i
# Predicted values
# Baseline hazard
for(k in 1:nyears){
for(p in 1:nperiods){
cloglog(mu.pred[p,k]) <- b.period.surv[p] + eps.surv[k]
hazard[p,k] <- -log(1 - mu.pred[p,k])
}#p
}#k
# Cumulative hazard and survival
for(k in 1:nyears){
base.H[1,k] <- hazard[1,k] * width.interval[1]
for(p in 2:nperiods){
base.H[p,k] <- base.H[p-1, k] + hazard[p,k] * width.interval[p]
}#p
}#k
for(k in 1:nyears){
for(p in 1:nperiods){
base.s[p,k] <- exp(-base.H[p,k])
}#p
annual.s[k] <- base.s[length(width.interval), k]
}#k
# Compute posterior predictive check statistics (Bayesian p-value GOF):
# compare a chi-square-style discrepancy on the observed events to the same
# discrepancy computed on data replicated from the fitted model.
for(i in 1:nobs){
  # Expected values #### NOTE-Maybe multiply by nobs instead of 1? ####
  event.expected[i] <- 1 * mu.surv[i]
  # Fit statistic for actual data; pow() takes the exponent as its second
  # argument, i.e. pow(x, 2) = x^2 (the M1 model has the correct form).
  # + 0.01 guards against division by a near-zero expected value.
  event.chi[i] <- pow((event[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
  # Replicate data for GOF
  event.rep[i] ~ dbern(mu.surv[i])
  # Fit statistics for replicate data
  event.chi.new[i] <- pow((event.rep[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
}
# Sum once, outside the loop: JAGS does not allow the node fit to be
# (re)defined at every iteration of i.
fit <- sum(event.chi[])
fit.new <- sum(event.chi.new[])
#####################
# 2.4. Group level counts likelihood
# Input data are group counts (y.group)
# Input estimates are survival (s) from survival model indexed by year (k) and
# recruitment (number of pups per pack, gamma) indexed by year and group (i)
# Output is mean estimate of group size (G) which are indexed by year and group
####################
# Ecological model/ system process
for(i in 1:ngroups){
for(k in 2:nyears){
g.mu[i,k] <- G[i,k-1] * annual.s[k-1] * (1 - em.group) + gamma[i,k-1]
G[i,k] ~ dnorm(g.mu[i,k], 1 / (g.mu[i,k] + 0.00001))T(0,25)
}
}
# Observation proccess
for(i in 1:ngroups){
for(k in 1:nyears){
y.group[i,k] ~ dnorm(G[i,k], tauy.group)
}
}
# Derived parameters
for(k in 1:nyears){
G.mean[k] <- mean(G[,k] * annual.s[k] * (1 - em.group))
G.mean.high[k] <- mean(G[,k])
gamma.mean[k] <- mean(gamma[,k])
n.est[k] <- P[k] * G.mean[k]
}
for(k in 2:nyears){
pop.growth[k] <- n.est[k] / n.est[k-1]
}
#####################
# 2.5. Recruitment model
####################
# Generalized linear model with log link function for recruitment
for(i in 1:ngroups){
for(k in 1:nyears){
mu.gamma[i,k] <- exp(B0.gam + B1.gam * G[i,k] + eps.gam[k])
gamma[i,k] ~ dpois(mu.gamma[i,k])
}
}
############################################################
# 3. Bugs requirements
############################################################
}", fill=TRUE)
sink()
#### M3; FIX R ~ pack size + ran region ####
sink("M3_GroupRecIPM.txt")
cat("
model {
############################################################
# 1. Priors
############################################################
## 1.1 Occupancy priors
# psi1 coefficient (occupancy in year 1)
B0.psi1 ~ dnorm(0,0.001)
# Priors for transition probabilities (survival and colonization)
for(k in 1:(nyears-1)){
B0.colo[k] ~ dnorm(0,0.001)
}#k
B0.phi ~ dnorm(0,0.001)
# Priors for detection probabilities
B0.p11 ~ dnorm(0,0.001)
B0.p10 ~ dnorm(0,0.001)
B0.b ~ dnorm(0,0.001)
# Priors for covariates
b.pc1.psi ~ dnorm(0,0.001)
b.recPC.psi ~ dnorm(0,0.001)
b.pc1.colo ~ dnorm(0,0.001)
b.recPC.colo ~ dnorm(0,0.001)
b.pc1.phi ~ dnorm(0,0.001)
b.area.p11 ~ dnorm(0,0.001)
b.huntdays.p11 ~ dnorm(0,0.001)
b.acv.p11 ~ dnorm(0,0.001)
b.map.p11 ~ dnorm(0,0.001)
b.nonfrrds.p11 ~ dnorm(0,0.001)
b.frrds.p11 ~ dnorm(0,0.001)
b.huntdays.p10 ~ dnorm(0,0.001)
b.nonfrrds.p10 ~ dnorm(0,0.001)
b.frrds.p10 ~ dnorm(0,0.001)
b.acv.p10 ~ dnorm(0,0.001)
## 1.2 Territory priors
## 1.3 Survival priors
# Random effect for year
for(k in 1:nyears){
eps.surv[k] ~ dnorm(0, tau.surv)
}
sigma.surv ~ dunif(0,100)
tau.surv <- pow(sigma.surv, -2)
var.surv <- pow(sigma.surv, 2)
for(p in 1:nperiods){
b.period.surv[p] ~ dnorm(0,0.001)
}
## 1.4 Group priors
# Initial group sizes
for(i in 1:ngroups){
G[i,1] ~ dpois(7)T(2,)
}
# Process error
tauy.group <- pow(sigma.group, -2)
sigma.group ~ dunif(0,100)
var.group <- pow(sigma.group, 2)
## 1.5 Recruitment priors
# Priors for beta coefficients
B0.gam ~ dnorm(0,0.001)
B1.gam ~ dnorm(0,0.001)
# Random effect for region
for(r in 1:nregions){
eps.reg[r] ~ dnorm(0, tau.reg)
}
sigma.reg ~ dunif(0,100)
tau.reg <- pow(sigma.reg, -2)
var.reg <- pow(sigma.reg, 2)
############################################################
# 2. Likelihoods
############################################################
#####################
# 2.1. Occupancy likelihood
# Adapted from Sarah B Bassing
# Montana Cooperative Wildlife Research Unit
# August 2016.
# This is a DYNAMIC FALSE-POSITVE MULTI-SEASON occupancy model
# Encounter histories include:
# 1 = no detection
# 2 = uncertain detection
# 3 = certain detection
####################
# Ecological process/submodel
# Define State (z) conditional on parameters- Nmbr sites occupied
for(i in 1:nsites){
logit(psi1[i]) <- B0.psi1 + b.pc1.psi * PC1[i] + b.recPC.psi * recPC[i,1]
z[i,1] ~ dbern(psi1[i])
for(k in 1:(nyears-1)){
logit(phi[i,k]) <- B0.phi + b.pc1.phi * PC1[i]
logit(colo[i,k]) <- B0.colo[k] + b.pc1.colo * PC1[i] + b.recPC.colo * recPC[i,k+1]
}#k
for(k in 2:nyears){
muZ[i,k] <- z[i,k-1] * phi[i,k-1] + (1-z[i,k-1]) * colo[i,k-1]
z[i,k] ~ dbern(muZ[i,k])
}#k
}#i
# Observation process/submodel
# z is either 0 or 1 (unoccupied or occupied)
# y (observation dependent on the state z) can be 0,1,2 (no obs, uncertain obs,
# certain obs) but JAGS's multinomial link function, dcat(), needs y to be 1,2,3
# Observation process: define observations [y,i,j,k,z]
# y|z has a probability of...
# Detection probabilities are site, occasion, and year specific
for(i in 1:nsites){
for (j in 1:noccs){
for(k in 1:nyears){
p[1,i,j,k,1] <- (1 - p10[i,j,k])
p[1,i,j,k,2] <- (1 - p11[i,j,k])
p[2,i,j,k,1] <- p10[i,j,k]
p[2,i,j,k,2] <- (1 - b[i,j,k]) * p11[i,j,k]
p[3,i,j,k,1] <- 0
p[3,i,j,k,2] <- b[i,j,k] * p11[i,j,k]
}#k
}#j
}#i
# Need mulitnomial link function for false positive detections: dcat function in JAGS
# p11 is normal detction, p10 is false-positive dection (i.e., detected but wrong),
# b is certain detection
# Observation model
for(i in 1:nsites){
for(j in 1:noccs){
for(k in 1:nyears){
logit(p11[i,j,k]) <- B0.p11 + b.area.p11 * area[i] + b.huntdays.p11 * huntdays[i,j,k] + b.nonfrrds.p11 * nonforrds[i] + b.frrds.p11 * forrds[i] + b.acv.p11 * acv[i,j,k] + b.map.p11 * mapppn[i,j,k]
logit(p10[i,j,k]) <- B0.p10 + b.acv.p10 * acv[i,j,k] + b.huntdays.p10 * huntdays[i,j,k] + b.nonfrrds.p10 * nonforrds[i] + b.frrds.p10 * forrds[i]
logit(b[i,j,k]) <- B0.b
y.occ[i,j,k] ~ dcat(p[,i,j,k,(z[i,k]+1)])
}#k
}#j
}#i
# Derived parameters
for(i in 1:nsites){
psi[i,1] <- psi1[i]
for (k in 2:nyears){
psi[i,k] <- psi[i,k-1] * phi[i,k-1] + (1 - psi[i,k-1]) * colo[i,k-1]
}#k
}#i
# Area occpupied indexed by year and region
for(k in 1:nyears){
A[k] <- sum(psi[,k] * area[])
}
#####################
# 2.2. Territory model
# Input includes area occupied (A) indexed by year (k) from occupancy model (2.1.)
# Area occupied is divided by territory size (T), which is currently set to 600
# km squared, but will be based on data from Rich et al. 2012 for territory size
# Output is number of packs (P) indexed by year (k)
####################
# Pull in data for the mean for territory size
T3 ~ dlnorm(6.22985815, 1/0.58728123)
# Estimate number of packs from area occupied (A) and territory size (T)
for(k in 1:nyears){
P[k] <- (A[k] / (T3 + 0.000001)) * T.overlap[k]
}
#####################
# 2.3. Survival likelihood
# Current model is
# Output is survival indexed by year (k)
####################
# Estimate the harzard.
# This part transforms the linear predictor (mu.surv)
# using the cloglog link and relates it to the data (event) for each
# observation
for(i in 1:nobs){
event[i] ~ dbern(mu.surv[i])
cloglog(mu.surv[i]) <- b.period.surv[Period[i]] + eps.surv[Year[i]]
}#i
# Predicted values
# Baseline hazard
for(k in 1:nyears){
for(p in 1:nperiods){
cloglog(mu.pred[p,k]) <- b.period.surv[p] + eps.surv[k]
hazard[p,k] <- -log(1 - mu.pred[p,k])
}#p
}#k
# Cumulative hazard and survival
for(k in 1:nyears){
base.H[1,k] <- hazard[1,k] * width.interval[1]
for(p in 2:nperiods){
base.H[p,k] <- base.H[p-1, k] + hazard[p,k] * width.interval[p]
}#p
}#k
for(k in 1:nyears){
for(p in 1:nperiods){
base.s[p,k] <- exp(-base.H[p,k])
}#p
annual.s[k] <- base.s[length(width.interval), k]
}#k
# Compute posterior predictive check statistics (Bayesian p-value GOF):
# compare a chi-square-style discrepancy on the observed events to the same
# discrepancy computed on data replicated from the fitted model.
for(i in 1:nobs){
  # Expected values #### NOTE-Maybe multiply by nobs instead of 1? ####
  event.expected[i] <- 1 * mu.surv[i]
  # Fit statistic for actual data; pow() takes the exponent as its second
  # argument, i.e. pow(x, 2) = x^2 (the M1 model has the correct form).
  # + 0.01 guards against division by a near-zero expected value.
  event.chi[i] <- pow((event[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
  # Replicate data for GOF
  event.rep[i] ~ dbern(mu.surv[i])
  # Fit statistics for replicate data
  event.chi.new[i] <- pow((event.rep[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
}
# Sum once, outside the loop: JAGS does not allow the node fit to be
# (re)defined at every iteration of i.
fit <- sum(event.chi[])
fit.new <- sum(event.chi.new[])
#####################
# 2.4. Group level counts likelihood
# Input data are group counts (y.group)
# Input estimates are survival (s) from survival model indexed by year (k) and
# recruitment (number of pups per pack, gamma) indexed by year and group (i)
# Output is mean estimate of group size (G) which are indexed by year and group
####################
# Ecological model/ system process
for(i in 1:ngroups){
for(k in 2:nyears){
g.mu[i,k] <- G[i,k-1] * annual.s[k-1] * (1 - em.group) + gamma[i,k-1]
G[i,k] ~ dnorm(g.mu[i,k], 1 / (g.mu[i,k] + 0.00001))T(0,25)
}
}
# Observation proccess
for(i in 1:ngroups){
for(k in 1:nyears){
y.group[i,k] ~ dnorm(G[i,k], tauy.group)
}
}
# Derived parameters
for(k in 1:nyears){
G.mean[k] <- mean(G[,k] * annual.s[k] * (1 - em.group))
G.mean.high[k] <- mean(G[,k])
gamma.mean[k] <- mean(gamma[,k])
n.est[k] <- P[k] * G.mean[k]
}
for(k in 2:nyears){
pop.growth[k] <- n.est[k] / n.est[k-1]
}
#####################
# 2.5. Recruitment model
####################
# Generalized linear model with log link function for recruitment
for(i in 1:ngroups){
for(k in 1:nyears){
mu.gamma[i,k] <- exp(B0.gam + B1.gam * G[i,k] + eps.reg[GroupReg[i]])
gamma[i,k] ~ dpois(mu.gamma[i,k])
}
}
############################################################
# 3. Bugs requirements
############################################################
}", fill=TRUE)
sink()
#### M4; FIX R ~ pack size + ran year + ran region ####
# Writes the JAGS model file "M4_GroupRecIPM.txt" to the working directory.
# Integrated population model with occupancy (2.1), territory (2.2),
# survival (2.3), group-count (2.4) and recruitment (2.5) submodels.
# Recruitment: fixed pack-size effect + random year + random region.
# Fixes relative to the previous draft of this model string:
#  * event.chi[i]: misplaced parenthesis in pow() -- was pow((x,2)), which is a
#    JAGS parse error; now pow((x),2) matching the event.chi.new line.
#  * fit <- sum(event.chi[]) moved outside the nobs loop; defining it inside
#    redefines the node on every iteration, which JAGS rejects.
#  * eps.gam prior loop runs over 1:nyears (it is indexed by k in 1:nyears in
#    section 2.5; 1:(nyears-1) left eps.gam[nyears] undefined). This also
#    matches models M5-M7.
# NOTE(review): em.group is used in 2.4 with no prior here -- presumably passed
# in as data; confirm against the data list handed to jags().
sink("M4_GroupRecIPM.txt")
cat("
model {
############################################################
# 1. Priors
############################################################
## 1.1 Occupancy priors
# psi1 coefficient (occupancy in year 1)
B0.psi1 ~ dnorm(0,0.001)
# Priors for transition probabilities (survival and colonization)
for(k in 1:(nyears-1)){
B0.colo[k] ~ dnorm(0,0.001)
}#k
B0.phi ~ dnorm(0,0.001)
# Priors for detection probabilities
B0.p11 ~ dnorm(0,0.001)
B0.p10 ~ dnorm(0,0.001)
B0.b ~ dnorm(0,0.001)
# Priors for covariates
b.pc1.psi ~ dnorm(0,0.001)
b.recPC.psi ~ dnorm(0,0.001)
b.pc1.colo ~ dnorm(0,0.001)
b.recPC.colo ~ dnorm(0,0.001)
b.pc1.phi ~ dnorm(0,0.001)
b.area.p11 ~ dnorm(0,0.001)
b.huntdays.p11 ~ dnorm(0,0.001)
b.acv.p11 ~ dnorm(0,0.001)
b.map.p11 ~ dnorm(0,0.001)
b.nonfrrds.p11 ~ dnorm(0,0.001)
b.frrds.p11 ~ dnorm(0,0.001)
b.huntdays.p10 ~ dnorm(0,0.001)
b.nonfrrds.p10 ~ dnorm(0,0.001)
b.frrds.p10 ~ dnorm(0,0.001)
b.acv.p10 ~ dnorm(0,0.001)
## 1.2 Territory priors
## 1.3 Survival priors
# Random effect for year
for(k in 1:nyears){
eps.surv[k] ~ dnorm(0, tau.surv)
}
sigma.surv ~ dunif(0,100)
tau.surv <- pow(sigma.surv, -2)
var.surv <- pow(sigma.surv, 2)
for(p in 1:nperiods){
b.period.surv[p] ~ dnorm(0,0.001)
}
## 1.4 Group priors
# Initial group sizes
for(i in 1:ngroups){
G[i,1] ~ dpois(7)T(2,)
}
# Process error
tauy.group <- pow(sigma.group, -2)
sigma.group ~ dunif(0,100)
var.group <- pow(sigma.group, 2)
## 1.5 Recruitment priors
# Priors for beta coefficients
B0.gam ~ dnorm(0,0.001)
B1.gam ~ dnorm(0,0.001)
# Random effect for year
for(k in 1:nyears){
eps.gam[k] ~ dnorm(0, tau.gam)
}
sigma.gam ~ dunif(0,100)
tau.gam <- pow(sigma.gam, -2)
var.gam <- pow(sigma.gam, 2)
# Random effect for region
for(r in 1:nregions){
eps.reg[r] ~ dnorm(0, tau.reg)
}
sigma.reg ~ dunif(0,100)
tau.reg <- pow(sigma.reg, -2)
var.reg <- pow(sigma.reg, 2)
############################################################
# 2. Likelihoods
############################################################
#####################
# 2.1. Occupancy likelihood
# Adapted from Sarah B Bassing
# Montana Cooperative Wildlife Research Unit
# August 2016.
# This is a DYNAMIC FALSE-POSITVE MULTI-SEASON occupancy model
# Encounter histories include:
# 1 = no detection
# 2 = uncertain detection
# 3 = certain detection
####################
# Ecological process/submodel
# Define State (z) conditional on parameters- Nmbr sites occupied
for(i in 1:nsites){
logit(psi1[i]) <- B0.psi1 + b.pc1.psi * PC1[i] + b.recPC.psi * recPC[i,1]
z[i,1] ~ dbern(psi1[i])
for(k in 1:(nyears-1)){
logit(phi[i,k]) <- B0.phi + b.pc1.phi * PC1[i]
logit(colo[i,k]) <- B0.colo[k] + b.pc1.colo * PC1[i] + b.recPC.colo * recPC[i,k+1]
}#k
for(k in 2:nyears){
muZ[i,k] <- z[i,k-1] * phi[i,k-1] + (1-z[i,k-1]) * colo[i,k-1]
z[i,k] ~ dbern(muZ[i,k])
}#k
}#i
# Observation process/submodel
# z is either 0 or 1 (unoccupied or occupied)
# y (observation dependent on the state z) can be 0,1,2 (no obs, uncertain obs,
# certain obs) but JAGS's multinomial link function, dcat(), needs y to be 1,2,3
# Observation process: define observations [y,i,j,k,z]
# y|z has a probability of...
# Detection probabilities are site, occasion, and year specific
for(i in 1:nsites){
for (j in 1:noccs){
for(k in 1:nyears){
p[1,i,j,k,1] <- (1 - p10[i,j,k])
p[1,i,j,k,2] <- (1 - p11[i,j,k])
p[2,i,j,k,1] <- p10[i,j,k]
p[2,i,j,k,2] <- (1 - b[i,j,k]) * p11[i,j,k]
p[3,i,j,k,1] <- 0
p[3,i,j,k,2] <- b[i,j,k] * p11[i,j,k]
}#k
}#j
}#i
# Need mulitnomial link function for false positive detections: dcat function in JAGS
# p11 is normal detction, p10 is false-positive dection (i.e., detected but wrong),
# b is certain detection
# Observation model
for(i in 1:nsites){
for(j in 1:noccs){
for(k in 1:nyears){
logit(p11[i,j,k]) <- B0.p11 + b.area.p11 * area[i] + b.huntdays.p11 * huntdays[i,j,k] + b.nonfrrds.p11 * nonforrds[i] + b.frrds.p11 * forrds[i] + b.acv.p11 * acv[i,j,k] + b.map.p11 * mapppn[i,j,k]
logit(p10[i,j,k]) <- B0.p10 + b.acv.p10 * acv[i,j,k] + b.huntdays.p10 * huntdays[i,j,k] + b.nonfrrds.p10 * nonforrds[i] + b.frrds.p10 * forrds[i]
logit(b[i,j,k]) <- B0.b
y.occ[i,j,k] ~ dcat(p[,i,j,k,(z[i,k]+1)])
}#k
}#j
}#i
# Derived parameters
for(i in 1:nsites){
psi[i,1] <- psi1[i]
for (k in 2:nyears){
psi[i,k] <- psi[i,k-1] * phi[i,k-1] + (1 - psi[i,k-1]) * colo[i,k-1]
}#k
}#i
# Area occpupied indexed by year and region
for(k in 1:nyears){
A[k] <- sum(psi[,k] * area[])
}
#####################
# 2.2. Territory model
# Input includes area occupied (A) indexed by year (k) from occupancy model (2.1.)
# Area occupied is divided by territory size (T), which is currently set to 600
# km squared, but will be based on data from Rich et al. 2012 for territory size
# Output is number of packs (P) indexed by year (k)
####################
# Pull in data for the mean for territory size
T3 ~ dlnorm(6.22985815, 1/0.58728123)
# Estimate number of packs from area occupied (A) and territory size (T)
for(k in 1:nyears){
P[k] <- (A[k] / (T3 + 0.000001)) * T.overlap[k]
}
#####################
# 2.3. Survival likelihood
# Current model is
# Output is survival indexed by year (k)
####################
# Estimate the harzard.
# This part transforms the linear predictor (mu.surv)
# using the cloglog link and relates it to the data (event) for each
# observation
for(i in 1:nobs){
event[i] ~ dbern(mu.surv[i])
cloglog(mu.surv[i]) <- b.period.surv[Period[i]] + eps.surv[Year[i]]
}#i
# Predicted values
# Baseline hazard
for(k in 1:nyears){
for(p in 1:nperiods){
cloglog(mu.pred[p,k]) <- b.period.surv[p] + eps.surv[k]
hazard[p,k] <- -log(1 - mu.pred[p,k])
}#p
}#k
# Cumulative hazard and survival
for(k in 1:nyears){
base.H[1,k] <- hazard[1,k] * width.interval[1]
for(p in 2:nperiods){
base.H[p,k] <- base.H[p-1, k] + hazard[p,k] * width.interval[p]
}#p
}#k
for(k in 1:nyears){
for(p in 1:nperiods){
base.s[p,k] <- exp(-base.H[p,k])
}#p
annual.s[k] <- base.s[length(width.interval), k]
}#k
# Compute posterior predictive check statistics
for(i in 1:nobs){
# Expected values #### NOTE-Maybe multiple by nobs instead of 1? ####
event.expected[i] <- 1 * mu.surv[i]
# Fit statistic for actual data
event.chi[i] <- pow((event[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
# Replicate data for GOF
event.rep[i] ~ dbern(mu.surv[i])
# Fit statistics for replicate data
event.chi.new[i] <- pow((event.rep[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
}
fit <- sum(event.chi[])
fit.new <- sum(event.chi.new[])
#####################
# 2.4. Group level counts likelihood
# Input data are group counts (y.group)
# Input estimates are survival (s) from survival model indexed by year (k) and
# recruitment (number of pups per pack, gamma) indexed by year and group (i)
# Output is mean estimate of group size (G) which are indexed by year and group
####################
# Ecological model/ system process
for(i in 1:ngroups){
for(k in 2:nyears){
g.mu[i,k] <- G[i,k-1] * annual.s[k-1] * (1 - em.group) + gamma[i,k-1]
G[i,k] ~ dnorm(g.mu[i,k], 1 / (g.mu[i,k] + 0.00001))T(0,25)
}
}
# Observation proccess
for(i in 1:ngroups){
for(k in 1:nyears){
y.group[i,k] ~ dnorm(G[i,k], tauy.group)
}
}
# Derived parameters
for(k in 1:nyears){
G.mean[k] <- mean(G[,k] * annual.s[k] * (1 - em.group))
G.mean.high[k] <- mean(G[,k])
gamma.mean[k] <- mean(gamma[,k])
n.est[k] <- P[k] * G.mean[k]
}
for(k in 2:nyears){
pop.growth[k] <- n.est[k] / n.est[k-1]
}
#####################
# 2.5. Recruitment model
####################
# Generalized linear model with log link function for recruitment
for(i in 1:ngroups){
for(k in 1:nyears){
mu.gamma[i,k] <- exp(B0.gam + B1.gam * G[i,k] + eps.gam[k] + eps.reg[GroupReg[i]])
gamma[i,k] ~ dpois(mu.gamma[i,k])
}
}
############################################################
# 3. Bugs requirements
############################################################
}", fill=TRUE)
sink()
#### M5: FIX R ~ pack size + ran year + ran region + method ####
# Writes the JAGS model file "M5_GroupRecIPM.txt" to the working directory.
# Same IPM structure as M4 (occupancy, territory, survival, group counts,
# recruitment) plus a fixed effect of count method (b.method, 3 levels indexed
# by Method[k]) on recruitment.
# Fixes relative to the previous draft of this model string:
#  * event.chi[i]: misplaced parenthesis in pow() -- was pow((x,2)), which is a
#    JAGS parse error; now pow((x),2) matching the event.chi.new line.
#  * fit <- sum(event.chi[]) moved outside the nobs loop; defining it inside
#    redefines the node on every iteration, which JAGS rejects.
# NOTE(review): em.group is used in 2.4 with no prior here -- presumably passed
# in as data; confirm against the data list handed to jags().
sink("M5_GroupRecIPM.txt")
cat("
model {
############################################################
# 1. Priors
############################################################
## 1.1 Occupancy priors
# psi1 coefficient (occupancy in year 1)
B0.psi1 ~ dnorm(0,0.001)
# Priors for transition probabilities (survival and colonization)
for(k in 1:(nyears-1)){
B0.colo[k] ~ dnorm(0,0.001)
}#k
B0.phi ~ dnorm(0,0.001)
# Priors for detection probabilities
B0.p11 ~ dnorm(0,0.001)
B0.p10 ~ dnorm(0,0.001)
B0.b ~ dnorm(0,0.001)
# Priors for covariates
b.pc1.psi ~ dnorm(0,0.001)
b.recPC.psi ~ dnorm(0,0.001)
b.pc1.colo ~ dnorm(0,0.001)
b.recPC.colo ~ dnorm(0,0.001)
b.pc1.phi ~ dnorm(0,0.001)
b.area.p11 ~ dnorm(0,0.001)
b.huntdays.p11 ~ dnorm(0,0.001)
b.acv.p11 ~ dnorm(0,0.001)
b.map.p11 ~ dnorm(0,0.001)
b.nonfrrds.p11 ~ dnorm(0,0.001)
b.frrds.p11 ~ dnorm(0,0.001)
b.huntdays.p10 ~ dnorm(0,0.001)
b.nonfrrds.p10 ~ dnorm(0,0.001)
b.frrds.p10 ~ dnorm(0,0.001)
b.acv.p10 ~ dnorm(0,0.001)
## 1.2 Territory priors
## 1.3 Survival priors
# Random effect for year
for(k in 1:nyears){
eps.surv[k] ~ dnorm(0, tau.surv)
}
sigma.surv ~ dunif(0,100)
tau.surv <- pow(sigma.surv, -2)
var.surv <- pow(sigma.surv, 2)
for(p in 1:nperiods){
b.period.surv[p] ~ dnorm(0,0.001)
}
## 1.4 Group priors
# Initial group sizes
for(i in 1:ngroups){
G[i,1] ~ dpois(7)T(2,)
}
# Process error
tauy.group <- pow(sigma.group, -2)
sigma.group ~ dunif(0,100)
var.group <- pow(sigma.group, 2)
## 1.5 Recruitment priors
# Priors for beta coefficients
B0.gam ~ dnorm(0,0.001)
B1.gam ~ dnorm(0,0.001)
for(i in 1:3){
b.method[i] ~ dnorm(0, 0.001)
}
# Random effect for year
for(k in 1:nyears){
eps.gam[k] ~ dnorm(0, tau.gam)
}
sigma.gam ~ dunif(0,100)
tau.gam <- pow(sigma.gam, -2)
var.gam <- pow(sigma.gam, 2)
# Random effect for region
for(r in 1:nregions){
eps.reg[r] ~ dnorm(0, tau.reg)
}
sigma.reg ~ dunif(0,100)
tau.reg <- pow(sigma.reg, -2)
var.reg <- pow(sigma.reg, 2)
############################################################
# 2. Likelihoods
############################################################
#####################
# 2.1. Occupancy likelihood
# Adapted from Sarah B Bassing
# Montana Cooperative Wildlife Research Unit
# August 2016.
# This is a DYNAMIC FALSE-POSITVE MULTI-SEASON occupancy model
# Encounter histories include:
# 1 = no detection
# 2 = uncertain detection
# 3 = certain detection
####################
# Ecological process/submodel
# Define State (z) conditional on parameters- Nmbr sites occupied
for(i in 1:nsites){
logit(psi1[i]) <- B0.psi1 + b.pc1.psi * PC1[i] + b.recPC.psi * recPC[i,1]
z[i,1] ~ dbern(psi1[i])
for(k in 1:(nyears-1)){
logit(phi[i,k]) <- B0.phi + b.pc1.phi * PC1[i]
logit(colo[i,k]) <- B0.colo[k] + b.pc1.colo * PC1[i] + b.recPC.colo * recPC[i,k+1]
}#k
for(k in 2:nyears){
muZ[i,k] <- z[i,k-1] * phi[i,k-1] + (1-z[i,k-1]) * colo[i,k-1]
z[i,k] ~ dbern(muZ[i,k])
}#k
}#i
# Observation process/submodel
# z is either 0 or 1 (unoccupied or occupied)
# y (observation dependent on the state z) can be 0,1,2 (no obs, uncertain obs,
# certain obs) but JAGS's multinomial link function, dcat(), needs y to be 1,2,3
# Observation process: define observations [y,i,j,k,z]
# y|z has a probability of...
# Detection probabilities are site, occasion, and year specific
for(i in 1:nsites){
for (j in 1:noccs){
for(k in 1:nyears){
p[1,i,j,k,1] <- (1 - p10[i,j,k])
p[1,i,j,k,2] <- (1 - p11[i,j,k])
p[2,i,j,k,1] <- p10[i,j,k]
p[2,i,j,k,2] <- (1 - b[i,j,k]) * p11[i,j,k]
p[3,i,j,k,1] <- 0
p[3,i,j,k,2] <- b[i,j,k] * p11[i,j,k]
}#k
}#j
}#i
# Need mulitnomial link function for false positive detections: dcat function in JAGS
# p11 is normal detction, p10 is false-positive dection (i.e., detected but wrong),
# b is certain detection
# Observation model
for(i in 1:nsites){
for(j in 1:noccs){
for(k in 1:nyears){
logit(p11[i,j,k]) <- B0.p11 + b.area.p11 * area[i] + b.huntdays.p11 * huntdays[i,j,k] + b.nonfrrds.p11 * nonforrds[i] + b.frrds.p11 * forrds[i] + b.acv.p11 * acv[i,j,k] + b.map.p11 * mapppn[i,j,k]
logit(p10[i,j,k]) <- B0.p10 + b.acv.p10 * acv[i,j,k] + b.huntdays.p10 * huntdays[i,j,k] + b.nonfrrds.p10 * nonforrds[i] + b.frrds.p10 * forrds[i]
logit(b[i,j,k]) <- B0.b
y.occ[i,j,k] ~ dcat(p[,i,j,k,(z[i,k]+1)])
}#k
}#j
}#i
# Derived parameters
for(i in 1:nsites){
psi[i,1] <- psi1[i]
for (k in 2:nyears){
psi[i,k] <- psi[i,k-1] * phi[i,k-1] + (1 - psi[i,k-1]) * colo[i,k-1]
}#k
}#i
# Area occpupied indexed by year and region
for(k in 1:nyears){
A[k] <- sum(psi[,k] * area[])
}
#####################
# 2.2. Territory model
# Input includes area occupied (A) indexed by year (k) from occupancy model (2.1.)
# Area occupied is divided by territory size (T), which is currently set to 600
# km squared, but will be based on data from Rich et al. 2012 for territory size
# Output is number of packs (P) indexed by year (k)
####################
# Pull in data for the mean for territory size
T3 ~ dlnorm(6.22985815, 1/0.58728123)
# Estimate number of packs from area occupied (A) and territory size (T)
for(k in 1:nyears){
P[k] <- (A[k] / (T3 + 0.000001)) * T.overlap[k]
}
#####################
# 2.3. Survival likelihood
# Current model is
# Output is survival indexed by year (k)
####################
# Estimate the harzard.
# This part transforms the linear predictor (mu.surv)
# using the cloglog link and relates it to the data (event) for each
# observation
for(i in 1:nobs){
event[i] ~ dbern(mu.surv[i])
cloglog(mu.surv[i]) <- b.period.surv[Period[i]] + eps.surv[Year[i]]
}#i
# Predicted values
# Baseline hazard
for(k in 1:nyears){
for(p in 1:nperiods){
cloglog(mu.pred[p,k]) <- b.period.surv[p] + eps.surv[k]
hazard[p,k] <- -log(1 - mu.pred[p,k])
}#p
}#k
# Cumulative hazard and survival
for(k in 1:nyears){
base.H[1,k] <- hazard[1,k] * width.interval[1]
for(p in 2:nperiods){
base.H[p,k] <- base.H[p-1, k] + hazard[p,k] * width.interval[p]
}#p
}#k
for(k in 1:nyears){
for(p in 1:nperiods){
base.s[p,k] <- exp(-base.H[p,k])
}#p
annual.s[k] <- base.s[length(width.interval), k]
}#k
# Compute posterior predictive check statistics
for(i in 1:nobs){
# Expected values #### NOTE-Maybe multiple by nobs instead of 1? ####
event.expected[i] <- 1 * mu.surv[i]
# Fit statistic for actual data
event.chi[i] <- pow((event[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
# Replicate data for GOF
event.rep[i] ~ dbern(mu.surv[i])
# Fit statistics for replicate data
event.chi.new[i] <- pow((event.rep[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
}
fit <- sum(event.chi[])
fit.new <- sum(event.chi.new[])
#####################
# 2.4. Group level counts likelihood
# Input data are group counts (y.group)
# Input estimates are survival (s) from survival model indexed by year (k) and
# recruitment (number of pups per pack, gamma) indexed by year and group (i)
# Output is mean estimate of group size (G) which are indexed by year and group
####################
# Ecological model/ system process
for(i in 1:ngroups){
for(k in 2:nyears){
g.mu[i,k] <- G[i,k-1] * annual.s[k-1] * (1 - em.group) + gamma[i,k-1]
G[i,k] ~ dnorm(g.mu[i,k], 1 / (g.mu[i,k] + 0.00001))T(0,25)
}
}
# Observation proccess
for(i in 1:ngroups){
for(k in 1:nyears){
y.group[i,k] ~ dnorm(G[i,k], tauy.group)
}
}
# Derived parameters
for(k in 1:nyears){
G.mean[k] <- mean(G[,k] * annual.s[k] * (1 - em.group))
G.mean.high[k] <- mean(G[,k])
gamma.mean[k] <- mean(gamma[,k])
n.est[k] <- P[k] * G.mean[k]
}
for(k in 2:nyears){
pop.growth[k] <- n.est[k] / n.est[k-1]
}
#####################
# 2.5. Recruitment model
####################
# Generalized linear model with log link function for recruitment
for(i in 1:ngroups){
for(k in 1:nyears){
mu.gamma[i,k] <- exp(B0.gam + B1.gam * G[i,k] + eps.gam[k] + eps.reg[GroupReg[i]] + b.method[Method[k]])
gamma[i,k] ~ dpois(mu.gamma[i,k])
}
}
############################################################
# 3. Bugs requirements
############################################################
}", fill=TRUE)
sink()
#### M6: FIX R ~ pack size + ran year + ran region + 4wd + 2wd ####
# Writes the JAGS model file "M6_GroupRecIPM.txt" to the working directory.
# Same IPM structure as M4 plus fixed effects of two-wheel-drive (b.2wd) and
# four-wheel-drive (b.4wd) road covariates on recruitment, with dnorm(0,1)
# priors standing in for missing covariate values.
# Fixes relative to the previous draft of this model string:
#  * event.chi[i]: misplaced parenthesis in pow() -- was pow((x,2)), which is a
#    JAGS parse error; now pow((x),2) matching the event.chi.new line.
#  * fit <- sum(event.chi[]) moved outside the nobs loop; defining it inside
#    redefines the node on every iteration, which JAGS rejects.
# NOTE(review): em.group is used in 2.4 with no prior here -- presumably passed
# in as data; confirm against the data list handed to jags().
sink("M6_GroupRecIPM.txt")
cat("
model {
############################################################
# 1. Priors
############################################################
## 1.1 Occupancy priors
# psi1 coefficient (occupancy in year 1)
B0.psi1 ~ dnorm(0,0.001)
# Priors for transition probabilities (survival and colonization)
for(k in 1:(nyears-1)){
B0.colo[k] ~ dnorm(0,0.001)
}#k
B0.phi ~ dnorm(0,0.001)
# Priors for detection probabilities
B0.p11 ~ dnorm(0,0.001)
B0.p10 ~ dnorm(0,0.001)
B0.b ~ dnorm(0,0.001)
# Priors for covariates
b.pc1.psi ~ dnorm(0,0.001)
b.recPC.psi ~ dnorm(0,0.001)
b.pc1.colo ~ dnorm(0,0.001)
b.recPC.colo ~ dnorm(0,0.001)
b.pc1.phi ~ dnorm(0,0.001)
b.area.p11 ~ dnorm(0,0.001)
b.huntdays.p11 ~ dnorm(0,0.001)
b.acv.p11 ~ dnorm(0,0.001)
b.map.p11 ~ dnorm(0,0.001)
b.nonfrrds.p11 ~ dnorm(0,0.001)
b.frrds.p11 ~ dnorm(0,0.001)
b.huntdays.p10 ~ dnorm(0,0.001)
b.nonfrrds.p10 ~ dnorm(0,0.001)
b.frrds.p10 ~ dnorm(0,0.001)
b.acv.p10 ~ dnorm(0,0.001)
## 1.2 Territory priors
## 1.3 Survival priors
# Random effect for year
for(k in 1:nyears){
eps.surv[k] ~ dnorm(0, tau.surv)
}
sigma.surv ~ dunif(0,100)
tau.surv <- pow(sigma.surv, -2)
var.surv <- pow(sigma.surv, 2)
for(p in 1:nperiods){
b.period.surv[p] ~ dnorm(0,0.001)
}
## 1.4 Group priors
# Initial group sizes
for(i in 1:ngroups){
G[i,1] ~ dpois(7)T(2,)
}
# Process error
tauy.group <- pow(sigma.group, -2)
sigma.group ~ dunif(0,100)
var.group <- pow(sigma.group, 2)
## 1.5 Recruitment priors
# Priors for beta coefficients
B0.gam ~ dnorm(0,0.001)
B1.gam ~ dnorm(0,0.001)
b.2wd ~ dnorm(0, 0.001)
b.4wd ~ dnorm(0, 0.001)
# Prior for missing data
for(i in 1:ngroups){
FourWD[i] ~ dnorm(0,1)
TwoWD[i] ~ dnorm(0,1)
}
# Random effect for year
for(k in 1:nyears){
eps.gam[k] ~ dnorm(0, tau.gam)
}
sigma.gam ~ dunif(0,100)
tau.gam <- pow(sigma.gam, -2)
var.gam <- pow(sigma.gam, 2)
# Random effect for region
for(r in 1:nregions){
eps.reg[r] ~ dnorm(0, tau.reg)
}
sigma.reg ~ dunif(0,100)
tau.reg <- pow(sigma.reg, -2)
var.reg <- pow(sigma.reg, 2)
############################################################
# 2. Likelihoods
############################################################
#####################
# 2.1. Occupancy likelihood
# Adapted from Sarah B Bassing
# Montana Cooperative Wildlife Research Unit
# August 2016.
# This is a DYNAMIC FALSE-POSITVE MULTI-SEASON occupancy model
# Encounter histories include:
# 1 = no detection
# 2 = uncertain detection
# 3 = certain detection
####################
# Ecological process/submodel
# Define State (z) conditional on parameters- Nmbr sites occupied
for(i in 1:nsites){
logit(psi1[i]) <- B0.psi1 + b.pc1.psi * PC1[i] + b.recPC.psi * recPC[i,1]
z[i,1] ~ dbern(psi1[i])
for(k in 1:(nyears-1)){
logit(phi[i,k]) <- B0.phi + b.pc1.phi * PC1[i]
logit(colo[i,k]) <- B0.colo[k] + b.pc1.colo * PC1[i] + b.recPC.colo * recPC[i,k+1]
}#k
for(k in 2:nyears){
muZ[i,k] <- z[i,k-1] * phi[i,k-1] + (1-z[i,k-1]) * colo[i,k-1]
z[i,k] ~ dbern(muZ[i,k])
}#k
}#i
# Observation process/submodel
# z is either 0 or 1 (unoccupied or occupied)
# y (observation dependent on the state z) can be 0,1,2 (no obs, uncertain obs,
# certain obs) but JAGS's multinomial link function, dcat(), needs y to be 1,2,3
# Observation process: define observations [y,i,j,k,z]
# y|z has a probability of...
# Detection probabilities are site, occasion, and year specific
for(i in 1:nsites){
for (j in 1:noccs){
for(k in 1:nyears){
p[1,i,j,k,1] <- (1 - p10[i,j,k])
p[1,i,j,k,2] <- (1 - p11[i,j,k])
p[2,i,j,k,1] <- p10[i,j,k]
p[2,i,j,k,2] <- (1 - b[i,j,k]) * p11[i,j,k]
p[3,i,j,k,1] <- 0
p[3,i,j,k,2] <- b[i,j,k] * p11[i,j,k]
}#k
}#j
}#i
# Need mulitnomial link function for false positive detections: dcat function in JAGS
# p11 is normal detction, p10 is false-positive dection (i.e., detected but wrong),
# b is certain detection
# Observation model
for(i in 1:nsites){
for(j in 1:noccs){
for(k in 1:nyears){
logit(p11[i,j,k]) <- B0.p11 + b.area.p11 * area[i] + b.huntdays.p11 * huntdays[i,j,k] + b.nonfrrds.p11 * nonforrds[i] + b.frrds.p11 * forrds[i] + b.acv.p11 * acv[i,j,k] + b.map.p11 * mapppn[i,j,k]
logit(p10[i,j,k]) <- B0.p10 + b.acv.p10 * acv[i,j,k] + b.huntdays.p10 * huntdays[i,j,k] + b.nonfrrds.p10 * nonforrds[i] + b.frrds.p10 * forrds[i]
logit(b[i,j,k]) <- B0.b
y.occ[i,j,k] ~ dcat(p[,i,j,k,(z[i,k]+1)])
}#k
}#j
}#i
# Derived parameters
for(i in 1:nsites){
psi[i,1] <- psi1[i]
for (k in 2:nyears){
psi[i,k] <- psi[i,k-1] * phi[i,k-1] + (1 - psi[i,k-1]) * colo[i,k-1]
}#k
}#i
# Area occpupied indexed by year and region
for(k in 1:nyears){
A[k] <- sum(psi[,k] * area[])
}
#####################
# 2.2. Territory model
# Input includes area occupied (A) indexed by year (k) from occupancy model (2.1.)
# Area occupied is divided by territory size (T), which is currently set to 600
# km squared, but will be based on data from Rich et al. 2012 for territory size
# Output is number of packs (P) indexed by year (k)
####################
# Pull in data for the mean for territory size
T3 ~ dlnorm(6.22985815, 1/0.58728123)
# Estimate number of packs from area occupied (A) and territory size (T)
for(k in 1:nyears){
P[k] <- (A[k] / (T3 + 0.000001)) * T.overlap[k]
}
#####################
# 2.3. Survival likelihood
# Current model is
# Output is survival indexed by year (k)
####################
# Estimate the harzard.
# This part transforms the linear predictor (mu.surv)
# using the cloglog link and relates it to the data (event) for each
# observation
for(i in 1:nobs){
event[i] ~ dbern(mu.surv[i])
cloglog(mu.surv[i]) <- b.period.surv[Period[i]] + eps.surv[Year[i]]
}#i
# Predicted values
# Baseline hazard
for(k in 1:nyears){
for(p in 1:nperiods){
cloglog(mu.pred[p,k]) <- b.period.surv[p] + eps.surv[k]
hazard[p,k] <- -log(1 - mu.pred[p,k])
}#p
}#k
# Cumulative hazard and survival
for(k in 1:nyears){
base.H[1,k] <- hazard[1,k] * width.interval[1]
for(p in 2:nperiods){
base.H[p,k] <- base.H[p-1, k] + hazard[p,k] * width.interval[p]
}#p
}#k
for(k in 1:nyears){
for(p in 1:nperiods){
base.s[p,k] <- exp(-base.H[p,k])
}#p
annual.s[k] <- base.s[length(width.interval), k]
}#k
# Compute posterior predictive check statistics
for(i in 1:nobs){
# Expected values #### NOTE-Maybe multiple by nobs instead of 1? ####
event.expected[i] <- 1 * mu.surv[i]
# Fit statistic for actual data
event.chi[i] <- pow((event[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
# Replicate data for GOF
event.rep[i] ~ dbern(mu.surv[i])
# Fit statistics for replicate data
event.chi.new[i] <- pow((event.rep[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
}
fit <- sum(event.chi[])
fit.new <- sum(event.chi.new[])
#####################
# 2.4. Group level counts likelihood
# Input data are group counts (y.group)
# Input estimates are survival (s) from survival model indexed by year (k) and
# recruitment (number of pups per pack, gamma) indexed by year and group (i)
# Output is mean estimate of group size (G) which are indexed by year and group
####################
# Ecological model/ system process
for(i in 1:ngroups){
for(k in 2:nyears){
g.mu[i,k] <- G[i,k-1] * annual.s[k-1] * (1 - em.group) + gamma[i,k-1]
G[i,k] ~ dnorm(g.mu[i,k], 1 / (g.mu[i,k] + 0.00001))T(0,25)
}
}
# Observation proccess
for(i in 1:ngroups){
for(k in 1:nyears){
y.group[i,k] ~ dnorm(G[i,k], tauy.group)
}
}
# Derived parameters
for(k in 1:nyears){
G.mean[k] <- mean(G[,k] * annual.s[k] * (1 - em.group))
G.mean.high[k] <- mean(G[,k])
gamma.mean[k] <- mean(gamma[,k])
n.est[k] <- P[k] * G.mean[k]
}
for(k in 2:nyears){
pop.growth[k] <- n.est[k] / n.est[k-1]
}
#####################
# 2.5. Recruitment model
####################
# Generalized linear model with log link function for recruitment
for(i in 1:ngroups){
for(k in 1:nyears){
mu.gamma[i,k] <- exp(B0.gam + B1.gam * G[i,k] + eps.gam[k] + eps.reg[GroupReg[i]] + b.2wd * TwoWD[i] + b.4wd * FourWD[i])
gamma[i,k] ~ dpois(mu.gamma[i,k])
}
}
############################################################
# 3. Bugs requirements
############################################################
}", fill=TRUE)
sink()
#### M7: FIX R ~ pack size + ran year + ran region + forcov ####
# Writes the JAGS model file "M7_GroupRecIPM.txt" to the working directory.
# Same IPM structure as M4 plus a fixed effect of forest cover (b.forest) on
# recruitment, with a dnorm(0,1) prior standing in for missing Forest values.
# Fixes relative to the previous draft of this model string:
#  * event.chi[i]: misplaced parenthesis in pow() -- was pow((x,2)), which is a
#    JAGS parse error; now pow((x),2) matching the event.chi.new line.
#  * fit <- sum(event.chi[]) moved outside the nobs loop; defining it inside
#    redefines the node on every iteration, which JAGS rejects.
# NOTE(review): em.group is used in 2.4 with no prior here -- presumably passed
# in as data; confirm against the data list handed to jags().
sink("M7_GroupRecIPM.txt")
cat("
model {
############################################################
# 1. Priors
############################################################
## 1.1 Occupancy priors
# psi1 coefficient (occupancy in year 1)
B0.psi1 ~ dnorm(0,0.001)
# Priors for transition probabilities (survival and colonization)
for(k in 1:(nyears-1)){
B0.colo[k] ~ dnorm(0,0.001)
}#k
B0.phi ~ dnorm(0,0.001)
# Priors for detection probabilities
B0.p11 ~ dnorm(0,0.001)
B0.p10 ~ dnorm(0,0.001)
B0.b ~ dnorm(0,0.001)
# Priors for covariates
b.pc1.psi ~ dnorm(0,0.001)
b.recPC.psi ~ dnorm(0,0.001)
b.pc1.colo ~ dnorm(0,0.001)
b.recPC.colo ~ dnorm(0,0.001)
b.pc1.phi ~ dnorm(0,0.001)
b.area.p11 ~ dnorm(0,0.001)
b.huntdays.p11 ~ dnorm(0,0.001)
b.acv.p11 ~ dnorm(0,0.001)
b.map.p11 ~ dnorm(0,0.001)
b.nonfrrds.p11 ~ dnorm(0,0.001)
b.frrds.p11 ~ dnorm(0,0.001)
b.huntdays.p10 ~ dnorm(0,0.001)
b.nonfrrds.p10 ~ dnorm(0,0.001)
b.frrds.p10 ~ dnorm(0,0.001)
b.acv.p10 ~ dnorm(0,0.001)
## 1.2 Territory priors
## 1.3 Survival priors
# Random effect for year
for(k in 1:nyears){
eps.surv[k] ~ dnorm(0, tau.surv)
}
sigma.surv ~ dunif(0,100)
tau.surv <- pow(sigma.surv, -2)
var.surv <- pow(sigma.surv, 2)
for(p in 1:nperiods){
b.period.surv[p] ~ dnorm(0,0.001)
}
## 1.4 Group priors
# Initial group sizes
for(i in 1:ngroups){
G[i,1] ~ dpois(7)T(2,)
}
# Process error
tauy.group <- pow(sigma.group, -2)
sigma.group ~ dunif(0,100)
var.group <- pow(sigma.group, 2)
## 1.5 Recruitment priors
# Priors for beta coefficients
B0.gam ~ dnorm(0,0.001)
B1.gam ~ dnorm(0,0.001)
b.forest ~ dnorm(0, 0.001)
# Prior for missing data
for(i in 1:ngroups){
Forest[i] ~ dnorm(0,1)
}
# Random effect for year
for(k in 1:nyears){
eps.gam[k] ~ dnorm(0, tau.gam)
}
sigma.gam ~ dunif(0,100)
tau.gam <- pow(sigma.gam, -2)
var.gam <- pow(sigma.gam, 2)
# Random effect for region
for(r in 1:nregions){
eps.reg[r] ~ dnorm(0, tau.reg)
}
sigma.reg ~ dunif(0,100)
tau.reg <- pow(sigma.reg, -2)
var.reg <- pow(sigma.reg, 2)
############################################################
# 2. Likelihoods
############################################################
#####################
# 2.1. Occupancy likelihood
# Adapted from Sarah B Bassing
# Montana Cooperative Wildlife Research Unit
# August 2016.
# This is a DYNAMIC FALSE-POSITVE MULTI-SEASON occupancy model
# Encounter histories include:
# 1 = no detection
# 2 = uncertain detection
# 3 = certain detection
####################
# Ecological process/submodel
# Define State (z) conditional on parameters- Nmbr sites occupied
for(i in 1:nsites){
logit(psi1[i]) <- B0.psi1 + b.pc1.psi * PC1[i] + b.recPC.psi * recPC[i,1]
z[i,1] ~ dbern(psi1[i])
for(k in 1:(nyears-1)){
logit(phi[i,k]) <- B0.phi + b.pc1.phi * PC1[i]
logit(colo[i,k]) <- B0.colo[k] + b.pc1.colo * PC1[i] + b.recPC.colo * recPC[i,k+1]
}#k
for(k in 2:nyears){
muZ[i,k] <- z[i,k-1] * phi[i,k-1] + (1-z[i,k-1]) * colo[i,k-1]
z[i,k] ~ dbern(muZ[i,k])
}#k
}#i
# Observation process/submodel
# z is either 0 or 1 (unoccupied or occupied)
# y (observation dependent on the state z) can be 0,1,2 (no obs, uncertain obs,
# certain obs) but JAGS's multinomial link function, dcat(), needs y to be 1,2,3
# Observation process: define observations [y,i,j,k,z]
# y|z has a probability of...
# Detection probabilities are site, occasion, and year specific
for(i in 1:nsites){
for (j in 1:noccs){
for(k in 1:nyears){
p[1,i,j,k,1] <- (1 - p10[i,j,k])
p[1,i,j,k,2] <- (1 - p11[i,j,k])
p[2,i,j,k,1] <- p10[i,j,k]
p[2,i,j,k,2] <- (1 - b[i,j,k]) * p11[i,j,k]
p[3,i,j,k,1] <- 0
p[3,i,j,k,2] <- b[i,j,k] * p11[i,j,k]
}#k
}#j
}#i
# Need mulitnomial link function for false positive detections: dcat function in JAGS
# p11 is normal detction, p10 is false-positive dection (i.e., detected but wrong),
# b is certain detection
# Observation model
for(i in 1:nsites){
for(j in 1:noccs){
for(k in 1:nyears){
logit(p11[i,j,k]) <- B0.p11 + b.area.p11 * area[i] + b.huntdays.p11 * huntdays[i,j,k] + b.nonfrrds.p11 * nonforrds[i] + b.frrds.p11 * forrds[i] + b.acv.p11 * acv[i,j,k] + b.map.p11 * mapppn[i,j,k]
logit(p10[i,j,k]) <- B0.p10 + b.acv.p10 * acv[i,j,k] + b.huntdays.p10 * huntdays[i,j,k] + b.nonfrrds.p10 * nonforrds[i] + b.frrds.p10 * forrds[i]
logit(b[i,j,k]) <- B0.b
y.occ[i,j,k] ~ dcat(p[,i,j,k,(z[i,k]+1)])
}#k
}#j
}#i
# Derived parameters
for(i in 1:nsites){
psi[i,1] <- psi1[i]
for (k in 2:nyears){
psi[i,k] <- psi[i,k-1] * phi[i,k-1] + (1 - psi[i,k-1]) * colo[i,k-1]
}#k
}#i
# Area occpupied indexed by year and region
for(k in 1:nyears){
A[k] <- sum(psi[,k] * area[])
}
#####################
# 2.2. Territory model
# Input includes area occupied (A) indexed by year (k) from occupancy model (2.1.)
# Area occupied is divided by territory size (T), which is currently set to 600
# km squared, but will be based on data from Rich et al. 2012 for territory size
# Output is number of packs (P) indexed by year (k)
####################
# Pull in data for the mean for territory size
T3 ~ dlnorm(6.22985815, 1/0.58728123)
# Estimate number of packs from area occupied (A) and territory size (T)
for(k in 1:nyears){
P[k] <- (A[k] / (T3 + 0.000001)) * T.overlap[k]
}
#####################
# 2.3. Survival likelihood
# Current model is
# Output is survival indexed by year (k)
####################
# Estimate the harzard.
# This part transforms the linear predictor (mu.surv)
# using the cloglog link and relates it to the data (event) for each
# observation
for(i in 1:nobs){
event[i] ~ dbern(mu.surv[i])
cloglog(mu.surv[i]) <- b.period.surv[Period[i]] + eps.surv[Year[i]]
}#i
# Predicted values
# Baseline hazard
for(k in 1:nyears){
for(p in 1:nperiods){
cloglog(mu.pred[p,k]) <- b.period.surv[p] + eps.surv[k]
hazard[p,k] <- -log(1 - mu.pred[p,k])
}#p
}#k
# Cumulative hazard and survival
for(k in 1:nyears){
base.H[1,k] <- hazard[1,k] * width.interval[1]
for(p in 2:nperiods){
base.H[p,k] <- base.H[p-1, k] + hazard[p,k] * width.interval[p]
}#p
}#k
for(k in 1:nyears){
for(p in 1:nperiods){
base.s[p,k] <- exp(-base.H[p,k])
}#p
annual.s[k] <- base.s[length(width.interval), k]
}#k
# Compute posterior predictive check statistics
for(i in 1:nobs){
# Expected values #### NOTE-Maybe multiple by nobs instead of 1? ####
event.expected[i] <- 1 * mu.surv[i]
# Fit statistic for actual data
event.chi[i] <- pow((event[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
# Replicate data for GOF
event.rep[i] ~ dbern(mu.surv[i])
# Fit statistics for replicate data
event.chi.new[i] <- pow((event.rep[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
}
fit <- sum(event.chi[])
fit.new <- sum(event.chi.new[])
#####################
# 2.4. Group level counts likelihood
# Input data are group counts (y.group)
# Input estimates are survival (s) from survival model indexed by year (k) and
# recruitment (number of pups per pack, gamma) indexed by year and group (i)
# Output is mean estimate of group size (G) which are indexed by year and group
####################
# Ecological model/ system process
for(i in 1:ngroups){
for(k in 2:nyears){
g.mu[i,k] <- G[i,k-1] * annual.s[k-1] * (1 - em.group) + gamma[i,k-1]
G[i,k] ~ dnorm(g.mu[i,k], 1 / (g.mu[i,k] + 0.00001))T(0,25)
}
}
# Observation proccess
for(i in 1:ngroups){
for(k in 1:nyears){
y.group[i,k] ~ dnorm(G[i,k], tauy.group)
}
}
# Derived parameters
for(k in 1:nyears){
G.mean[k] <- mean(G[,k] * annual.s[k] * (1 - em.group))
G.mean.high[k] <- mean(G[,k])
gamma.mean[k] <- mean(gamma[,k])
n.est[k] <- P[k] * G.mean[k]
}
for(k in 2:nyears){
pop.growth[k] <- n.est[k] / n.est[k-1]
}
#####################
# 2.5. Recruitment model
####################
# Generalized linear model with log link function for recruitment
for(i in 1:ngroups){
for(k in 1:nyears){
mu.gamma[i,k] <- exp(B0.gam + B1.gam * G[i,k] + eps.gam[k] + eps.reg[GroupReg[i]] + b.forest * Forest[i])
gamma[i,k] ~ dpois(mu.gamma[i,k])
}
}
############################################################
# 3. Bugs requirements
############################################################
}", fill=TRUE)
sink()
#### M8: FIX R ~ pack size + ran year + ran region + ####
#### M9: R ~ pack size + ran year + ran region + dd ####
# Writes the JAGS model text for model M9 to 'M9_GroupRecIPM.txt' in the
# working directory.  M9 is an integrated population model with five linked
# sub-models (occupancy, territory, survival, group counts, recruitment);
# its recruitment submodel adds a density-dependence term (b.dd * LogN[k])
# on top of pack size plus random year and random region effects.
# NOTE(review): the model text is a single string literal, so the names it
# references (nsites, nyears, Harv, LogN, mu.G, sd.G, ...) are JAGS data /
# parameter names, not R objects — presumably supplied in the jags() data
# list by the caller; confirm against the fitting code.
sink("M9_GroupRecIPM.txt")
cat("
    model {
      ############################################################
      # 1. Priors
      ############################################################
      ## 1.1 Occupancy priors
      # psi1 coefficient (occupancy in year 1)
      B0.psi1 ~ dnorm(0,0.001)
      # Priors for transition probabilities (survival and colonization)
      for(k in 1:(nyears-1)){
        B0.colo[k] ~ dnorm(0,0.001)
      }#k
      B0.phi ~ dnorm(0,0.001)
      # Priors for detection probabilities
      B0.p11 ~ dnorm(0,0.001)
      B0.p10 ~ dnorm(0,0.001)
      B0.b ~ dnorm(0,0.001)
      # Priors for covariates
      b.pc1.psi ~ dnorm(0,0.001)
      b.recPC.psi ~ dnorm(0,0.001)
      b.pc1.colo ~ dnorm(0,0.001)
      b.recPC.colo ~ dnorm(0,0.001)
      b.pc1.phi ~ dnorm(0,0.001)
      b.area.p11 ~ dnorm(0,0.001)
      b.huntdays.p11 ~ dnorm(0,0.001)
      b.acv.p11 ~ dnorm(0,0.001)
      b.map.p11 ~ dnorm(0,0.001)
      b.nonfrrds.p11 ~ dnorm(0,0.001)
      b.frrds.p11 ~ dnorm(0,0.001)
      b.huntdays.p10 ~ dnorm(0,0.001)
      b.nonfrrds.p10 ~ dnorm(0,0.001)
      b.frrds.p10 ~ dnorm(0,0.001)
      b.acv.p10 ~ dnorm(0,0.001)
      ## 1.2 Territory priors
      ## 1.3 Survival priors
      # Random effect for year
      for(k in 1:nyears){
        eps.surv[k] ~ dnorm(0, tau.surv)
      }
      sigma.surv ~ dunif(0,100)
      tau.surv <- pow(sigma.surv, -2)
      var.surv <- pow(sigma.surv, 2)
      for(p in 1:nperiods){
        b.period.surv[p] ~ dnorm(0,0.001)
      }
      ## 1.4 Group priors
      # Initial group sizes
      for(i in 1:ngroups){
        G[i,1] ~ dpois(7)T(2,)
      }
      # Process error
      tauy.group <- pow(sigma.group, -2)
      sigma.group ~ dunif(0,100)
      var.group <- pow(sigma.group, 2)
      ## 1.5 Recruitment priors
      # Priors for beta coefficients
      B0.gam ~ dnorm(0,0.001)
      B1.gam ~ dnorm(0,0.001)
      b.dd ~ dnorm(0, 0.001)
      # Random effect for year
      for(k in 1:nyears){
        eps.gam[k] ~ dnorm(0, tau.gam)
      }
      sigma.gam ~ dunif(0,100)
      tau.gam <- pow(sigma.gam, -2)
      var.gam <- pow(sigma.gam, 2)
      # Random effect for region
      for(r in 1:nregions){
        eps.reg[r] ~ dnorm(0, tau.reg)
      }
      sigma.reg ~ dunif(0,100)
      tau.reg <- pow(sigma.reg, -2)
      var.reg <- pow(sigma.reg, 2)
      # Dispersal
      for(k in 1:nyears){
        em.group[1,k] ~ dbeta(51.21888, 394.1627)
        em.group[2,k] ~ dbeta(47.74287, 525.4008)
      }
      ############################################################
      # 2. Likelihoods
      ############################################################
      #####################
      # 2.1. Occupancy likelihood
      # Adapted from Sarah B Bassing
      # Montana Cooperative Wildlife Research Unit
      # August 2016.
      # This is a DYNAMIC FALSE-POSITVE MULTI-SEASON occupancy model
      # Encounter histories include:
      # 1 = no detection
      # 2 = uncertain detection
      # 3 = certain detection
      ####################
      # Ecological process/submodel
      # Define State (z) conditional on parameters- Nmbr sites occupied
      for(i in 1:nsites){
        logit(psi1[i]) <- B0.psi1 + b.pc1.psi * PC1[i] + b.recPC.psi * recPC[i,1]
        z[i,1] ~ dbern(psi1[i])
        for(k in 1:(nyears-1)){
          logit(phi[i,k]) <- B0.phi + b.pc1.phi * PC1[i]
          logit(colo[i,k]) <- B0.colo[k] + b.pc1.colo * PC1[i] + b.recPC.colo * recPC[i,k+1]
        }#k
        for(k in 2:nyears){
          muZ[i,k] <- z[i,k-1] * phi[i,k-1] + (1-z[i,k-1]) * colo[i,k-1]
          z[i,k] ~ dbern(muZ[i,k])
        }#k
      }#i
      # Observation process/submodel
      # z is either 0 or 1 (unoccupied or occupied)
      # y (observation dependent on the state z) can be 0,1,2 (no obs, uncertain obs,
      # certain obs) but JAGS's multinomial link function, dcat(), needs y to be 1,2,3
      # Observation process: define observations [y,i,j,k,z]
      # y|z has a probability of...
      # Detection probabilities are site, occasion, and year specific
      for(i in 1:nsites){
        for (j in 1:noccs){
          for(k in 1:nyears){
            p[1,i,j,k,1] <- (1 - p10[i,j,k])
            p[1,i,j,k,2] <- (1 - p11[i,j,k])
            p[2,i,j,k,1] <- p10[i,j,k]
            p[2,i,j,k,2] <- (1 - b[i,j,k]) * p11[i,j,k]
            p[3,i,j,k,1] <- 0
            p[3,i,j,k,2] <- b[i,j,k] * p11[i,j,k]
          }#k
        }#j
      }#i
      # Need mulitnomial link function for false positive detections: dcat function in JAGS
      # p11 is normal detction, p10 is false-positive dection (i.e., detected but wrong),
      # b is certain detection
      # Observation model
      for(i in 1:nsites){
        for(j in 1:noccs){
          for(k in 1:nyears){
            logit(p11[i,j,k]) <- B0.p11 + b.area.p11 * area[i] + b.huntdays.p11 * huntdays[i,j,k] + b.nonfrrds.p11 * nonforrds[i] + b.frrds.p11 * forrds[i] + b.acv.p11 * acv[i,j,k] + b.map.p11 * mapppn[i,j,k]
            logit(p10[i,j,k]) <- B0.p10 + b.acv.p10 * acv[i,j,k] + b.huntdays.p10 * huntdays[i,j,k] + b.nonfrrds.p10 * nonforrds[i] + b.frrds.p10 * forrds[i]
            logit(b[i,j,k]) <- B0.b
            y.occ[i,j,k] ~ dcat(p[,i,j,k,(z[i,k]+1)])
          }#k
        }#j
      }#i
      # Derived parameters
      for(i in 1:nsites){
        psi[i,1] <- psi1[i]
        for (k in 2:nyears){
          psi[i,k] <- psi[i,k-1] * phi[i,k-1] + (1 - psi[i,k-1]) * colo[i,k-1]
        }#k
      }#i
      # Area occpupied indexed by year and region
      for(k in 1:nyears){
        A[k] <- sum(psi[,k] * area[])
      }
      #####################
      # 2.2. Territory model
      # Input includes area occupied (A) indexed by year (k) from occupancy model (2.1.)
      # Area occupied is divided by territory size (T), which is currently set to 600
      # km squared, but will be based on data from Rich et al. 2012 for territory size
      # Output is number of packs (P) indexed by year (k)
      ####################
      # Pull in data for the mean for territory size
      T3 ~ dlnorm(6.22985815, 1/0.58728123)
      # Estimate number of packs from area occupied (A) and territory size (T)
      for(k in 1:nyears){
        P[k] <- (A[k] / (T3 + 0.000001)) * T.overlap[k]
      }
      #####################
      # 2.3. Survival likelihood
      # Current model is
      # Output is survival indexed by year (k)
      ####################
      # Estimate the harzard.
      # This part transforms the linear predictor (mu.surv)
      # using the cloglog link and relates it to the data (event) for each
      # observation
      for(i in 1:nobs){
        event[i] ~ dbern(mu.surv[i])
        cloglog(mu.surv[i]) <- b.period.surv[Period[i]] + eps.surv[Year[i]]
      }#i
      # Predicted values
      # Baseline hazard
      for(k in 1:nyears){
        for(p in 1:nperiods){
          cloglog(mu.pred[p,k]) <- b.period.surv[p] + eps.surv[k]
          hazard[p,k] <- -log(1 - mu.pred[p,k])
        }#p
      }#k
      # Cumulative hazard and survival
      for(k in 1:nyears){
        base.H[1,k] <- hazard[1,k] * width.interval[1]
        for(p in 2:nperiods){
          base.H[p,k] <- base.H[p-1, k] + hazard[p,k] * width.interval[p]
        }#p
      }#k
      for(k in 1:nyears){
        for(p in 1:nperiods){
          base.s[p,k] <- exp(-base.H[p,k])
        }#p
        annual.s[k] <- base.s[length(width.interval), k]
      }#k
      # Compute posterior predictive check statistics
      for(i in 1:nobs){
        # Expected values #### NOTE-Maybe multiple by nobs instead of 1? ####
        event.expected[i] <- 1 * mu.surv[i]
        # Fit statistic for actual data
        event.chi[i] <- pow((event[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
      }
      fit <- sum(event.chi[])
      for(i in 1:nobs){
        # Replicate data for GOF
        event.rep[i] ~ dbern(mu.surv[i])
        # Fit statistics for replicate data
        event.chi.new[i] <- pow((event.rep[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
      }
      fit.new <- sum(event.chi.new[])
      #####################
      # 2.4. Group level counts likelihood
      # Input data are group counts (y.group)
      # Input estimates are survival (s) from survival model indexed by year (k) and
      # recruitment (number of pups per pack, gamma) indexed by year and group (i)
      # Output is mean estimate of group size (G) which are indexed by year and group
      ####################
      # Ecological model/ system process
      for(i in 1:ngroups){
        for(k in 2:nyears){
          g.mu[i,k] <- G[i,k-1] * annual.s[k-1] * (1 - em.group[Harv[k-1],k-1]) + gamma[i,k-1]
          G[i,k] ~ dnorm(g.mu[i,k], 1 / (g.mu[i,k] + 0.00001))T(0,25)
        }
      }
      # Observation proccess
      for(i in 1:ngroups){
        for(k in 1:nyears){
          y.group[i,k] ~ dnorm(G[i,k], tauy.group)
        }
      }
      # Derived parameters
      for(k in 1:nyears){
        G.mean[k] <- mean(G[,k] * annual.s[k] * (1 - em.group[Harv[k],k]))
        G.mean.high[k] <- mean(G[,k])
        gamma.mean[k] <- mean(gamma[,k])
        n.est2[k] <- P[k] * G.mean[k]
        n.est[k] <- P[k] * G.dat[k]
      }
      for(k in 2:nyears){
        pop.growth[k] <- n.est[k] / n.est[k-1]
      }
      for(k in 1:nyears){
        G.dat[k] ~ dnorm(mu.G[k], 1 / (sd.G[k] * sd.G[k]))T(0,)
      }
      #####################
      # 2.5. Recruitment model
      ####################
      # Generalized linear model with log link function for recruitment
      for(i in 1:ngroups){
        for(k in 1:nyears){
          mu.gamma[i,k] <- exp(B0.gam + B1.gam * G[i,k] + eps.gam[k] + eps.reg[GroupReg[i]] + b.dd * LogN[k])
          gamma[i,k] ~ dpois(mu.gamma[i,k])
        }
      }
      ############################################################
      # 3. Bugs requirements
      ############################################################
      }", fill=TRUE)
# Restore console output (closes the M9 model file).
sink()
#### M10: R ~ pack size + ran year + ran region + harv ####
# Writes the JAGS model text for model M10 to 'M10_GroupRecIPM.txt'.
# M10's recruitment submodel uses a categorical harvest-period effect
# (b.harv[Harv[k]]) instead of M9's density-dependence term.
# Fixes relative to the previous revision of this model string:
#   1. GOF fit statistic used pow((a - b,2)) — misplaced parenthesis,
#      which is a JAGS syntax error; corrected to pow((a - b),2).
#   2. 'fit <- sum(event.chi[])' sat INSIDE the for(i in 1:nobs) loop,
#      redefining the node every iteration ("attempt to redefine node"
#      error in JAGS); moved outside the loop, matching M9.
#   3. Derived G.mean[k] indexed em.group[Harv[k-1],k-1] inside a loop
#      over k in 1:nyears, giving an invalid zero index at k = 1;
#      corrected to em.group[Harv[k],k] as in M9.
sink("M10_GroupRecIPM.txt")
cat("
    model {
      ############################################################
      # 1. Priors
      ############################################################
      ## 1.1 Occupancy priors
      # psi1 coefficient (occupancy in year 1)
      B0.psi1 ~ dnorm(0,0.001)
      # Priors for transition probabilities (survival and colonization)
      for(k in 1:(nyears-1)){
        B0.colo[k] ~ dnorm(0,0.001)
      }#k
      B0.phi ~ dnorm(0,0.001)
      # Priors for detection probabilities
      B0.p11 ~ dnorm(0,0.001)
      B0.p10 ~ dnorm(0,0.001)
      B0.b ~ dnorm(0,0.001)
      # Priors for covariates
      b.pc1.psi ~ dnorm(0,0.001)
      b.recPC.psi ~ dnorm(0,0.001)
      b.pc1.colo ~ dnorm(0,0.001)
      b.recPC.colo ~ dnorm(0,0.001)
      b.pc1.phi ~ dnorm(0,0.001)
      b.area.p11 ~ dnorm(0,0.001)
      b.huntdays.p11 ~ dnorm(0,0.001)
      b.acv.p11 ~ dnorm(0,0.001)
      b.map.p11 ~ dnorm(0,0.001)
      b.nonfrrds.p11 ~ dnorm(0,0.001)
      b.frrds.p11 ~ dnorm(0,0.001)
      b.huntdays.p10 ~ dnorm(0,0.001)
      b.nonfrrds.p10 ~ dnorm(0,0.001)
      b.frrds.p10 ~ dnorm(0,0.001)
      b.acv.p10 ~ dnorm(0,0.001)
      ## 1.2 Territory priors
      ## 1.3 Survival priors
      # Random effect for year
      for(k in 1:nyears){
        eps.surv[k] ~ dnorm(0, tau.surv)
      }
      sigma.surv ~ dunif(0,100)
      tau.surv <- pow(sigma.surv, -2)
      var.surv <- pow(sigma.surv, 2)
      for(p in 1:nperiods){
        b.period.surv[p] ~ dnorm(0,0.001)
      }
      ## 1.4 Group priors
      # Initial group sizes
      for(i in 1:ngroups){
        G[i,1] ~ dpois(7)T(2,)
      }
      # Process error
      tauy.group <- pow(sigma.group, -2)
      sigma.group ~ dunif(0,100)
      var.group <- pow(sigma.group, 2)
      ## 1.5 Recruitment priors
      # Priors for beta coefficients
      B0.gam ~ dnorm(0,0.001)
      B1.gam ~ dnorm(0,0.001)
      for(i in 1:2){
        b.harv[i] ~ dnorm(0, 0.001)
      }
      # Random effect for year
      for(k in 1:nyears){
        eps.gam[k] ~ dnorm(0, tau.gam)
      }
      sigma.gam ~ dunif(0,100)
      tau.gam <- pow(sigma.gam, -2)
      var.gam <- pow(sigma.gam, 2)
      # Random effect for region
      for(r in 1:nregions){
        eps.reg[r] ~ dnorm(0, tau.reg)
      }
      sigma.reg ~ dunif(0,100)
      tau.reg <- pow(sigma.reg, -2)
      var.reg <- pow(sigma.reg, 2)
      # Dispersal
      for(k in 1:nyears){
        em.group[1,k] ~ dbeta(51.21888, 394.1627)
        em.group[2,k] ~ dbeta(47.74287, 525.4008)
      }
      ############################################################
      # 2. Likelihoods
      ############################################################
      #####################
      # 2.1. Occupancy likelihood
      # Adapted from Sarah B Bassing
      # Montana Cooperative Wildlife Research Unit
      # August 2016.
      # This is a DYNAMIC FALSE-POSITVE MULTI-SEASON occupancy model
      # Encounter histories include:
      # 1 = no detection
      # 2 = uncertain detection
      # 3 = certain detection
      ####################
      # Ecological process/submodel
      # Define State (z) conditional on parameters- Nmbr sites occupied
      for(i in 1:nsites){
        logit(psi1[i]) <- B0.psi1 + b.pc1.psi * PC1[i] + b.recPC.psi * recPC[i,1]
        z[i,1] ~ dbern(psi1[i])
        for(k in 1:(nyears-1)){
          logit(phi[i,k]) <- B0.phi + b.pc1.phi * PC1[i]
          logit(colo[i,k]) <- B0.colo[k] + b.pc1.colo * PC1[i] + b.recPC.colo * recPC[i,k+1]
        }#k
        for(k in 2:nyears){
          muZ[i,k] <- z[i,k-1] * phi[i,k-1] + (1-z[i,k-1]) * colo[i,k-1]
          z[i,k] ~ dbern(muZ[i,k])
        }#k
      }#i
      # Observation process/submodel
      # z is either 0 or 1 (unoccupied or occupied)
      # y (observation dependent on the state z) can be 0,1,2 (no obs, uncertain obs,
      # certain obs) but JAGS's multinomial link function, dcat(), needs y to be 1,2,3
      # Observation process: define observations [y,i,j,k,z]
      # y|z has a probability of...
      # Detection probabilities are site, occasion, and year specific
      for(i in 1:nsites){
        for (j in 1:noccs){
          for(k in 1:nyears){
            p[1,i,j,k,1] <- (1 - p10[i,j,k])
            p[1,i,j,k,2] <- (1 - p11[i,j,k])
            p[2,i,j,k,1] <- p10[i,j,k]
            p[2,i,j,k,2] <- (1 - b[i,j,k]) * p11[i,j,k]
            p[3,i,j,k,1] <- 0
            p[3,i,j,k,2] <- b[i,j,k] * p11[i,j,k]
          }#k
        }#j
      }#i
      # Need mulitnomial link function for false positive detections: dcat function in JAGS
      # p11 is normal detction, p10 is false-positive dection (i.e., detected but wrong),
      # b is certain detection
      # Observation model
      for(i in 1:nsites){
        for(j in 1:noccs){
          for(k in 1:nyears){
            logit(p11[i,j,k]) <- B0.p11 + b.area.p11 * area[i] + b.huntdays.p11 * huntdays[i,j,k] + b.nonfrrds.p11 * nonforrds[i] + b.frrds.p11 * forrds[i] + b.acv.p11 * acv[i,j,k] + b.map.p11 * mapppn[i,j,k]
            logit(p10[i,j,k]) <- B0.p10 + b.acv.p10 * acv[i,j,k] + b.huntdays.p10 * huntdays[i,j,k] + b.nonfrrds.p10 * nonforrds[i] + b.frrds.p10 * forrds[i]
            logit(b[i,j,k]) <- B0.b
            y.occ[i,j,k] ~ dcat(p[,i,j,k,(z[i,k]+1)])
          }#k
        }#j
      }#i
      # Derived parameters
      for(i in 1:nsites){
        psi[i,1] <- psi1[i]
        for (k in 2:nyears){
          psi[i,k] <- psi[i,k-1] * phi[i,k-1] + (1 - psi[i,k-1]) * colo[i,k-1]
        }#k
      }#i
      # Area occpupied indexed by year and region
      for(k in 1:nyears){
        A[k] <- sum(psi[,k] * area[])
      }
      #####################
      # 2.2. Territory model
      # Input includes area occupied (A) indexed by year (k) from occupancy model (2.1.)
      # Area occupied is divided by territory size (T), which is currently set to 600
      # km squared, but will be based on data from Rich et al. 2012 for territory size
      # Output is number of packs (P) indexed by year (k)
      ####################
      # Pull in data for the mean for territory size
      T3 ~ dlnorm(6.22985815, 1/0.58728123)
      # Estimate number of packs from area occupied (A) and territory size (T)
      for(k in 1:nyears){
        P[k] <- (A[k] / (T3 + 0.000001)) * T.overlap[k]
      }
      #####################
      # 2.3. Survival likelihood
      # Current model is
      # Output is survival indexed by year (k)
      ####################
      # Estimate the harzard.
      # This part transforms the linear predictor (mu.surv)
      # using the cloglog link and relates it to the data (event) for each
      # observation
      for(i in 1:nobs){
        event[i] ~ dbern(mu.surv[i])
        cloglog(mu.surv[i]) <- b.period.surv[Period[i]] + eps.surv[Year[i]]
      }#i
      # Predicted values
      # Baseline hazard
      for(k in 1:nyears){
        for(p in 1:nperiods){
          cloglog(mu.pred[p,k]) <- b.period.surv[p] + eps.surv[k]
          hazard[p,k] <- -log(1 - mu.pred[p,k])
        }#p
      }#k
      # Cumulative hazard and survival
      for(k in 1:nyears){
        base.H[1,k] <- hazard[1,k] * width.interval[1]
        for(p in 2:nperiods){
          base.H[p,k] <- base.H[p-1, k] + hazard[p,k] * width.interval[p]
        }#p
      }#k
      for(k in 1:nyears){
        for(p in 1:nperiods){
          base.s[p,k] <- exp(-base.H[p,k])
        }#p
        annual.s[k] <- base.s[length(width.interval), k]
      }#k
      # Compute posterior predictive check statistics
      for(i in 1:nobs){
        # Expected values #### NOTE-Maybe multiple by nobs instead of 1? ####
        event.expected[i] <- 1 * mu.surv[i]
        # Fit statistic for actual data
        event.chi[i] <- pow((event[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
        # Replicate data for GOF
        event.rep[i] ~ dbern(mu.surv[i])
        # Fit statistics for replicate data
        event.chi.new[i] <- pow((event.rep[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
      }
      fit <- sum(event.chi[])
      fit.new <- sum(event.chi.new[])
      #####################
      # 2.4. Group level counts likelihood
      # Input data are group counts (y.group)
      # Input estimates are survival (s) from survival model indexed by year (k) and
      # recruitment (number of pups per pack, gamma) indexed by year and group (i)
      # Output is mean estimate of group size (G) which are indexed by year and group
      ####################
      # Ecological model/ system process
      for(i in 1:ngroups){
        for(k in 2:nyears){
          g.mu[i,k] <- G[i,k-1] * annual.s[k-1] * (1 - em.group[Harv[k-1],k-1]) + gamma[i,k-1]
          G[i,k] ~ dnorm(g.mu[i,k], 1 / (g.mu[i,k] + 0.00001))T(0,25)
        }
      }
      # Observation proccess
      for(i in 1:ngroups){
        for(k in 1:nyears){
          y.group[i,k] ~ dnorm(G[i,k], tauy.group)
        }
      }
      # Derived parameters
      for(k in 1:nyears){
        G.mean[k] <- mean(G[,k] * annual.s[k] * (1 - em.group[Harv[k],k]))
        G.mean.high[k] <- mean(G[,k])
        gamma.mean[k] <- mean(gamma[,k])
        n.est2[k] <- P[k] * G.mean[k]
        n.est[k] <- P[k] * G.dat[k]
      }
      for(k in 2:nyears){
        pop.growth[k] <- n.est[k] / n.est[k-1]
      }
      for(k in 1:nyears){
        G.dat[k] ~ dnorm(mu.G[k], 1 / (sd.G[k] * sd.G[k]))T(0,)
      }
      #####################
      # 2.5. Recruitment model
      ####################
      # Generalized linear model with log link function for recruitment
      for(i in 1:ngroups){
        for(k in 1:nyears){
          mu.gamma[i,k] <- exp(B0.gam + B1.gam * G[i,k] + eps.gam[k] + eps.reg[GroupReg[i]] + b.harv[Harv[k]])
          gamma[i,k] ~ dpois(mu.gamma[i,k])
        }
      }
      ############################################################
      # 3. Bugs requirements
      ############################################################
      }", fill=TRUE)
# Restore console output (closes the M10 model file).
sink()
#### M11: R ~ pack size + ran year + ran region + winter ####
# Writes the JAGS model text for model M11 to 'M11_GroupRecIPM.txt'.
# M11's recruitment submodel uses a winter-severity covariate
# (b.winter * Winter[k]) instead of M9's density-dependence term.
# Fixes relative to the previous revision of this model string:
#   1. GOF fit statistic used pow((a - b,2)) — misplaced parenthesis,
#      which is a JAGS syntax error; corrected to pow((a - b),2).
#   2. 'fit <- sum(event.chi[])' sat INSIDE the for(i in 1:nobs) loop,
#      redefining the node every iteration; moved outside the loop.
#   3. Derived G.mean[k] used (1 - em.group) although em.group is a
#      2 x nyears matrix in this model (see Dispersal priors), a
#      non-conformable expression; corrected to em.group[Harv[k],k],
#      matching the M9 formulation.
sink("M11_GroupRecIPM.txt")
cat("
    model {
      ############################################################
      # 1. Priors
      ############################################################
      ## 1.1 Occupancy priors
      # psi1 coefficient (occupancy in year 1)
      B0.psi1 ~ dnorm(0,0.001)
      # Priors for transition probabilities (survival and colonization)
      for(k in 1:(nyears-1)){
        B0.colo[k] ~ dnorm(0,0.001)
      }#k
      B0.phi ~ dnorm(0,0.001)
      # Priors for detection probabilities
      B0.p11 ~ dnorm(0,0.001)
      B0.p10 ~ dnorm(0,0.001)
      B0.b ~ dnorm(0,0.001)
      # Priors for covariates
      b.pc1.psi ~ dnorm(0,0.001)
      b.recPC.psi ~ dnorm(0,0.001)
      b.pc1.colo ~ dnorm(0,0.001)
      b.recPC.colo ~ dnorm(0,0.001)
      b.pc1.phi ~ dnorm(0,0.001)
      b.area.p11 ~ dnorm(0,0.001)
      b.huntdays.p11 ~ dnorm(0,0.001)
      b.acv.p11 ~ dnorm(0,0.001)
      b.map.p11 ~ dnorm(0,0.001)
      b.nonfrrds.p11 ~ dnorm(0,0.001)
      b.frrds.p11 ~ dnorm(0,0.001)
      b.huntdays.p10 ~ dnorm(0,0.001)
      b.nonfrrds.p10 ~ dnorm(0,0.001)
      b.frrds.p10 ~ dnorm(0,0.001)
      b.acv.p10 ~ dnorm(0,0.001)
      ## 1.2 Territory priors
      ## 1.3 Survival priors
      # Random effect for year
      for(k in 1:nyears){
        eps.surv[k] ~ dnorm(0, tau.surv)
      }
      sigma.surv ~ dunif(0,100)
      tau.surv <- pow(sigma.surv, -2)
      var.surv <- pow(sigma.surv, 2)
      for(p in 1:nperiods){
        b.period.surv[p] ~ dnorm(0,0.001)
      }
      ## 1.4 Group priors
      # Initial group sizes
      for(i in 1:ngroups){
        G[i,1] ~ dpois(7)T(2,)
      }
      # Process error
      tauy.group <- pow(sigma.group, -2)
      sigma.group ~ dunif(0,100)
      var.group <- pow(sigma.group, 2)
      ## 1.5 Recruitment priors
      # Priors for beta coefficients
      B0.gam ~ dnorm(0,0.001)
      B1.gam ~ dnorm(0,0.001)
      b.winter ~ dnorm(0, 0.001)
      # Random effect for year
      for(k in 1:nyears){
        eps.gam[k] ~ dnorm(0, tau.gam)
      }
      sigma.gam ~ dunif(0,100)
      tau.gam <- pow(sigma.gam, -2)
      var.gam <- pow(sigma.gam, 2)
      # Random effect for region
      for(r in 1:nregions){
        eps.reg[r] ~ dnorm(0, tau.reg)
      }
      sigma.reg ~ dunif(0,100)
      tau.reg <- pow(sigma.reg, -2)
      var.reg <- pow(sigma.reg, 2)
      # Dispersal
      for(k in 1:nyears){
        em.group[1,k] ~ dbeta(51.21888, 394.1627)
        em.group[2,k] ~ dbeta(47.74287, 525.4008)
      }
      ############################################################
      # 2. Likelihoods
      ############################################################
      #####################
      # 2.1. Occupancy likelihood
      # Adapted from Sarah B Bassing
      # Montana Cooperative Wildlife Research Unit
      # August 2016.
      # This is a DYNAMIC FALSE-POSITVE MULTI-SEASON occupancy model
      # Encounter histories include:
      # 1 = no detection
      # 2 = uncertain detection
      # 3 = certain detection
      ####################
      # Ecological process/submodel
      # Define State (z) conditional on parameters- Nmbr sites occupied
      for(i in 1:nsites){
        logit(psi1[i]) <- B0.psi1 + b.pc1.psi * PC1[i] + b.recPC.psi * recPC[i,1]
        z[i,1] ~ dbern(psi1[i])
        for(k in 1:(nyears-1)){
          logit(phi[i,k]) <- B0.phi + b.pc1.phi * PC1[i]
          logit(colo[i,k]) <- B0.colo[k] + b.pc1.colo * PC1[i] + b.recPC.colo * recPC[i,k+1]
        }#k
        for(k in 2:nyears){
          muZ[i,k] <- z[i,k-1] * phi[i,k-1] + (1-z[i,k-1]) * colo[i,k-1]
          z[i,k] ~ dbern(muZ[i,k])
        }#k
      }#i
      # Observation process/submodel
      # z is either 0 or 1 (unoccupied or occupied)
      # y (observation dependent on the state z) can be 0,1,2 (no obs, uncertain obs,
      # certain obs) but JAGS's multinomial link function, dcat(), needs y to be 1,2,3
      # Observation process: define observations [y,i,j,k,z]
      # y|z has a probability of...
      # Detection probabilities are site, occasion, and year specific
      for(i in 1:nsites){
        for (j in 1:noccs){
          for(k in 1:nyears){
            p[1,i,j,k,1] <- (1 - p10[i,j,k])
            p[1,i,j,k,2] <- (1 - p11[i,j,k])
            p[2,i,j,k,1] <- p10[i,j,k]
            p[2,i,j,k,2] <- (1 - b[i,j,k]) * p11[i,j,k]
            p[3,i,j,k,1] <- 0
            p[3,i,j,k,2] <- b[i,j,k] * p11[i,j,k]
          }#k
        }#j
      }#i
      # Need mulitnomial link function for false positive detections: dcat function in JAGS
      # p11 is normal detction, p10 is false-positive dection (i.e., detected but wrong),
      # b is certain detection
      # Observation model
      for(i in 1:nsites){
        for(j in 1:noccs){
          for(k in 1:nyears){
            logit(p11[i,j,k]) <- B0.p11 + b.area.p11 * area[i] + b.huntdays.p11 * huntdays[i,j,k] + b.nonfrrds.p11 * nonforrds[i] + b.frrds.p11 * forrds[i] + b.acv.p11 * acv[i,j,k] + b.map.p11 * mapppn[i,j,k]
            logit(p10[i,j,k]) <- B0.p10 + b.acv.p10 * acv[i,j,k] + b.huntdays.p10 * huntdays[i,j,k] + b.nonfrrds.p10 * nonforrds[i] + b.frrds.p10 * forrds[i]
            logit(b[i,j,k]) <- B0.b
            y.occ[i,j,k] ~ dcat(p[,i,j,k,(z[i,k]+1)])
          }#k
        }#j
      }#i
      # Derived parameters
      for(i in 1:nsites){
        psi[i,1] <- psi1[i]
        for (k in 2:nyears){
          psi[i,k] <- psi[i,k-1] * phi[i,k-1] + (1 - psi[i,k-1]) * colo[i,k-1]
        }#k
      }#i
      # Area occpupied indexed by year and region
      for(k in 1:nyears){
        A[k] <- sum(psi[,k] * area[])
      }
      #####################
      # 2.2. Territory model
      # Input includes area occupied (A) indexed by year (k) from occupancy model (2.1.)
      # Area occupied is divided by territory size (T), which is currently set to 600
      # km squared, but will be based on data from Rich et al. 2012 for territory size
      # Output is number of packs (P) indexed by year (k)
      ####################
      # Pull in data for the mean for territory size
      T3 ~ dlnorm(6.22985815, 1/0.58728123)
      # Estimate number of packs from area occupied (A) and territory size (T)
      for(k in 1:nyears){
        P[k] <- (A[k] / (T3 + 0.000001)) * T.overlap[k]
      }
      #####################
      # 2.3. Survival likelihood
      # Current model is
      # Output is survival indexed by year (k)
      ####################
      # Estimate the harzard.
      # This part transforms the linear predictor (mu.surv)
      # using the cloglog link and relates it to the data (event) for each
      # observation
      for(i in 1:nobs){
        event[i] ~ dbern(mu.surv[i])
        cloglog(mu.surv[i]) <- b.period.surv[Period[i]] + eps.surv[Year[i]]
      }#i
      # Predicted values
      # Baseline hazard
      for(k in 1:nyears){
        for(p in 1:nperiods){
          cloglog(mu.pred[p,k]) <- b.period.surv[p] + eps.surv[k]
          hazard[p,k] <- -log(1 - mu.pred[p,k])
        }#p
      }#k
      # Cumulative hazard and survival
      for(k in 1:nyears){
        base.H[1,k] <- hazard[1,k] * width.interval[1]
        for(p in 2:nperiods){
          base.H[p,k] <- base.H[p-1, k] + hazard[p,k] * width.interval[p]
        }#p
      }#k
      for(k in 1:nyears){
        for(p in 1:nperiods){
          base.s[p,k] <- exp(-base.H[p,k])
        }#p
        annual.s[k] <- base.s[length(width.interval), k]
      }#k
      # Compute posterior predictive check statistics
      for(i in 1:nobs){
        # Expected values #### NOTE-Maybe multiple by nobs instead of 1? ####
        event.expected[i] <- 1 * mu.surv[i]
        # Fit statistic for actual data
        event.chi[i] <- pow((event[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
        # Replicate data for GOF
        event.rep[i] ~ dbern(mu.surv[i])
        # Fit statistics for replicate data
        event.chi.new[i] <- pow((event.rep[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
      }
      fit <- sum(event.chi[])
      fit.new <- sum(event.chi.new[])
      #####################
      # 2.4. Group level counts likelihood
      # Input data are group counts (y.group)
      # Input estimates are survival (s) from survival model indexed by year (k) and
      # recruitment (number of pups per pack, gamma) indexed by year and group (i)
      # Output is mean estimate of group size (G) which are indexed by year and group
      ####################
      # Ecological model/ system process
      for(i in 1:ngroups){
        for(k in 2:nyears){
          g.mu[i,k] <- G[i,k-1] * annual.s[k-1] * (1 - em.group[Harv[k-1],k-1]) + gamma[i,k-1]
          G[i,k] ~ dnorm(g.mu[i,k], 1 / (g.mu[i,k] + 0.00001))T(0,25)
        }
      }
      # Observation proccess
      for(i in 1:ngroups){
        for(k in 1:nyears){
          y.group[i,k] ~ dnorm(G[i,k], tauy.group)
        }
      }
      # Derived parameters
      for(k in 1:nyears){
        G.mean[k] <- mean(G[,k] * annual.s[k] * (1 - em.group[Harv[k],k]))
        G.mean.high[k] <- mean(G[,k])
        gamma.mean[k] <- mean(gamma[,k])
        n.est2[k] <- P[k] * G.mean[k]
        n.est[k] <- P[k] * G.dat[k]
      }
      for(k in 2:nyears){
        pop.growth[k] <- n.est[k] / n.est[k-1]
      }
      for(k in 1:nyears){
        G.dat[k] ~ dnorm(mu.G[k], 1 / (sd.G[k] * sd.G[k]))T(0,)
      }
      #####################
      # 2.5. Recruitment model
      ####################
      # Generalized linear model with log link function for recruitment
      for(i in 1:ngroups){
        for(k in 1:nyears){
          mu.gamma[i,k] <- exp(B0.gam + B1.gam * G[i,k] + eps.gam[k] + eps.reg[GroupReg[i]] + b.winter * Winter[k])
          gamma[i,k] ~ dpois(mu.gamma[i,k])
        }
      }
      ############################################################
      # 3. Bugs requirements
      ############################################################
      }", fill=TRUE)
# Restore console output (closes the M11 model file).
sink()
#### M12: FIX R~pack size+ran year+ran region+winter+harv+forest+road ####
# Writes the JAGS model text for model M12 to 'M12_GroupRecIPM.txt'.
# NOTE(review): despite the header, only the harvest effect appears in the
# recruitment linear predictor here (no winter/forest/road terms), and
# em.group is used as a scalar with no prior — presumably it is supplied
# in the data list for this model; confirm against the fitting code.
# Fixes relative to the previous revision of this model string:
#   1. GOF fit statistic used pow((a - b,2)) — misplaced parenthesis,
#      which is a JAGS syntax error; corrected to pow((a - b),2).
#   2. 'fit <- sum(event.chi[])' sat INSIDE the for(i in 1:nobs) loop,
#      redefining the node every iteration; moved outside the loop.
#   3. Recruitment used b.harv[Harv[i]] (indexed by group i); Harv is a
#      year-indexed vector everywhere else in this file, so this is
#      corrected to b.harv[Harv[k]], matching M10.
sink("M12_GroupRecIPM.txt")
cat("
    model {
      ############################################################
      # 1. Priors
      ############################################################
      ## 1.1 Occupancy priors
      # psi1 coefficient (occupancy in year 1)
      B0.psi1 ~ dnorm(0,0.001)
      # Priors for transition probabilities (survival and colonization)
      for(k in 1:(nyears-1)){
        B0.colo[k] ~ dnorm(0,0.001)
      }#k
      B0.phi ~ dnorm(0,0.001)
      # Priors for detection probabilities
      B0.p11 ~ dnorm(0,0.001)
      B0.p10 ~ dnorm(0,0.001)
      B0.b ~ dnorm(0,0.001)
      # Priors for covariates
      b.pc1.psi ~ dnorm(0,0.001)
      b.recPC.psi ~ dnorm(0,0.001)
      b.pc1.colo ~ dnorm(0,0.001)
      b.recPC.colo ~ dnorm(0,0.001)
      b.pc1.phi ~ dnorm(0,0.001)
      b.area.p11 ~ dnorm(0,0.001)
      b.huntdays.p11 ~ dnorm(0,0.001)
      b.acv.p11 ~ dnorm(0,0.001)
      b.map.p11 ~ dnorm(0,0.001)
      b.nonfrrds.p11 ~ dnorm(0,0.001)
      b.frrds.p11 ~ dnorm(0,0.001)
      b.huntdays.p10 ~ dnorm(0,0.001)
      b.nonfrrds.p10 ~ dnorm(0,0.001)
      b.frrds.p10 ~ dnorm(0,0.001)
      b.acv.p10 ~ dnorm(0,0.001)
      ## 1.2 Territory priors
      ## 1.3 Survival priors
      # Random effect for year
      for(k in 1:nyears){
        eps.surv[k] ~ dnorm(0, tau.surv)
      }
      sigma.surv ~ dunif(0,100)
      tau.surv <- pow(sigma.surv, -2)
      var.surv <- pow(sigma.surv, 2)
      for(p in 1:nperiods){
        b.period.surv[p] ~ dnorm(0,0.001)
      }
      ## 1.4 Group priors
      # Initial group sizes
      for(i in 1:ngroups){
        G[i,1] ~ dpois(7)T(2,)
      }
      # Process error
      tauy.group <- pow(sigma.group, -2)
      sigma.group ~ dunif(0,100)
      var.group <- pow(sigma.group, 2)
      ## 1.5 Recruitment priors
      # Priors for beta coefficients
      B0.gam ~ dnorm(0,0.001)
      B1.gam ~ dnorm(0,0.001)
      for(i in 1:2){
        b.harv[i] ~ dnorm(0, 0.001)
      }
      # Random effect for year
      for(k in 1:nyears){
        eps.gam[k] ~ dnorm(0, tau.gam)
      }
      sigma.gam ~ dunif(0,100)
      tau.gam <- pow(sigma.gam, -2)
      var.gam <- pow(sigma.gam, 2)
      # Random effect for region
      for(r in 1:nregions){
        eps.reg[r] ~ dnorm(0, tau.reg)
      }
      sigma.reg ~ dunif(0,100)
      tau.reg <- pow(sigma.reg, -2)
      var.reg <- pow(sigma.reg, 2)
      ############################################################
      # 2. Likelihoods
      ############################################################
      #####################
      # 2.1. Occupancy likelihood
      # Adapted from Sarah B Bassing
      # Montana Cooperative Wildlife Research Unit
      # August 2016.
      # This is a DYNAMIC FALSE-POSITVE MULTI-SEASON occupancy model
      # Encounter histories include:
      # 1 = no detection
      # 2 = uncertain detection
      # 3 = certain detection
      ####################
      # Ecological process/submodel
      # Define State (z) conditional on parameters- Nmbr sites occupied
      for(i in 1:nsites){
        logit(psi1[i]) <- B0.psi1 + b.pc1.psi * PC1[i] + b.recPC.psi * recPC[i,1]
        z[i,1] ~ dbern(psi1[i])
        for(k in 1:(nyears-1)){
          logit(phi[i,k]) <- B0.phi + b.pc1.phi * PC1[i]
          logit(colo[i,k]) <- B0.colo[k] + b.pc1.colo * PC1[i] + b.recPC.colo * recPC[i,k+1]
        }#k
        for(k in 2:nyears){
          muZ[i,k] <- z[i,k-1] * phi[i,k-1] + (1-z[i,k-1]) * colo[i,k-1]
          z[i,k] ~ dbern(muZ[i,k])
        }#k
      }#i
      # Observation process/submodel
      # z is either 0 or 1 (unoccupied or occupied)
      # y (observation dependent on the state z) can be 0,1,2 (no obs, uncertain obs,
      # certain obs) but JAGS's multinomial link function, dcat(), needs y to be 1,2,3
      # Observation process: define observations [y,i,j,k,z]
      # y|z has a probability of...
      # Detection probabilities are site, occasion, and year specific
      for(i in 1:nsites){
        for (j in 1:noccs){
          for(k in 1:nyears){
            p[1,i,j,k,1] <- (1 - p10[i,j,k])
            p[1,i,j,k,2] <- (1 - p11[i,j,k])
            p[2,i,j,k,1] <- p10[i,j,k]
            p[2,i,j,k,2] <- (1 - b[i,j,k]) * p11[i,j,k]
            p[3,i,j,k,1] <- 0
            p[3,i,j,k,2] <- b[i,j,k] * p11[i,j,k]
          }#k
        }#j
      }#i
      # Need mulitnomial link function for false positive detections: dcat function in JAGS
      # p11 is normal detction, p10 is false-positive dection (i.e., detected but wrong),
      # b is certain detection
      # Observation model
      for(i in 1:nsites){
        for(j in 1:noccs){
          for(k in 1:nyears){
            logit(p11[i,j,k]) <- B0.p11 + b.area.p11 * area[i] + b.huntdays.p11 * huntdays[i,j,k] + b.nonfrrds.p11 * nonforrds[i] + b.frrds.p11 * forrds[i] + b.acv.p11 * acv[i,j,k] + b.map.p11 * mapppn[i,j,k]
            logit(p10[i,j,k]) <- B0.p10 + b.acv.p10 * acv[i,j,k] + b.huntdays.p10 * huntdays[i,j,k] + b.nonfrrds.p10 * nonforrds[i] + b.frrds.p10 * forrds[i]
            logit(b[i,j,k]) <- B0.b
            y.occ[i,j,k] ~ dcat(p[,i,j,k,(z[i,k]+1)])
          }#k
        }#j
      }#i
      # Derived parameters
      for(i in 1:nsites){
        psi[i,1] <- psi1[i]
        for (k in 2:nyears){
          psi[i,k] <- psi[i,k-1] * phi[i,k-1] + (1 - psi[i,k-1]) * colo[i,k-1]
        }#k
      }#i
      # Area occpupied indexed by year and region
      for(k in 1:nyears){
        A[k] <- sum(psi[,k] * area[])
      }
      #####################
      # 2.2. Territory model
      # Input includes area occupied (A) indexed by year (k) from occupancy model (2.1.)
      # Area occupied is divided by territory size (T), which is currently set to 600
      # km squared, but will be based on data from Rich et al. 2012 for territory size
      # Output is number of packs (P) indexed by year (k)
      ####################
      # Pull in data for the mean for territory size
      T3 ~ dlnorm(6.22985815, 1/0.58728123)
      # Estimate number of packs from area occupied (A) and territory size (T)
      for(k in 1:nyears){
        P[k] <- (A[k] / (T3 + 0.000001)) * T.overlap[k]
      }
      #####################
      # 2.3. Survival likelihood
      # Current model is
      # Output is survival indexed by year (k)
      ####################
      # Estimate the harzard.
      # This part transforms the linear predictor (mu.surv)
      # using the cloglog link and relates it to the data (event) for each
      # observation
      for(i in 1:nobs){
        event[i] ~ dbern(mu.surv[i])
        cloglog(mu.surv[i]) <- b.period.surv[Period[i]] + eps.surv[Year[i]]
      }#i
      # Predicted values
      # Baseline hazard
      for(k in 1:nyears){
        for(p in 1:nperiods){
          cloglog(mu.pred[p,k]) <- b.period.surv[p] + eps.surv[k]
          hazard[p,k] <- -log(1 - mu.pred[p,k])
        }#p
      }#k
      # Cumulative hazard and survival
      for(k in 1:nyears){
        base.H[1,k] <- hazard[1,k] * width.interval[1]
        for(p in 2:nperiods){
          base.H[p,k] <- base.H[p-1, k] + hazard[p,k] * width.interval[p]
        }#p
      }#k
      for(k in 1:nyears){
        for(p in 1:nperiods){
          base.s[p,k] <- exp(-base.H[p,k])
        }#p
        annual.s[k] <- base.s[length(width.interval), k]
      }#k
      # Compute posterior predictive check statistics
      for(i in 1:nobs){
        # Expected values #### NOTE-Maybe multiple by nobs instead of 1? ####
        event.expected[i] <- 1 * mu.surv[i]
        # Fit statistic for actual data
        event.chi[i] <- pow((event[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
        # Replicate data for GOF
        event.rep[i] ~ dbern(mu.surv[i])
        # Fit statistics for replicate data
        event.chi.new[i] <- pow((event.rep[i] - event.expected[i]),2)/(event.expected[i] + 0.01)
      }
      fit <- sum(event.chi[])
      fit.new <- sum(event.chi.new[])
      #####################
      # 2.4. Group level counts likelihood
      # Input data are group counts (y.group)
      # Input estimates are survival (s) from survival model indexed by year (k) and
      # recruitment (number of pups per pack, gamma) indexed by year and group (i)
      # Output is mean estimate of group size (G) which are indexed by year and group
      ####################
      # Ecological model/ system process
      for(i in 1:ngroups){
        for(k in 2:nyears){
          g.mu[i,k] <- G[i,k-1] * annual.s[k-1] * (1 - em.group) + gamma[i,k-1]
          G[i,k] ~ dnorm(g.mu[i,k], 1 / (g.mu[i,k] + 0.00001))T(0,25)
        }
      }
      # Observation proccess
      for(i in 1:ngroups){
        for(k in 1:nyears){
          y.group[i,k] ~ dnorm(G[i,k], tauy.group)
        }
      }
      # Derived parameters
      for(k in 1:nyears){
        G.mean[k] <- mean(G[,k] * annual.s[k] * (1 - em.group))
        G.mean.high[k] <- mean(G[,k])
        gamma.mean[k] <- mean(gamma[,k])
        n.est[k] <- P[k] * G.mean[k]
      }
      for(k in 2:nyears){
        pop.growth[k] <- n.est[k] / n.est[k-1]
      }
      #####################
      # 2.5. Recruitment model
      ####################
      # Generalized linear model with log link function for recruitment
      for(i in 1:ngroups){
        for(k in 1:nyears){
          mu.gamma[i,k] <- exp(B0.gam + B1.gam * G[i,k] + eps.gam[k] + eps.reg[GroupReg[i]] + b.harv[Harv[k]])
          gamma[i,k] ~ dpois(mu.gamma[i,k])
        }
      }
      ############################################################
      # 3. Bugs requirements
      ############################################################
      }", fill=TRUE)
# Restore console output (closes the M12 model file).
sink()
#### Population level - same for all models ####
# Write the population-level integrated population model (IPM) to the file
# "PopRecIPM.txt". The quoted block is BUGS/JAGS model code emitted verbatim
# by cat(); it is parsed by JAGS, not by R, so it is left byte-for-byte
# unchanged here. Expected data: P2, gamma2, betas (posterior summaries from
# the group-level model), n.est, nyears, nperiods, width.interval.
sink("PopRecIPM.txt")
cat("
model {
############################################################
# 1. Priors
############################################################
## Population priors
# Initial population size
N.tot[1] ~ dnorm(600, 0.0001)I(0,)
## Bring in data s, G.mean, gamma.mean, P, colo, and phi
for(k in 1:nyears){
P[k] ~ dnorm(P2[k,1], 1 / (P2[k,2] * P2[k,2]+ 0.0000001))
# G.mean[k] ~ dnorm(G.mean2[k,1], 1 / (G.mean2[k,2] * G.mean2[k,2]+ 0.0000001))
gamma.mean[k] ~ dnorm(gamma2[k,1], 1 / (gamma2[k,2] * gamma2[k,2]+ 0.0000001))
}
for(k in 1:(nyears-1)){
# B0.colo[k] ~ dnorm(betas[k,3], 1 / (betas[k,4] * betas[k,4]+ 0.0000001))
eps.surv[k] ~ dnorm(betas[k,13], 1 / (betas[k,14] * betas[k,14]+ 0.0000001))
}
# B0.phi ~ dnorm(betas[1,1], 1 / (betas[1,2] * betas[1,2]+ 0.0000001))
# b.pc1.colo ~ dnorm(betas[1,5], 1 / (betas[1,6] * betas[1,6]+ 0.0000001))
# b.recPC.colo ~ dnorm(betas[1,7], 1 / (betas[1,8] * betas[1,8]+ 0.0000001))
# b.pc1.phi ~ dnorm(betas[1,9], 1 / (betas[1,10] * betas[1,10]+ 0.0000001))
for(p in 1:nperiods){
b.period.surv[p] ~ dnorm(betas[p,11], 1 / (betas[p,12] * betas[p,12]+ 0.0000001))
}
T ~ dlnorm(6.22985815, 1/0.58728123)
############################################################
# 2. Likelihood
############################################################
# Ecological model/ system process
# First determine colonization and extinction
# for(i in 1:nsites){
# for(k in 1:(nyears-1)){
# colo[i,k] <- B0.colo[k] + b.pc1.colo * PC1[i] + b.recPC.colo * recPC[i,k+1]
# phi[i,k] <- B0.phi + b.pc1.phi * PC1[i]
# }
# }
# Then determine survival
# Baseline hazard
for(k in 1:(nyears-1)){
for(p in 1:nperiods){
cloglog(mu.pred[p,k]) <- b.period.surv[p] + eps.surv[k]
hazard[p,k] <- -log(1-mu.pred[p,k])
}#p
}#k
# Cumulative hazard and survival
for(k in 1:(nyears-1)){
base.H[1,k] <- hazard[1,k] * width.interval[1]
for(p in 2:nperiods){
base.H[p,k] <- base.H[p-1, k] + hazard[p,k] * width.interval[p]
}#p
}#k
for(k in 1:(nyears-1)){
for(p in 1:nperiods){
base.s[p,k] <- exp(-base.H[p,k])
}#p
annual.s[k] <- base.s[length(width.interval), k]
}#k
for(k in 2:nyears){
N.rec[k] ~ dpois(P[k-1] * gamma.mean[k-1])
# N.ps[k] ~ dpois((P[k-1] + sum(colo[,k-1]*((area[] * T.overlap[k])/T - P[k-1])) - P[k-1] * (1 - sum(phi[,k-1]))) * G.mean[k-1])
N.ad[k] ~ dbin(annual.s[k-1], round(N.tot[k-1]))
N.tot[k] <- N.ad[k] + N.rec[k]
}
# Linking pack size (P) and mean group size (G.mean) as data (n.est) to abundance (N.tot)
for(k in 1:nyears){
n.est[k,1] ~ dnorm(N.tot[k], (1 / (n.est[k,2]*n.est[k,2]+0.00001)))
}
############################################################
# 3. Bugs requirements
############################################################
}", fill=TRUE)
sink()
# NOTE(review): inside the model string, `T` is declared as a stochastic node
# (T ~ dlnorm(...)) but every line that uses it is commented out -- confirm it
# is still needed before pruning. (This is JAGS text, so it does not clash
# with R's TRUE.)
|
462f06a6ab36210362dda0cd9131ecfa196b2abe
|
ab39ad07bbbb65c1e61315076a43bce6cfa688f3
|
/R_Codes/3-PLS/spls_scores/man/yeast.Rd
|
460fcee90f7ec534797572bd6f80666f8fe94765
|
[] |
no_license
|
ManonMartin/thesisMaterial
|
771e7fdcd0ff3ceba7607f2abaa7d05e20366d61
|
950b79588f224c649ef12a3a3c0fa7c3280e9806
|
refs/heads/master
| 2021-11-06T23:15:01.741098
| 2021-11-04T13:34:46
| 2021-11-04T13:34:46
| 208,266,675
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,914
|
rd
|
yeast.Rd
|
\name{yeast}
\docType{data}
\alias{yeast}
\title{Yeast Cell Cycle Dataset}
\description{
This is the Yeast Cell Cycle dataset used in Chun and Keles (2010).
}
\usage{ data(yeast) }
\format{
A list with two components:
\describe{
\item{x}{ ChIP-chip data. A matrix with 542 rows and 106 columns.}
\item{y}{ Cell cycle gene expression data.
A matrix with 542 rows and 18 columns.}
}
}
\details{
Matrix \code{y} is cell cycle gene expression data (Spellman et al., 1998)
of 542 genes from an \eqn{\alpha} factor based experiment.
Each column corresponds to mRNA levels
measured at every 7 minutes during 119 minutes (a total of 18 measurements).
Matrix \code{x} is the chromatin immunoprecipitation on chip (ChIP-chip) data of
Lee et al. (2002) and it contains the binding information for 106
transcription factors. See Chun and Keles (2010) for more details.
}
\source{
Lee TI, Rinaldi NJ, Robert F, Odom DT, Bar-Joseph Z, Gerber GK, Hannett NM,
Harbison CT, Thomson CM, Simon I, Zeitlinger J, Jennings EG, Murray HL,
Gordon DB, Ren B, Wyrick JJ, Tagne JB, Volkert TL, Fraenkel E, Gifford DK,
and Young RA (2002), "Transcriptional regulatory networks in \emph{Saccharomyces cerevisiae}",
\emph{Science}, Vol. 298, pp. 799--804.
Spellman PT, Sherlock G, Zhang MQ, Iyer VR, Anders K, Eisen MB, Brown PO,
Botstein D, and Futcher B (1998), "Comprehensive identification of cell cycle-regulated genes of
the yeast \emph{Saccharomyces cerevisiae} by microarray hybridization",
\emph{Molecular Biology of the Cell}, Vol. 9, pp. 3273--3279.
}
\references{
Chun H and Keles S (2010), "Sparse partial least squares
for simultaneous dimension reduction and variable selection",
\emph{Journal of the Royal Statistical Society - Series B}, Vol. 72, pp. 3--25. }
\examples{
data(yeast)
yeast$x[1:5,1:5]
yeast$y[1:5,1:5]
}
\keyword{datasets}
|
3647496a23a4328229edac1135baa728c3d986e8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/WeibullR/examples/weibayes.Rd.R
|
64ff1777486926573da909c8485dc3b505989bc6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 198
|
r
|
weibayes.Rd.R
|
library(WeibullR)
### Name: weibayes
### Title: Fitting for Minimal Failure Datasets
### Aliases: weibayes
### ** Examples

# A single observed failure time plus ten randomly drawn suspension times.
failures <- 5
suspensions <- rweibull(10, 1, 10)
# Weibayes point estimate of the Weibull scale (eta) with shape fixed at 1.
eta <- weibayes(failures, suspensions, beta = 1)
|
540738146201eee1b935e82a4d14778d3c367d33
|
1d9f05ac52835004f1f54e890cd9e6b11ea7bc0c
|
/R/utils.R
|
bc53ec43628dd57f8efb3009384d6fdb9672421b
|
[] |
no_license
|
NickPTaylor/simple
|
492b423cb00afeef83b3f58652d67bc2623eddc3
|
7988d1d006febb4f120ffa1f09e5789e7a8fcc4c
|
refs/heads/master
| 2021-06-07T04:29:22.456303
| 2016-10-31T10:02:38
| 2016-10-31T12:05:36
| 72,423,342
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 732
|
r
|
utils.R
|
#' Launch \code{reverse_string} shiny application.
#'
#' This is a wrapper function for launching the 'application' shiny app in this
#' package.
#'
#' @param ... Arguments to \code{shiny::runApp()}.
#'
#' @export
#'
#' @examples
#' \dontrun{
#' launch_application()
#' }
launch_application <- function(...) {
  app_dir <- system.file("application", package = "simple")
  shiny::runApp(appDir = app_dir, ...)
}
#' Reverse a character string.
#'
#' @param x A character string. If \code{x} has length greater than one, only
#'   the first element is reversed (as before).
#'
#' @return The character string \code{x} reversed, returned visibly.
#' @export
#'
#' @examples
#' reverse_string("hello")
#' reverse_string("hello world")
reverse_string <- function(x) {
  chars <- strsplit(x, "")[[1]]
  # Return the pasted result directly: the previous version ended with an
  # assignment (`x <- paste0(...)`), which returns its value *invisibly*, so
  # the documented examples printed nothing at the console.
  paste0(rev(chars), collapse = "")
}
|
7aef9bdfc1e47af031221fc6b8a2c6cf2ff3cea4
|
f207a065215b4fa52a4c6e8056f071ec8af28c38
|
/man/ler_noticias_g1.Rd
|
35df108f5ef3e0e2480523af141a9f19a9a6e1e4
|
[
"MIT"
] |
permissive
|
jjesusfilho/g1
|
eba458ad5bb859cadf53276c1a06d6fb51b15cda
|
f8f603a29560deabced4a7f4ca6bd70dc03776fd
|
refs/heads/master
| 2020-08-30T07:25:59.514378
| 2019-10-29T17:56:57
| 2019-10-29T17:56:57
| 218,304,559
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 552
|
rd
|
ler_noticias_g1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ler_noticias_g1.R
\name{ler_noticias_g1}
\alias{ler_noticias_g1}
\title{Lê as notícias baixadas pela função baixar_noticias_g1}
\usage{
ler_noticias_g1(arquivos = NULL, diretorio = ".")
}
\arguments{
\item{arquivos}{Vetor de arquivos. Se NULL, informar diretório}
\item{diretorio}{Informar diretório onde estão os arquivos}
}
\value{
tibble
}
\description{
Lê as notícias baixadas pela função baixar_noticias_g1
}
\examples{
\dontrun{
df <- ler_noticias_g1()
}
}
|
f558ce41b634e8ab862860b4b3fb98f0eb78ff91
|
f7fe3a7dd980fe1199df3de3708f778745ca116d
|
/scripts/PGLS.funcs.R
|
7d466b1976092c0d2747eee8892b1fb478e1c672
|
[
"MIT"
] |
permissive
|
flw88/mut_sex_bias_amniotes
|
2597b175cff38ce9069514d5ee1b73428c514288
|
37da9bdbc2c7cb839de15aadb554cf6c98128add
|
refs/heads/main
| 2022-08-23T10:05:29.039202
| 2022-08-17T21:04:45
| 2022-08-17T21:04:45
| 455,734,577
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,206
|
r
|
PGLS.funcs.R
|
#!/usr/bin/env Rscript
suppressMessages( library(getopt) )
suppressMessages( library(ape) )
suppressMessages( library(ggplot2) )
suppressMessages( library(data.table) )
suppressMessages( library(caper) )
suppressMessages( library(stringr) )
suppressMessages( library(Cairo) )
suppressMessages( library(factoextra) )
suppressMessages( library(ggrepel) )
suppressMessages( library(ungeviz) )
suppressMessages( library(castor) )
suppressMessages( library(geiger) )
suppressMessages( library(RColorBrewer) )
# Prepare a data frame for PGLS: keep only Species/xvar/yvar, rename the
# variable columns to the generic names "xvar"/"yvar", and optionally
# log10-transform either axis. `d` is a data.table; returns a plain
# data.frame.
prepareDataframe <- function(d, xvar, yvar, xlog, ylog){
  wanted <- c("Species", xvar, yvar)
  out <- d[, ..wanted]  # data.table: select columns named in `wanted`
  colnames(out) <- c("Species", "xvar", "yvar")
  out <- as.data.frame(out)
  if (xlog == TRUE) {
    out[, "xvar"] <- log10(out[, "xvar"])
  }
  if (ylog == TRUE) {
    out[, "yvar"] <- log10(out[, "yvar"])
  }
  return(out)
}
# Modified copy of caper::pgls(). The functional difference from the caper
# original is the extra `optimPar` argument: when non-NULL it overrides the
# default optimizer starting values (bound midpoints) for whichever of
# kappa/lambda/delta are set to "ML". Relies on caper internals
# (pgls.likelihood, pgls.blenTransform, pgls.confint, VCV.array) being
# available from library(caper) loaded at the top of this file.
#
# formula   model formula fitted by phylogenetic GLS.
# data      a caper::comparative.data object (data + phylogeny).
# lambda, kappa, delta   branch-length transformations; numeric (fixed) or
#           the string "ML" to estimate by maximum likelihood.
# param.CI  coverage for profile CIs on ML-estimated parameters (NULL skips).
# control   control list passed to optim(); fnscale = -1 maximizes.
# bounds    optional named list overriding the default search bounds.
# optimPar  optional named vector of starting values for the ML parameters.
#
# Returns an object of class "pgls".
pglsMod <- function (formula, data, lambda = 1, kappa = 1, delta = 1, param.CI = 0.95,
    control = list(fnscale = -1), bounds = NULL, optimPar = NULL)
{
    # Matrix square root of the inverse phylogenetic covariance; used to
    # compute phylogenetically corrected residuals.
    Dfun <- function(Cmat) {
        iCmat <- solve(Cmat, tol = .Machine$double.eps)
        svdCmat <- La.svd(iCmat)
        D <- svdCmat$u %*% diag(sqrt(svdCmat$d)) %*% t(svdCmat$v)
        return(t(D))
    }
    if (!inherits(data, "comparative.data"))
        stop("data is not a 'comparative' data object.")
    dname <- deparse(substitute(data))
    call <- match.call()
    # Drop rows (tips) with missing values in any model variable.
    miss <- model.frame(formula, data$data, na.action = na.pass)
    miss.na <- apply(miss, 1, function(X) (any(is.na(X))))
    if (any(miss.na)) {
        miss.names <- data$phy$tip.label[miss.na]
        data <- data[-which(miss.na), ]
    }
    # Build response vector and design matrix.
    m <- model.frame(formula, data$data)
    y <- m[, 1]
    x <- model.matrix(formula, m)
    k <- ncol(x)
    namey <- names(m)[1]
    # Guard against constant predictors (non-identifiable coefficients).
    xVar <- apply(x, 2, var)[-1]
    badCols <- xVar < .Machine$double.eps
    if (any(badCols))
        stop("Model matrix contains columns with zero variance: ",
            paste(names(xVar)[badCols], collapse = ", "))
    # Phylogenetic covariance matrix; kappa needs the 3D per-branch form.
    if (is.null(data$vcv)) {
        V <- if (kappa == 1) {
            VCV.array(data$phy)
        }
        else {
            VCV.array(data$phy, dim = 3)
        }
        data$vcv <- V
    }
    else {
        V <- data$vcv
    }
    nm <- names(data$data)
    n <- nrow(data$data)
    if (!is.null(param.CI)) {
        if (!is.numeric(param.CI) || param.CI <= 0 || param.CI >
            1)
            stop("param.CI is not a number between 0 and 1.")
    }
    # Default search bounds for the three transformations; user-supplied
    # bounds override per parameter.
    usrBounds <- bounds
    bounds <- list(kappa = c(1e-06, 3), lambda = c(1e-06, 1),
        delta = c(1e-06, 3))
    if (!is.null(usrBounds)) {
        if (!is.list(usrBounds))
            stop("Bounds must be a list of named bounds for any or all of kappa, lambda and delta")
        usrNames <- names(usrBounds)
        badNames <- setdiff(usrNames, c("kappa", "lambda", "delta"))
        if (length(badNames) > 0)
            stop("The list of bounds contains names other than kappa, lambda and delta")
        for (nm in usrNames) {
            bounds[nm] <- usrBounds[nm]
        }
    }
    # Validate each transformation parameter: scalar, numeric-in-bounds or
    # the literal string "ML".
    parVals <- list(kappa = kappa, lambda = lambda, delta = delta)
    for (i in seq_along(parVals)) {
        p <- parVals[[i]]
        nm <- names(parVals)[i]
        if (length(p) > 1)
            stop(nm, " not of length one.")
        if (is.character(p) & p != "ML")
            stop(nm, " is character and not 'ML'.")
        bnds <- bounds[[nm]]
        if (length(bnds) > 2)
            stop("Bounds specified for ", nm, " not of length one.")
        if (!is.numeric(bnds))
            stop("Non-numeric bounds specified for ", nm, ".")
        if (any(bnds < 0))
            stop("Negative values in bounds specified for ",
                nm, ".")
        lb <- bnds[1]
        ub <- bnds[2]
        if (lb > ub)
            stop("Lower bound greater than upper bound for ",
                nm, ".")
        if (is.numeric(p) & (p < lb | p > ub))
            stop(sprintf("%s value (%0.2f) is out of specified bounds [%0.2f, %0.2f]",
                nm, p, lb, ub))
    }
    if (kappa != 1 && length(dim(V)) != 3)
        stop("3D VCV.array needed for kappa transformation.")
    mlVals <- sapply(parVals, "==", "ML")
    if (any(mlVals)) {
        # ML branch: optimize the "ML" parameters with L-BFGS-B, holding the
        # fixed ones constant.
        parVals[mlVals] <- lapply(bounds, mean)[mlVals]
        parVals <- as.numeric(parVals)
        names(parVals) <- c("kappa", "lambda", "delta")
        # Local modification vs caper::pgls -- honour caller-supplied
        # starting values when optimPar is given.
        if(is.null(optimPar)){
          optimPar <- parVals[mlVals]
        }
        fixedPar <- parVals[!mlVals]
        lower.b <- sapply(bounds, "[", 1)[mlVals]
        upper.b <- sapply(bounds, "[", 2)[mlVals]
        optim.param.vals <- optim(optimPar, fn = pgls.likelihood,
            method = "L-BFGS-B", control = control, upper = upper.b,
            lower = lower.b, V = V, y = y, x = x, fixedPar = fixedPar,
            optim.output = TRUE)
        if (optim.param.vals$convergence != "0") {
            stop("Problem with optim:", optim.param.vals$convergence,
                optim.param.vals$message)
        }
        fixedPar <- c(optim.param.vals$par, fixedPar)
        fixedPar <- fixedPar[c("kappa", "lambda", "delta")]
    }
    else {
        fixedPar <- as.numeric(parVals)
        names(fixedPar) <- c("kappa", "lambda", "delta")
    }
    # Evaluate the likelihood and coefficients at the final parameter values.
    ll <- pgls.likelihood(optimPar = NULL, fixedPar = fixedPar,
        y, x, V, optim.output = FALSE)
    log.lik <- ll$ll
    Vt <- pgls.blenTransform(V, fixedPar)
    aic <- -2 * log.lik + 2 * k
    aicc <- -2 * log.lik + 2 * k + ((2 * k * (k + 1))/(n - k -
        1))
    coeffs <- ll$mu
    names(coeffs) <- colnames(x)
    varNames <- names(m)
    # Fitted values, raw residuals and phylogenetically corrected residuals.
    pred <- x %*% ll$mu
    res <- y - pred
    D <- Dfun(Vt)
    pres <- D %*% res
    fm <- list(coef = coeffs, aic = aic, log.lik = log.lik)
    RMS <- ll$s2
    RSSQ <- ll$s2 * (n - k)
    # Intercept-only null model, for the model/null sums of squares.
    xdummy <- matrix(rep(1, length(y)))
    nullMod <- pgls.likelihood(optimPar = NULL, fixedPar = fixedPar,
        y, xdummy, V, optim.output = FALSE)
    NMS <- nullMod$s2
    NSSQ <- nullMod$s2 * (n - 1)
    # Coefficient standard errors from the GLS information matrix.
    errMat <- t(x) %*% solve(Vt) %*% x
    errMat <- solve(errMat) * RMS[1]
    sterr <- diag(errMat)
    sterr <- sqrt(sterr)
    RET <- list(model = fm, formula = formula, call = call, RMS = RMS,
        NMS = NMS, NSSQ = NSSQ[1], RSSQ = RSSQ[1], aic = aic,
        aicc = aicc, n = n, k = k, sterr = sterr, fitted = pred,
        residuals = res, phyres = pres, x = x, data = data, varNames = varNames,
        y = y, param = fixedPar, mlVals = mlVals, namey = namey,
        bounds = bounds, Vt = Vt, dname = dname)
    class(RET) <- "pgls"
    if (any(miss.na)) {
        RET$na.action <- structure(which(miss.na), class = "omit",
            .Names = miss.names)
    }
    # Profile confidence intervals for each ML-estimated parameter.
    if (!is.null(param.CI) && any(mlVals)) {
        param.CI.list <- list(kappa = NULL, lambda = NULL, delta = NULL)
        mlNames <- names(mlVals)[which(mlVals)]
        for (param in mlNames) {
            param.CI.list[[param]] <- pgls.confint(RET, param,
                param.CI)
        }
        RET$param.CI <- param.CI.list
    }
    return(RET)
}
# Fit pglsMod() with lambda estimated by ML, restarting the optimizer from
# several initial lambda values and keeping the fit with the highest
# log-likelihood. This guards against L-BFGS-B failing to converge from a
# single poor starting point.
#
# fmula            model formula forwarded to pglsMod().
# cdat             a caper::comparative.data object.
# init.lam.values  starting values for lambda (default 0.1, ..., 0.9).
# kappa, delta     fixed values for the other two transformations.
#
# Returns the best "pgls" fit; stops with an informative error if every
# starting value fails.
pglsLambdaML <- function(fmula, cdat, init.lam.values=(1:9)/10, kappa=1, delta=1){
  max.lhood <- -Inf
  out.ml <- NULL
  for (init.lam in init.lam.values) {
    cur.ml <- try(pglsMod(fmula, cdat, lambda = "ML", kappa = kappa, delta = delta,
                          optimPar = c("lambda" = init.lam)))
    # FIX: use inherits() instead of class(x) == "try-error", which breaks
    # (and warns/errors under recent R) when an object has several classes.
    if (inherits(cur.ml, "try-error")) { next }
    if (logLik(cur.ml)[1] > max.lhood) {
      max.lhood <- logLik(cur.ml)[1]
      out.ml <- cur.ml
    }
  }
  # FIX: previously, if all starts failed this raised the cryptic
  # "object 'out.ml' not found"; fail with a clear message instead.
  if (is.null(out.ml)) {
    stop("pglsLambdaML: model fitting failed for all starting values of lambda.")
  }
  return(out.ml)
}
# PGLS wrapper: prepare the data, prune the phylogeny to species with data,
# and fit yvar ~ xvar by phylogenetic GLS.
#
# d              data.table with columns "Species", xvar and yvar.
# xvar, yvar     column names of predictor and response.
# xlog, ylog     log10-transform the corresponding variable?
# phylogeny      an ape "phylo" object whose tip labels match d$Species.
# lambda, kappa, delta  branch-length transformations; numeric or "ML".
#
# Returns list(model = <pgls fit>, data = <prepared data frame>), the string
# "NA" when fewer than 3 species have data (callers test for this literal),
# or NA when model fitting fails.
runPGLS <- function(d, xvar, yvar, xlog, ylog, phylogeny, lambda, kappa, delta){
  subd <- prepareDataframe(d, xvar, yvar, xlog, ylog)
  subd <- subd[!is.na(subd$xvar), ]
  if (nrow(subd) < 3) {
    return("NA")
  }
  trimmed_phylogeny <- keep.tip(phylogeny, subd$Species)
  cdat <- comparative.data(data = subd, phy = trimmed_phylogeny, names.col = "Species")
  if ((lambda == "ML") && (kappa != "ML") && (delta != "ML")) {
    # Only lambda is estimated: use the multi-start wrapper, which is more
    # robust to bad optimizer starting values.
    mod <- pglsLambdaML(yvar ~ xvar, cdat, kappa = kappa, delta = delta)
  } else {
    # FIX: this branch previously passed `optimPar = optimPar`, but no
    # `optimPar` exists in this scope, so every call through here errored.
    # pglsMod() picks its own starting values when optimPar is NULL.
    mod <- try(pglsMod(yvar ~ xvar, cdat, lambda = lambda, kappa = kappa,
                       delta = delta, optimPar = NULL))
  }
  if (inherits(mod, "try-error")) {
    return(NA)
  }
  df_mod <- list("model" = mod, "data" = subd)
  return(df_mod)
}
# PIC pairwise
# Function to produce PICs based on pairwise comparison of tips only
# (no ancestral node reconstruction; no overlaps).
# For N tips, gives N/2 PIC values (rounded down).
# Uses a minimum distance heuristic to iteratively choose pairs.
#
# x          named numeric vector of tip values (names = tip labels).
# phylogeny  an ape "phylo" object.
# method     "minimum" (greedy closest-pair matching) or "random".
#
# Returns a numeric vector of tip-value differences, one per disjoint tip
# pair, named "tipA-tipB". NOTE(review): any other `method` value leaves
# tip.pairs undefined and errors at the subtraction step -- confirm callers
# only ever pass "minimum"/"random".
picPairwiseTips <- function(x, phylogeny, method="minimum"){
  n <- Ntip(phylogeny)
  if(method == "random"){
    # Random disjoint pairing: split the tips into two halves at random.
    tip1 <- sample(phylogeny$tip.label, floor(n/2), replace=FALSE)
    tip2 <- sample(setdiff(phylogeny$tip.label, tip1), floor(n/2), replace=FALSE)
    tip.pairs <- cbind(tip1, tip2)
  } else if(method == "minimum"){
    # Patristic distances between tips only (first n rows/cols of the full
    # node distance matrix from ape::dist.nodes).
    dist.mat <- dist.nodes(phylogeny)[1:n, 1:n]
    # Set diagonal to infinity
    diag(dist.mat) <- Inf
    rownames(dist.mat) <- colnames(dist.mat) <- phylogeny$tip.label
    tip.pairs <- matrix(as.character(NA), nrow=floor(n/2), ncol=2)
    for(i in 1:nrow(tip.pairs)){ # Iterate through and take minimum tip pair
      # Greedy: take the globally closest remaining pair, then remove both
      # tips from the matrix before the next iteration. Ties break by the
      # first index which() returns.
      min.i <- which(dist.mat == min(dist.mat), arr.ind=TRUE)[1,]
      tip.pairs[i,] <- rownames(dist.mat)[min.i]
      mask <- !(rownames(dist.mat) %in% tip.pairs[i,])
      dist.mat <- dist.mat[mask, mask]
    }
  }
  # Contrast = difference of tip values within each pair; name each contrast
  # by its pair of tip labels.
  y <- x[tip.pairs[,1]] - x[tip.pairs[,2]]
  names(y) <- apply(tip.pairs, 1, function(s) str_c(s, collapse="-"))
  return(y)
}
# Correlation of phylogenetically independent contrasts (PICs) of xvar and
# yvar. With pic.pairwise = TRUE the contrasts come from disjoint tip pairs
# (picPairwiseTips, minimum-distance matching); otherwise standard ape::pic
# contrasts are used. Returns list(PIC1, PIC2, cor = cor.test result), or
# the literal string "NA" when fewer than 3 species have data.
picCor <- function(d, xvar, yvar, xlog, ylog, phylogeny, method="spearman", pic.pairwise=FALSE){
  dat <- prepareDataframe(d, xvar, yvar, xlog, ylog)
  dat <- dat[!is.na(dat$xvar), ]
  if (nrow(dat) < 3) {
    return("NA")
  }
  row.names(dat) <- dat$Species
  x.obs <- dat[, "xvar"]
  y.obs <- dat[, "yvar"]
  names(x.obs) <- names(y.obs) <- row.names(dat)
  pruned.tree <- keep.tip(phylogeny, row.names(dat))
  if (pic.pairwise) {
    x.pic <- picPairwiseTips(x.obs, pruned.tree, method = "minimum")
    y.pic <- picPairwiseTips(y.obs, pruned.tree, method = "minimum")
  } else {
    x.pic <- pic(x.obs, pruned.tree)
    y.pic <- pic(y.obs, pruned.tree)
  }
  pic.cor <- cor.test(x.pic, y.pic, method = method)
  return(list("PIC1" = x.pic, "PIC2" = y.pic, "cor" = pic.cor))
}
# Regression of phylogenetically independent contrasts: PICs of yvar on
# PICs of xvar, through the origin (contrasts have arbitrary sign, so the
# fit is constrained to pass through zero). Returns list(model = lm fit,
# data = data.frame of contrasts), or the literal string "NA" when fewer
# than 3 species have data.
picModel <- function(d, xvar, yvar, xlog, ylog, phylogeny){
  dat <- prepareDataframe(d, xvar, yvar, xlog, ylog)
  dat <- dat[!is.na(dat$xvar), ]
  if (nrow(dat) < 3) {
    return("NA")
  }
  row.names(dat) <- dat$Species
  x.obs <- dat[, "xvar"]
  y.obs <- dat[, "yvar"]
  names(x.obs) <- names(y.obs) <- row.names(dat)
  pruned.tree <- keep.tip(phylogeny, row.names(dat))
  # Variable names pic.X / pic.Y are kept: they become the data.frame column
  # names and the terms of the fitted model.
  pic.X <- pic(x.obs, pruned.tree)
  pic.Y <- pic(y.obs, pruned.tree)
  mod <- lm(pic.Y ~ 0 + pic.X)
  contrast.df <- data.frame(pic.X, pic.Y)
  return(list("model" = mod, "data" = contrast.df))
}
# Extract the p-value of the i-th coefficient from a fitted model.
# mod: any model whose summary() has a $coefficients matrix with p-values in
#      column 4 (e.g. lm or pgls fits).
# i:   row index of the coefficient of interest.
extract_p <- function(mod, i){
  coef.table <- summary(mod)$coefficients
  coef.table[i, 4]
}
# Extract transformed variance-covariance matrix
# Pulls the branch-length-transformed VCV matrix ($Vt) out of a pgls-style
# fit (see pglsMod above, which stores Vt and the comparative data in the
# returned object), labels rows/columns with the tip labels, and melts it to
# long format (Var1, Var2, value) for plotting.
# NOTE(review): melt() here resolves to data.table::melt via the library()
# calls at the top of this file -- confirm if reshape2 was ever intended.
extract_vcm <- function(mod){
  mx <- mod$Vt
  sp <- mod$data$phy$tip.label
  row.names(mx) <- sp
  colnames(mx) <- sp
  df <- melt(mx)
  return(df)
}
# Extract and row-bind the tables for particular comparison(s).
#
# xy_data:    list of data.tables, one per experiment.
# experiment: vector of names/indices selecting which tables to combine.
#
# Returns a single data.table (empty when `experiment` selects nothing).
GetExperimentData <- function(xy_data, experiment){
  # Bind all requested tables in one rbindlist() call rather than growing
  # the accumulator inside a loop, which re-copied the whole table on every
  # iteration (quadratic in the number of experiments). rbindlist() skips
  # NULL entries, matching the old loop's behavior for missing names.
  rbindlist(xy_data[experiment])
}
|
3a250753f2942bf96e2c325d6288a7275bf22511
|
db6b7886398c4602d858fbb1855ce6d8c91b3e68
|
/man/agg_vline.Rd
|
493127f1f1dca74eae9d8ce6150a83ed6f2113c2
|
[
"MIT"
] |
permissive
|
angusmoore/arphit
|
08c1bec58bf22d29999204480e962547c1486add
|
389efbf00b0775d1e1ec6b0f8f2311eff72bb1b0
|
refs/heads/master
| 2023-02-08T06:20:35.082649
| 2021-02-09T05:27:35
| 2021-02-09T05:27:35
| 104,701,875
| 3
| 4
|
MIT
| 2021-02-09T05:27:36
| 2017-09-25T04:02:59
|
R
|
UTF-8
|
R
| false
| true
| 753
|
rd
|
agg_vline.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gg-constructors.R
\name{agg_vline}
\alias{agg_vline}
\title{Add a vertical line to your graph}
\usage{
agg_vline(x, colour = "black", panel, lwd = 1, lty = 1)
}
\arguments{
\item{x}{The x coordinate to draw the vertical line at}
\item{colour}{The colour of the line (default black)}
\item{panel}{Which panel should the line be placed on? You can specify a
vector of panels (e.g. `panel = c("1","3")`) to apply the line to multiple
panels at once.}
\item{lwd}{(Optional, default 1) The line width}
\item{lty}{(Optional, default 1) The line type (uses R line types)}
}
\description{
Add a vertical line to your graph
}
\examples{
arphitgg() + agg_vline(x=2003,panel="1")
}
|
8b4b64096dc3338c2590830b0e178c0ac084e4b3
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/TESS/R/tess.likelihood.rateshift.R
|
eab842233a2db2957d8f1d35eeb9ba41dca48aa1
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,147
|
r
|
tess.likelihood.rateshift.R
|
################################################################################
#
# tess.likelihood.rateshift.R
#
# Copyright (c) 2012- Sebastian Hoehna
#
# This file is part of TESS.
# See the NOTICE file distributed with this work for additional
# information regarding copyright ownership and licensing.
#
# TESS is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# TESS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with TESS; if not, write to the
# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
#
################################################################################
################################################################################
#
# @brief Computation of the likelihood for a given tree under a birth-death rate-shift model (i.e. piecewise constant rates).
#
# @date Last modified: 2015-05-28
# @author Sebastian Hoehna
# @version 2.0
# @since 2012-09-22, version 1.3
#
# @param times vector vector of branching times
# @param times vector branching times
# @param lambda vector speciation rates
# @param mu vector extinction rates
# @param rateChangeTimesLambda vector speciation rates
# @param rateChangeTimesMu vector extinction rates
# @param massExtinctionTimes vector time at which mass-extinctions happen
# @param massExtinctionSurvivalProbabilities vector survival probability of a mass extinction event
# @param samplingProbability scalar probability of uniform sampling at present
# @param samplingStrategy string Which strategy was used to obtain the samples (taxa). Options are: uniform|diversified|age
# @param MRCA boolean does the tree start at the mrca?
# @param CONDITITON string do we condition the process on nothing|survival|taxa?
# @param log boolean likelhood in log-scale?
# @return scalar probability of the speciation times
#
################################################################################
tess.likelihood.rateshift <- function( times,
                                       lambda,
                                       mu,
                                       rateChangeTimesLambda = c(),
                                       rateChangeTimesMu = c(),
                                       massExtinctionTimes = c(),
                                       massExtinctionSurvivalProbabilities = c(),
                                       missingSpecies = c(),
                                       timesMissingSpecies = c(),
                                       samplingStrategy = "uniform",
                                       samplingProbability = 1.0,
                                       MRCA=TRUE,
                                       CONDITION="survival",
                                       log=TRUE) {

  # --- Input validation -----------------------------------------------------
  # Each rate vector must have exactly one more entry than its change times
  # (one rate per epoch).
  if ( length(lambda) != (length(rateChangeTimesLambda)+1) || length(mu) != (length(rateChangeTimesMu)+1) ) {
    stop("Number of rate-change times needs to be one less than the number of rates!")
  }
  if ( length(massExtinctionTimes) != length(massExtinctionSurvivalProbabilities) ) {
    stop("Number of mass-extinction times needs to equal the number of mass-extinction survival probabilities!")
  }
  if ( length(missingSpecies) != length(timesMissingSpecies) ) {
    stop("Vector holding the missing species must be of the same size as the intervals when the missing speciation events happend!")
  }
  if ( CONDITION != "time" && CONDITION != "survival" && CONDITION != "taxa" ) {
    stop("Wrong choice of argument for \"CONDITION\". Possible option are time|survival|taxa.")
  }
  if ( samplingStrategy != "uniform" && samplingStrategy != "diversified") {
    stop("Wrong choice of argument for \"samplingStrategy\". Possible option are uniform|diversified.")
  }

  # --- Canonicalize epochs --------------------------------------------------
  # make sure the times and values are sorted
  if ( length(rateChangeTimesLambda) > 0 ) {
    sortedRateChangeTimesLambda <- sort( rateChangeTimesLambda )
    lambda <- c(lambda[1], lambda[ match(sortedRateChangeTimesLambda,rateChangeTimesLambda)+1 ] )
    rateChangeTimesLambda <- sortedRateChangeTimesLambda
  }
  if ( length(rateChangeTimesMu) > 0 ) {
    sortedRateChangeTimesMu <- sort( rateChangeTimesMu )
    mu <- c(mu[1], mu[ match(sortedRateChangeTimesMu,rateChangeTimesMu)+1 ] )
    rateChangeTimesMu <- sortedRateChangeTimesMu
  }
  if ( length(massExtinctionTimes) > 0 ) {
    sortedMassExtinctionTimes <- sort( massExtinctionTimes )
    massExtinctionSurvivalProbabilities <- massExtinctionSurvivalProbabilities[ match(sortedMassExtinctionTimes,massExtinctionTimes) ]
    massExtinctionTimes <- sortedMassExtinctionTimes
  }
  # join the times of the rate changes and the mass-extinction events
  if ( length( rateChangeTimesLambda ) > 0 || length( rateChangeTimesMu ) > 0 || length( massExtinctionTimes ) > 0 ) {
    changeTimes <- sort( unique( c( rateChangeTimesLambda, rateChangeTimesMu, massExtinctionTimes ) ) )
  } else {
    changeTimes <- c()
  }
  # Expand lambda/mu/survival probabilities onto the merged epoch grid; any
  # epoch without an explicit value is filled from the previous epoch below.
  speciation <- rep(NaN,length(changeTimes)+1)
  extinction <- rep(NaN,length(changeTimes)+1)
  mep <- rep(NaN,length(changeTimes))
  speciation[1] <- lambda[1]
  if ( length(lambda) > 1 ) {
    speciation[ match(rateChangeTimesLambda,changeTimes)+1 ] <- lambda[ 2:length(lambda) ]
  }
  extinction[1] <- mu[1]
  if ( length(mu) > 1 ) {
    extinction[ match(rateChangeTimesMu,changeTimes)+1 ] <- mu[ 2:length(mu) ]
  }
  if ( length( massExtinctionSurvivalProbabilities ) > 0 ) {
    mep[ match(massExtinctionTimes,changeTimes) ] <- massExtinctionSurvivalProbabilities[ 1:length(massExtinctionSurvivalProbabilities) ]
  }
  # Carry rates forward through unset epochs; epochs without a mass
  # extinction get survival probability 1 (no event).
  for ( i in seq_len(length(changeTimes)) ) {
    if ( is.null(speciation[i+1]) || !is.finite(speciation[i+1]) ) {
      speciation[i+1] <- speciation[i]
    }
    if ( is.null(extinction[i+1]) || !is.finite(extinction[i+1]) ) {
      extinction[i+1] <- extinction[i]
    }
    if ( is.null(mep[i]) || !is.finite(mep[i]) ) {
      mep[i] <- 1.0
    }
  }
  rateChangeTimes <- changeTimes
  massExtinctionTimes <- changeTimes
  lambda <- speciation
  mu <- extinction
  massExtinctionSurvivalProbabilities <- mep

  # --- Likelihood -----------------------------------------------------------
  # Convert branching times to forward time (0 = root/origin, PRESENT = tips).
  PRESENT <- max(times)
  nTaxa <- length(times) + 1
  times <- PRESENT - sort(times,decreasing=TRUE)
  # if we condition on the MRCA, then we need to remove the root speciation event
  if ( MRCA == TRUE ) {
    times <- times[-1]
  }
  # set the uniform taxon sampling probability
  if (samplingStrategy == "uniform") {
    rho <- samplingProbability
  } else {
    rho <- 1.0
  }
  # initialize the log likelihood
  lnl <- 0
  # what do we condition on?
  # did we condition on survival?
  if ( CONDITION == "survival" || CONDITION == "taxa" )    lnl <- - tess.equations.pSurvival.rateshift(lambda,mu,rateChangeTimes,massExtinctionSurvivalProbabilities,rho,0,PRESENT,PRESENT,log=TRUE)
  # multiply the probability of a descendant of the initial species
  lnl <- lnl + tess.equations.p1.rateshift(lambda,mu,rateChangeTimes,massExtinctionSurvivalProbabilities,rho,0,PRESENT,log=TRUE)
  # add the survival of a second species if we condition on the MRCA
  if ( MRCA == TRUE ) {
    lnl <- 2*lnl
  }
  # did we condition on observing n species today
  if ( CONDITION == "taxa" )    lnl <- lnl + tess.equations.pN.rateshift(lambda,mu,rateChangeTimes,massExtinctionSurvivalProbabilities,rho,nTaxa,0,PRESENT,SURVIVAL=TRUE,MRCA,log=TRUE)
  # if we assume diversified sampling, we need to multiply with the probability that all missing species happened after the last speciation event
  if ( samplingStrategy == "diversified" ) {
    # We use equation (5) of Hoehna et al. "Inferring Speciation and Extinction Rates under Different Sampling Schemes"
    lastEvent <- times[length(times)]
    p_0_T <- 1.0 - tess.equations.pSurvival.rateshift(lambda,mu,rateChangeTimes,massExtinctionSurvivalProbabilities,1.0,0,PRESENT,PRESENT,log=FALSE) * exp((mu-lambda)*PRESENT)
    p_0_t <- 1.0 - tess.equations.pSurvival.rateshift(lambda,mu,rateChangeTimes,massExtinctionSurvivalProbabilities,1.0,lastEvent,PRESENT,PRESENT,log=FALSE)*exp((mu-lambda)*(PRESENT-lastEvent))
    F_t <- p_0_t / p_0_T
    # get an estimate of the actual number of taxa
    m <- round(nTaxa / samplingProbability)
    # remove the number of species that we started with
    k <- 1
    if ( MRCA == TRUE ) k <- 2
    lnl <- lnl + (m-nTaxa) * log(F_t) + lchoose(m-k,nTaxa-k)
  }
  # multiply the probability for the missing species
  if ( length(missingSpecies) > 0 ) {
    # compute the rate
    prev_time <- 0
    rate <- 0
    # add mass-extinction
    for (j in seq_len(length(rateChangeTimes)) ) {
      rate <- rate + ifelse( PRESENT >= rateChangeTimes[j], (mu[j] - lambda[j])*(rateChangeTimes[j]-prev_time) - log(massExtinctionSurvivalProbabilities[j]), 0 )
      prev_time <- ifelse( PRESENT >= rateChangeTimes[j], rateChangeTimes[j], 0)
    }
    # add the final rate interval
    rate <- rate + ifelse( PRESENT > prev_time, (mu[length(mu)] - lambda[length(lambda)])*(PRESENT-prev_time), 0 )
    # add sampling
    rate <- rate - log(samplingProbability)
    p_0_T <- 1.0 - exp( tess.equations.pSurvival.rateshift(lambda,mu,rateChangeTimes,massExtinctionSurvivalProbabilities,1.0,0,PRESENT,PRESENT,log=TRUE) + rate )
    # now iterate over the vector of missing species per interval
    lastEvent <- timesMissingSpecies
    # compute the rate
    prev_time <- lastEvent
    rate <- 0
    # add mass-extinction
    for (j in seq_len(length(rateChangeTimes)) ) {
      rate <- rate + ifelse( lastEvent < rateChangeTimes[j] & PRESENT >= rateChangeTimes[j], (mu[j] - lambda[j])*(rateChangeTimes[j]-prev_time) - log(massExtinctionSurvivalProbabilities[j]), 0 )
      prev_time <- ifelse( lastEvent < rateChangeTimes[j] & PRESENT >= rateChangeTimes[j], rateChangeTimes[j], lastEvent)
    }
    # add the final rate interval
    rate <- rate + ifelse( PRESENT > prev_time, (mu[length(mu)] - lambda[length(lambda)])*(PRESENT-prev_time), 0 )
    # add sampling
    rate <- rate - log(samplingProbability)
    p_0_t <- 1.0 - exp( tess.equations.pSurvival.rateshift(lambda,mu,rateChangeTimes,massExtinctionSurvivalProbabilities,1.0,lastEvent,PRESENT,PRESENT,log=TRUE) + rate )
    log_F_t <- log(p_0_t) - log(p_0_T)
    # get an estimate of the actual number of taxa
    m <- missingSpecies
    # remove the number of species that we started with
    lnl <- lnl + sum( m * log_F_t ) #+ lchoose(m-k,nTaxa-k)
  }
  # Epoch-aware speciation rate lookup: maps each branching time to the rate
  # of the epoch it falls in (findInterval over the sorted change times).
  if ( length(rateChangeTimes) > 0 ) {
    speciation <- function(times) {
      idx <- findInterval(times,rateChangeTimes)+1
      idx[ idx > length(lambda) ] <- length(lambda)
      return ( lambda[idx] )
    }
  } else {
    speciation <- function(times) rep(lambda[1],length(times))
  }
  # multiply the probability for each speciation time
  lnl <- lnl + sum( log(speciation(times) ) ) + sum(tess.equations.p1.rateshift(lambda,mu,rateChangeTimes,massExtinctionSurvivalProbabilities,rho,times,PRESENT,log=TRUE))
  # Guard against numerical NaN (e.g. log of a non-positive probability).
  if (is.nan(lnl)) lnl <- -Inf
  if ( log == FALSE ) {
    lnl <- exp(lnl)
  }
  return (lnl)
}
|
028a9b2dde42ed64c2411c923ee0493d2a7e7131
|
571ded08fd6515e01ebeea788688cbd238df7803
|
/R/benchplot.r
|
f322842259109374bad718d6e0b5e8532f1253d7
|
[] |
no_license
|
soh-i/Benchmarking
|
3391caf01d6738d525f4290eccc002841d4fc878
|
7f613545fcd10a652b47b6dca295036850af40c0
|
refs/heads/master
| 2020-05-20T10:07:05.950456
| 2013-11-11T15:53:01
| 2013-11-11T15:53:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 869
|
r
|
benchplot.r
|
library("ggplot2")

# Benchmark results (precision/recall per method); the answer set is
# DARNED hg19. Built in one rbind() call instead of four throwaway
# variables; `bench_data` also avoids masking utils::data().
bench_data <- rbind(
  data.frame(precision = 0.6178, recall = 0.0042, Label = "Zhu2013"),
  data.frame(precision = 0.3227, recall = 0.0635, Label = "PooledSamplesAlu"),
  data.frame(precision = 0.4168, recall = 0.0032, Label = "PooledSamplesRepetitiveNonAlu"),
  data.frame(precision = 0.2701, recall = 0.0020, Label = "PooledSamplesNonRepetitive")
)

# Precision-vs-recall scatter, one point per method.
# FIX: corrected the title typo "humna" -> "human".
g <- ggplot(
  bench_data,
  aes(x = precision, y = recall)) +
  geom_point(aes(colour = Label), shape = 19, size = 7) +
  ylim(0, 1) +
  xlim(0, 1) +
  labs(title = "Benchmarking test for human data, answer set is DARNED hg19",
       x = "Precision",
       y = "Recall"
  ) +
  theme_bw(base_size = 16, base_family = "Helvetica")
plot(g)
|
3a2b44ee9383a2ff5b8d4e7dddf2febf021752b2
|
91470b7555082ff514224bf775d2ecb3379fb2cd
|
/macro.R
|
a71aa41b8845d4413b5f7913fce325f5a30f788f
|
[] |
no_license
|
ChrisComiskey/Misc
|
5bbda1f7a784198045a6a0dc4bdbfe934e604468
|
8d218c45ce4734f080fbfe181f60f350c3b0535e
|
refs/heads/master
| 2020-03-28T02:40:31.797862
| 2018-08-28T22:00:54
| 2018-08-28T22:00:54
| 147,588,803
| 0
| 0
| null | 2018-09-05T22:57:24
| 2018-09-05T22:57:24
| null |
UTF-8
|
R
| false
| false
| 14,718
|
r
|
macro.R
|
devtools::load_all("/Users/cwcomiskey/Desktop/ODG/Macro-models/macro")
macro::depends() # load all dependencies
# Regression: lm(CPI ~ Top25, ...) =======
lin_reg <- lm(CPI ~ ., data = select(reg_dat, -date))
summary(lin_reg) # R^2: 0.9865
# Count the NA entries in a vector. sum() already treats logical TRUE as 1,
# so the as.numeric() coercion in the original was redundant.
sum_na <- function(x) sum(is.na(x))
apply(reg_dat, FUN = sum_na, MARGIN = 2) # NAs by column; a lot
ggplot() + geom_line(aes(x = residuals(lin_reg)))
# previous monthly changes (lag 1, 12) as covariates
reg_dat <- reg_dat %>% mutate(last_month = lag(CPI, n = 1)) # last month's change
lin_reg <- lm(CPI ~ ., data = select(reg_dat, -date))
summary(lin_reg)
# --> last month's change is not a significant predictor
# --> 12 months ago change is not a significant predictor
# Plot regression results ======
# Add fitted values to reg_dat for plotting by date ==
reg_dat <- reg_dat %>% mutate(index = rownames(reg_dat))
fitt <- cbind.data.frame(fitted(lin_reg), residuals(lin_reg))
fitt <- mutate(fitt, index = row.names(fitt))
colnames(fitt) <- c("fit", "residuals", "index")
reg_dat <- left_join(reg_dat, fitt)
# Manual melt for plotting ==
plot_dat <- reg_dat %>% select(date, CPI, fit, residuals) %>% drop_na()
plot_dat_CPI <- data.frame(plot_dat[,1:2], "CPI")
names(plot_dat_CPI) <- c("date", "Change", "Source")
plot_dat_fit <- data.frame(plot_dat[,c(1,3)], "Fit")
names(plot_dat_fit) <- c("date", "Change", "Source")
plot_dat2 <- rbind.data.frame(plot_dat_CPI, plot_dat_fit)
# Residuals: geom_point, acf, pacf =====
ggplot(data = plot_dat) +
geom_line(aes(y = residuals, x = date))
# ggsave("ModelResiduals.jpg", width = 8, height = 4)
autoplot(decompose(plot_dat$residuals))
autoplot(acf(plot_dat$residuals, na.action = na.pass, plot = FALSE, lag.max = 24)) +
ggtitle("Model Residual Autocorrelation") +
theme(plot.title = element_text(hjust = 0.5))
# ggsave("ModResidACF.jpg", width = 8, height = 4)
autoplot(pacf(plot_dat$residuals, na.action = na.pass, plot = FALSE, lag.max = 24)) +
ggtitle("Model Residual Partial Autocorrelation") +
theme(plot.title = element_text(hjust = 0.5))
# ggsave("ModResidPACF.jpg", width = 8, height = 4)
# Changes: CPI vs. Fit (plot) =====
ggplot(data = plot_dat2) +
geom_line(aes(x = date, y = Change, color = Source), size = 1.25)
ggsave("CPIvFit.jpg", width = 10, height = 5)
ggplot() +
geom_point(aes(x = fitted(lin_reg),
y = residuals.lm(lin_reg))) +
xlab("Fitted Values") + ylab("Residuals")
autoplot(lin_reg)[1:2]
# Train and test ======
train_dat <- reg_dat[1:1243,]
test_dat <- reg_dat[1244:1263,]
lin_reg2 <- lm(CPI ~ ., data = select(train_dat, -date))
preds <- predict.lm(lin_reg2, test_dat)
1- sum( (preds - test_dat$CPI)^2) / sum( (test_dat$CPI - mean(test_dat$CPI))^2 )
test_dat$pred <- predict(lin_reg2, test_dat)
train_dat <- train_dat %>% drop_na()
ggplot() +
geom_line(data = rbind.data.frame(train_dat[,1:2], test_dat[,1:2]),
aes(x = date, y = CPI), color = "red", size = 1.25) +
geom_line(data = test_dat, aes(x = date, y = pred),
color = "blue", size = 1.25) +
ylab("CPI Change") +
ggtitle(expression(paste("Linear Regression, ", R^{2}, " = 0.91")))
ggsave("CPIvFit_train_test.jpg", width = 10, height = 5)
# auto.arima(...) and plots ======
lin_reg <- lm(CPI ~ ., data = select(reg_dat, -date))
t <- auto.arima(lin_reg$residuals, max.p = 3, max.d = 3, max.q = 3, max.P = 3, max.Q = 3, max.D = 13 )
autoplot(acf(t$residuals, lag.max = 48, plot = FALSE)) +
ggtitle("New Residuals: Regression with AR(1) Errors") +
theme(plot.title = element_text(hjust = 0.5))
# ggsave("Mod+AR1Resid.jpg", height = 4, width = 8)
# arima(...) and plots =======
d <- drop_na(reg_dat) %>% select(-date)
mod <- arima(d$CPI, order = c(1,0,0), xreg = select(d, -CPI))
1 - sum( (fitted(mod) - d$CPI)^2) / sum( (d$CPI - mean(d$CPI))^2 ) # [1] 0.9882776
ggplot() + geom_point(aes(x = 1:99, y = residuals(mod)))
autoplot(acf(mod$residuals, lag.max = 48, plot = FALSE))
# arima(...) -- train and test ======
# Divide into training data and test data
d_train <- d[1:90,]
d_test <- d[91:99,]
# Fit model with training data, white noise residuals ==== #
lin_reg <- lm(CPI ~ ., data = d_train)
summary(lin_reg)
autoplot(acf(lin_reg$residuals, plot = FALSE))
preds <- predict.lm(lin_reg, d_test) # predictions on test data
1- sum( (preds - d_test$CPI)^2) / sum( (d_test$CPI - mean(d_test$CPI))^2 ) # 0.9317969
# arima(...) fit with AR(1) residuals ====== #
lin_reg <- arima(d_train$CPI, order = c(1,0,0), xreg = select(d_train, -CPI))
autoplot(acf(lin_reg$residuals, plot = FALSE))
preds <- predict(lin_reg, newxreg = select(d_test, -CPI))$pred
1- sum( (preds - d_test$CPI)^2) / sum( (d_test$CPI - mean(d_test$CPI))^2 ) # 0.9368632
# CPI-change ~= top25 %*% RIWs (incomplete) ======
# Create RIW df ====== #
# riws <- read_table("riws") %>%
# drop_na() %>%
# mutate(`Item and group` = gsub("\\.*", "" , riws$`Item and group`))
# strata_riws <- riws %>% filter(`Item and group` %in% strata_dat$item_name)
# names(strata_riws) <- c("item_name", "CPI-U", "CPI-W")
# strata_riws <- left_join(strata_riws, item_dat)
# devtools::use_data(strata_riws, overwrite = TRUE)
# ===================== #
# Match: "reg_dat" columns to "strata_riws" rows, for matrix mult ==== #
# strata_riws$item_code %in% names(reg_dat)
# names(reg_dat) %in% strata_riws$item_code)
riws25 <- strata_riws %>% filter(item_code %in% names(reg_dat))
reg_dat <- reg_dat %>% select(-SERF01, -date, -CPI) %>% drop_na()
riws25 <- riws25[match(
names(reg_dat)[-c(1,2)],
riws25$item_code
),
] # to make row order match reg_dat column order, for matrix mult
CPI <- data.frame(reg_dat[,c("date", "CPI")], "CPI")
colnames(CPI) <- c("date", "value", "cat")
calc <- data.frame(reg_dat[,c("date")], w_avg, "RIWs")
colnames(calc) <- c("date", "value", "cat")
l_reg <- data.frame(reg_dat[,c("date")], lin_reg$fitted.values, "Lin_Reg")
colnames(l_reg) <- c("date", "value", "cat")
plot_dat <- rbind.data.frame(CPI, calc, l_reg)
ggplot(data = plot_dat) +
geom_line(aes(x = date, y = value, color = cat), size = 1.25)
ggsave("CPI_RIWs_LinReg.jpg", width = 12, height = 5)
# lm(...), f_25, f_71 =====================================
# Note: 2015 - 2016 weights
# https://www.bls.gov/cpi/tables/relative-importance/home.htm
riws25 <- strata_riws %>% filter(item_code %in% names(reg_dat))
strata25 <- strata_dat %>%
filter(item_code %in% riws25$item_code)
# Create proper df for regression ========= %
for(i in 1:24){
if(i == 1) {
strata_reg_dat <- CPI %>% select(date, value)
names(strata_reg_dat) <- c("date", "CPI")
}
strata_i <- strata25 %>%
filter(item_code == riws25[i,"item_code"]) %>%
select(value, date)
strata_reg_dat <- full_join(strata_reg_dat, strata_i, by = "date")
strata_reg_dat <- within(strata_reg_dat, rm(series_id))
names(strata_reg_dat)[names(strata_reg_dat) == 'value'] <- paste(riws25[i,"item_code"])
if(i == 24) {
rm(strata_i, i)
strata_reg_dat <- drop_na(strata_reg_dat)
}
}
# Regression ========= %
lin_reg <- lm(CPI ~ ., data = select(strata_reg_dat, -date))
# Top 25 RIW calculation ========= %
riws25 <- riws25[match(
names(strata_reg_dat)[-c(1,2)],
riws25$item_code),]
riws <- as.vector(t(riws25[,2]))
CPI <- as.vector(strata_reg_dat[,2])
strata <- strata_reg_dat %>% select(-date, -CPI)
for(i in 2:100){
if(i ==2) CPI.hat25 <- CPI[1]
CPI.hat25[i] <- CPI[i-1]*(sum(riws*(strata[i,]/strata[i-1,]))/sum(riws))
if(i == 100) rm(i)
}
1- sum( (CPI.hat25 - strata_reg_dat$CPI)^2) / sum( (strata_reg_dat$CPI - mean(strata_reg_dat$CPI))^2 )
# CPI from 70 RIWs ================= #
strata_riws_ordered <- strata_riws[match(
names(strata70_reg_dat)[-c(1,2)],
strata_riws$item_code),]
riws <- as.vector(t(strata_riws_ordered[,2])); rm(strata_riws_ordered)
# CPI <- as.vector(strata70_reg_dat[,2])
strata <- strata70_reg_dat %>% select(-date, -CPI)
for(i in 2:100){
if(i ==2) CPI.hat <- CPI[1]
CPI.hat[i] <- CPI[i-1]*(sum(riws*(strata[i,]/strata[i-1,])))/100
}
1- sum( (CPI.hat - strata_reg_dat$CPI)^2) / sum( (strata_reg_dat$CPI - mean(strata_reg_dat$CPI))^2 )
# Plot ============== #
CPI <- data.frame(strata_reg_dat[,c("date", "CPI")], "CPI")
colnames(CPI) <- c("date", "value", "cat")
calc <- data.frame(strata_reg_dat[,c("date")], CPI.hat25, "RIW25")
colnames(calc) <- c("date", "value", "cat")
l_reg <- data.frame(strata_reg_dat[,c("date")], lin_reg$fitted.values, "Lin_Reg")
colnames(l_reg) <- c("date", "value", "cat")
calc70 <- data.frame(strata70_reg_dat[,c("date")], CPI.hat, "RIW70")
colnames(calc70) <- c("date", "value", "cat")
plot_dat <- rbind.data.frame(CPI, calc, l_reg, calc70); rm(CPI, calc70)
ggplot(data = plot_dat) +
geom_line(aes(x = date, y = value, color = cat), size = 1.75)
# ggsave("All.jpg", width = 12, height = 5)
# Changes plot ===========
CPI <- data.frame(strata_reg_dat$`date`[-1], diff(strata_reg_dat$`CPI`), "CPI")
colnames(CPI) <- c("date", "value", "cat")
calc <- data.frame(strata_reg_dat$`date`[-1], diff(CPI.hat25), "RIW25")
colnames(calc) <- c("date", "value", "cat")
l_reg <- data.frame(strata_reg_dat$`date`[-1], diff(lin_reg$fitted.values), "Lin_Reg")
colnames(l_reg) <- c("date", "value", "cat")
calc70 <- data.frame(strata_reg_dat$`date`[-1], diff(CPI.hat), "RIW70")
colnames(calc70) <- c("date", "value", "cat")
plot_dat2 <- rbind.data.frame(CPI, calc, l_reg, calc70); rm(CPI, calc, l_reg, calc70)
ggplot(data = plot_dat2) +
geom_line(aes(x = date, y = value, color = cat), size = 1.75) +
ggtitle("Month-to-month Changes") +
theme(plot.title = element_text(hjust = 0.5))
# ggsave("All_changes.jpg", width = 12, height = 5)
1- sum( (diff(CPI.hat) - diff(strata_reg_dat$`CPI`))^2) / sum( (diff(strata_reg_dat$`CPI`) - mean(diff(strata_reg_dat$`CPI`)))^2 )
# RIW25 = -0.2146562
# LinReg = 0.9744422
# RIW70 = 0.9445965
# 2010 CPI with calculated monthly weights =========================
# Create RIW df ====== #
# Build the 2009 CPI relative-importance-weight (RIW) table.
# Reads the file "2009RIWs_0708wts" (readr::read_table; path is relative to
# the current working directory) and joins it to the `item_dat` lookup from
# the package data.  Returns a data frame with columns
# Item_name / CPI_U / item_code / display_level.
# NOTE(review): gsub("\\.*", "", ...) strips runs of literal dots from the
# BLS expenditure-category names -- presumably dot-padding in the raw table;
# confirm against the source file.
load2009riws <- function(){
  riws2009 <- read_table("2009RIWs_0708wts") %>%
    drop_na() %>%
    mutate(Item_name = gsub("\\.*", "" , `Expenditure category`)) %>%
    select(Item_name, CPI_U = X2) %>%
    left_join(., item_dat, by = c("Item_name" = "item_name")) %>%
    # ...riws2009 has __(not sure)__ and item_dat == "Recorded music and music subscriptions"
    filter(display_level == 2 |
             Item_name %in% c("Airline fare", "Cable and satellite television and radio service"))
  # mutate(RIW_norm = CPI_U/sum(CPI_U) * 100)
  # Correct and add
  # Harmonise two item names / codes with the strata data.
  riws2009[riws2009$Item_name == "Airline fare", c("Item_name", "item_code")] <- c("Airline fares", "SETG01")
  riws2009[riws2009$Item_name == "Cable and satellite television and radio service", c("Item_name", "item_code")] <- c("Cable and satellite television service", "SERA02")
  # Append the one stratum missing from the published table (hard-coded row 68).
  riws2009[68,] <- NA
  riws2009[68,1] <- "Recorded music and music subscriptions"
  riws2009[68,2] <- 0.638
  riws2009[68,3] <- "SERA06"
  riws2009[68,4] <- 2
  return(riws2009)
  # Diagnostics -- intentionally unreachable (placed after return());
  # kept for interactive use.
  # missing <- names(strata70_reg_dat)[!(names(strata70_reg_dat) %in% riws2009$item_code)]
  # item_dat[item_dat$item_code %in% missing,]
  # unique(filter(strata_dat, item_code %in% missing)[,"item_name"])
}
# riws2009 <- load2009riws()
data("riws2009"); head(riws2009)
data("strata70_reg_dat"); head(strata70_reg_dat)
w <- riws2009[match(
names(strata70_reg_dat)[-c(1,2)],
riws2009$item_code),]
s70 <- strata70_reg_dat[,-c(1,2)] # shorter name! all strata
cpi.hat <- data.frame(index = strata70_reg_dat$CPI[1]) # container
CPI <- data.frame(index = strata70_reg_dat$CPI) # CPI
for(m in 1:29){
if(m == 1){
weights <- as.data.frame(t(w[,"CPI_U"])) # initialize calculated weights cont.
colnames(weights) <- w$item_code # name weights same as index names
} else{
if(year(strata70_reg_dat$date[m]) > 2011){rm(m); break}
weights[m,] <- weights[1,] * (s70[m,] / s70[1,]) * (CPI[1,1] / CPI[m,1])
}
} # Calculate weights for CPI
for(m in 2:100){
if(year(strata70_reg_dat$date[m]) > 2011){break; rm(m)}
cpi.hat[m,1] <- CPI[m-1,1] * sum(weights[m-1,] * (s70[m,] / s70[m-1,]))/100
if(m == 100) rm(m)
} # Calculate CPI
1- sum( (diff(cpi.hat$index) - diff(CPI$index[1:25]))^2) / sum( (diff(CPI$index[1:25]) - mean(diff(CPI$index[1:25])))^2 )
CPIs <- data.frame(date = strata70_reg_dat$date[1:25],
value = cpi.hat$index, Cat = "CPI_hat")
CPI.hats <- data.frame(date = strata70_reg_dat$date[1:25],
value = CPI$index[1:25], Cat = "CPI")
# CPIs <- data.frame(date = strata70_reg_dat$date[2:25],
# value = diff(cpi.hat$index), Cat = "CPI_hat")
# CPI.hats <- data.frame(date = strata70_reg_dat$date[2:25],
# value = diff(CPI$index[1:25]), Cat = "CPI")
plot_dat <- rbind.data.frame(CPIs, CPI.hats)
ggplot(data = plot_dat) +
geom_line(aes(x = date, y = value, color = Cat), size = 1.5) +
ggtitle("CPI Month-to-month Change Estimates with Calculated Weights") +
theme(plot.title = element_text(hjust = 0.5))
# ggsave("Weights.jpg", width = 12, height = 5)
# 2010 CPI, calculated monthly weights, Top 25 only =================
# Set up the top-25-strata CPI weight calculation.
# NOTE(review): every object created here (`w`, `s25`, `weights25`) is
# LOCAL to the function, and the last expression is `rm(w)`, so prep()
# returns NULL and discards all of its work.  The loop below the prep()
# call then references `weights25` and `s25`, which will only exist if
# they were previously created in the global environment.  This function
# should probably return them (e.g. in a named list), or its body should
# be run at top level -- TODO confirm intended behaviour.
prep <- function(){
  # Load the top-25 regression data set shipped with the package.
  data("reg_dat")
  # Keep only the 2009 RIW rows whose item codes appear in reg_dat.
  w <- riws2009 %>% filter(item_code %in% names(reg_dat)); rm(reg_dat)
  # Strata series restricted to pre-2012 and the matched item codes.
  s25 <- strata70_reg_dat %>% drop_na() %>%
    filter(year(date) < 2012) %>%
    select(one_of("date", "CPI", w$item_code))
  # Reorder the weight rows to match the column order of s25.
  w <- w[match(names(s25[-c(1,2)]), w$item_code),]
  # Container: one row per month, first two cols date and estimated CPI.
  weights25 <- cbind.data.frame(date = s25$date, CPI = 0, matrix(ncol = 24, nrow = 25))
  colnames(weights25) <- c("date", "CPI.hat", w$item_code)
  # Seed month 1 with the published 2009 relative-importance weights.
  weights25[1,3:26] <- w$CPI_U; rm(w)
}
prep()
for(m in 2:25){
weights25[m,3:26] <- weights25[1,3:26] * (s25[m,3:26] / s25[1,3:26]) * (s25[1,"CPI"] / s25[m,"CPI"])
if(m == 25) {rm(m); weights25$CPI.hat[1] <- s25$CPI[1]}
} # Calculate weights25 for CPI
for(m in 2:25){
weights25[m,"CPI.hat"] <- s25[m-1,"CPI"] *
sum(weights25[m-1,3:26] * (s25[m,3:26] / s25[m-1,3:26]))/sum(weights25[m-1,3:26])
if(m == 25) rm(m)
} # Calculate CPI
CPIs <- data.frame(date = strata70_reg_dat$date[2:25],
value = diff(cpi.hat$index), Cat = "RIW70")
CPI.hats <- data.frame(date = strata70_reg_dat$date[2:25],
value = diff(CPI$index[1:25]), Cat = "CPI")
CPI25 <- data.frame(date = weights25$date[2:25], value = diff(weights25$CPI.hat), Cat = "RIW25")
plot_dat <- rbind.data.frame(CPIs, CPI.hats, CPI25)
ggplot(data = plot_dat) +
geom_line(aes(x = date, y = value, color = Cat), size = 1.5) +
ggtitle("CPI Month-to-month Change Estimates with Calculated Weights") +
theme(plot.title = element_text(hjust = 0.5))
ggsave("CPI_70_25.jpg", width = 12, height = 5)
|
52afa7c7385b71686d66f892b85d3f7f0b1c8456
|
1aa92f850ce632811aaa74d769527a8037d8c484
|
/tests/check_set_threshold_type.R
|
2d67ac8e60c8fdc8012b9e4deed29ab265c08b76
|
[] |
no_license
|
cran/mvord
|
253c6e7deaf07bf5ac111571b6db307219f1597c
|
6699126154748d7510647afc7bda27066aad3549
|
refs/heads/master
| 2021-06-02T15:11:40.519370
| 2021-03-17T12:20:12
| 2021-03-17T12:20:12
| 102,715,261
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,279
|
r
|
check_set_threshold_type.R
|
library(mvord)
rho <- list()
rho$ndim <- 5
rho$error.structure$type <- "correlation"
rho$intercept.type = "fixed"
#rho$ntheta <- 1:5
#rho$threshold.values <- lapply(1:rho$ndim, function(j) rep(NA,rho$ntheta[j]))
rho$threshold.values <- list(c(NA),
c(NA,NA),
c(NA,NA,NA),
c(NA,NA,NA,NA),
c(NA,NA,NA,NA,NA))
rho$formula <- y ~ 0 + X1 + X2 + X3
rho$intercept = FALSE
rho$ntheta <- sapply(seq_len(rho$ndim), function(j) length(rho$threshold.values[[j]]))
mvord:::check(identical(mvord:::set_threshold_type(rho), "flexible"))
rho$error.structure$type <- "correlation"
rho$threshold.values <- list(c(1),
c(2,NA),
c(3,NA,NA),
c(4,NA,NA,NA),
c(5,NA,NA,NA,NA))
rho$formula <- y ~ X1 + X2 + X3
rho$intercept = TRUE
rho$intercept.type = "flexible"
rho$ntheta <- sapply(seq_len(rho$ndim), function(j) length(rho$threshold.values[[j]]))
mvord:::check(identical(mvord:::set_threshold_type(rho), "fix1first"))
rho$error.structure$type <- "correlation"
rho$threshold.values <- list(c(1),
c(2,3),
c(3,4,NA),
c(4,5,NA,NA),
c(5,6,NA,NA,NA))
rho$formula <- y ~ X1 + X2 + X3
rho$intercept = TRUE
rho$intercept.type = "flexible"
rho$ntheta <- sapply(seq_len(rho$ndim), function(j) length(rho$threshold.values[[j]]))
mvord:::check(identical(mvord:::set_threshold_type(rho), "fix2first"))
rho$error.structure$type <- "covariance"
rho$threshold.values <- list(c(1),
c(2,3),
c(3,4,NA),
c(4,5,NA,NA),
c(5,6,NA,NA,NA))
rho$formula <- y ~ X1 + X2 + X3
rho$intercept = TRUE
rho$intercept.type = "flexible"
rho$ntheta <- sapply(seq_len(rho$ndim), function(j) length(rho$threshold.values[[j]]))
rho$binary <- TRUE
#error here
mvord:::check(!is.null(attr(try(
mvord:::set_threshold_type(rho)
, silent = TRUE), "condition")))
rho$error.structure$type <- "covariance"
rho$threshold.values <- list(c(1),
c(2,3),
c(3,NA,4),
c(4,NA,NA,5),
c(5,NA,NA,NA,6))
rho$formula <- y ~ X1 + X2 + X3
rho$intercept = TRUE
rho$intercept.type = "flexible"
rho$ntheta <- sapply(seq_len(rho$ndim), function(j) length(rho$threshold.values[[j]]))
#error here
#check(identical(set_threshold_type(rho), "fix2firstlast"))
mvord:::check(!is.null(attr(try(
mvord:::set_threshold_type(rho)
, silent = TRUE), "condition")))
rho$error.structure$type <- "correlation"
rho$threshold.values <- list(c(1),
c(2,3),
c(3,NA,4),
c(4,NA,NA,5),
c(5,NA,NA,NA,6))
rho$formula <- y ~ X1 + X2 + X3
rho$intercept = TRUE
rho$intercept.type = "flexible"
rho$ntheta <- sapply(seq_len(rho$ndim), function(j) length(rho$threshold.values[[j]]))
mvord:::check(identical(mvord:::set_threshold_type(rho), "fix2firstlast"))
rho$error.structure$type <- "covariance"
rho$threshold.values <- list(c(1),
c(2,NA),
c(3,NA,NA),
c(4,NA,NA,NA),
c(5,NA,NA,NA,NA))
rho$formula <- y ~ 0 + X1 + X2 + X3
rho$intercept = FALSE
rho$intercept.type = "fixed"
rho$ntheta <- sapply(seq_len(rho$ndim), function(j) length(rho$threshold.values[[j]]))
mvord:::check(identical(mvord:::set_threshold_type(rho), "fix1first"))
#----------------------------------------------------------------------------------------------------
#ERRORS
rho$error.structure$type <- "covariance"
rho$intercept.type = "fixed"
rho$threshold.values <- list(c(NA),
c(NA,NA),
c(NA,NA,NA),
c(NA,NA,NA,NA),
c(NA,NA,NA,NA,NA))
rho$formula <- y ~ 0 + X1 + X2 + X3
rho$intercept = FALSE
rho$ntheta <- sapply(seq_len(rho$ndim), function(j) length(rho$threshold.values[[j]]))
mvord:::check(!is.null(attr(try(mvord:::set_threshold_type(rho), silent = TRUE), "condition")))
# e <- try(set_threshold_type(rho), silent = TRUE)
# e <- try(stop("throwing a try-error"))
# !is.null(attr(e, "condition"))
# !is.null(attr(try(set_threshold_type(rho), silent = TRUE), "condition"))
rho$error.structure$type <- "covariance"
rho$intercept.type = "flexible"
rho$threshold.values <- list(c(1),
c(2,NA),
c(3,NA,NA),
c(4,NA,NA,NA),
c(5,NA,NA,NA,NA))
rho$formula <- y ~ 1 + X1 + X2 + X3
rho$intercept = TRUE
rho$ntheta <- sapply(seq_len(rho$ndim), function(j) length(rho$threshold.values[[j]]))
mvord:::check(!is.null(attr(try(mvord:::set_threshold_type(rho), silent = TRUE), "condition")))
rho$error.structure$type <- "correlation"
rho$intercept.type = "flexible"
rho$threshold.values <- list(c(NA),
c(NA,NA),
c(NA,NA,NA),
c(NA,NA,NA,NA),
c(NA,NA,NA,NA,NA))
rho$formula <- y ~ 1 + X1 + X2 + X3
rho$intercept = TRUE
rho$ntheta <- sapply(seq_len(rho$ndim), function(j) length(rho$threshold.values[[j]]))
mvord:::check(!is.null(attr(try(mvord:::set_threshold_type(rho), silent = TRUE), "condition")))
rho$error.structure$type <- "correlation"
rho$threshold.values <- list(c(1),
c(2,NA),
c(3,4,5),
c(4,NA,NA,NA),
c(5,NA,NA,NA,NA))
rho$formula <- y ~ X1 + X2 + X3
rho$intercept = TRUE
rho$intercept.type = "flexible"
rho$ntheta <- sapply(seq_len(rho$ndim), function(j) length(rho$threshold.values[[j]]))
mvord:::check(!is.null(attr(try(mvord:::set_threshold_type(rho), silent = TRUE), "condition")))
|
6672f7c740baaf038d1b9089d90ef7d623f3e16b
|
8e5f9b92af6688c11fe1f6e285869a2a6314b3ed
|
/R/cal_ORA_cluster_similarity.R
|
f78fe6e9b7ad470c55b8e1efb0209b4668e4066b
|
[] |
no_license
|
huzhenyu115/LEGO
|
c9e6d68f698577399843520a8c5d0600dde66cb1
|
92353393807ab7eb3e25ac61c4f4a75e8a41479a
|
refs/heads/master
| 2022-06-23T02:41:57.650298
| 2020-05-08T02:42:18
| 2020-05-08T02:42:18
| 259,661,359
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,688
|
r
|
cal_ORA_cluster_similarity.R
|
# this function aims to calculate similarity between cluster results
#
# NOTE(review): this function is a partial Perl-to-R translation and does
# NOT parse as R.  Untranslated Perl constructs remaining below include:
#   - `split "\t"` / `join "_", ...`   (Perl split/join; R: strsplit/paste)
#   - `tmp <- hash()` + `map{tmp{$_}=1}n1_arr`   (Perl hash + map block)
#   - `D++`, `t1 += ...`, `ss+=...`    (R has no ++ or += operators)
#   - `foreach n (n2_arr){...}`        (Perl foreach)
#   - `cond ? a : b` ternaries         (R: if/else or ifelse)
#   - `keys(result)` and triple-key hash indexing `result[geo,cluster_id,name]`
#   - the regex literal "^Cluster(.*)\:" contains the invalid R escape \:
# It must be rewritten before it can run; inline notes mark each spot.
cal_ORA_cluster_similarity <- function(enrich_file, geneset_over_file){
  #enrich_file = ARGV[0] ## output of ORA_filter.pl
  #geneset_over_file = $ARGV[1]
  #$geneset_over_file = "demo/GeneSet_human.txt_FC2_human_overlap_union.txt"
  geneset_over_file_input <- read.table(geneset_over_file, header = F, sep = "\t", fill = TRUE)
  geneset_over_file_input <- as.matrix(geneset_over_file_input)
  # Pairwise gene-set similarity matrix, filled from the overlap file.
  score <- matrix(nrow=length(unique(geneset_over_file_input[,1])),ncol=length(unique(geneset_over_file_input[,2])))
  row_score <- unique(geneset_over_file_input[,1])
  col_score <- unique(geneset_over_file_input[,2])
  # NOTE(review): `length(score[,1])` is a single number, so this loop runs
  # exactly once -- almost certainly meant seq_len(nrow(score)).
  for(i in length(score[,1])){
    gs1 = geneset_over_file_input[i,1]
    gs2 = geneset_over_file_input[i,2]
    score[gs1,gs2] = geneset_over_file_input[i,3]
    score[gs2,gs1] = geneset_over_file_input[i,3]
  }
  ## enrich_file input
  enrich_file_input <- read.table(enrich_file, header = F, sep = "\t", fill = TRUE)
  enrich_file_input <- as.matrix(enrich_file_input)
  # NOTE(review): same single-iteration loop bug as above, and `gs` is never
  # defined (it was presumably Perl's per-line variable $_).
  for(i in length(enrich_file_input[,1])){
    # NOTE(review): str_extract() returns a character/NA, not a logical,
    # so using it directly as an if() condition is an error in R.
    if(str_extract(gs, "^Results for (.*)")){
      geo = str_extract(gs, "^Results for (.*)")
      next
    }
    # NOTE(review): "\:" is an invalid escape in an R string literal.
    if(str_extract(gs, "^Cluster(.*)\:")){
      cluster_id = str_extract(gs, "^Cluster(.*)\:")
      next
    }
    # NOTE(review): Perl split/join syntax, not R.
    a = split "\t"
    for(each in a){
      tmp = split "_",each
      name = join "_",tmp[0:(length(tmp)-1)]
      result[geo,cluster_id,name] = 1
    }
  }
  ##
  # NOTE(review): keys() is Perl; in R this would be names().
  all_geo = keys(result)
  for(i in all_geo){
    cluster_id_1 = keys(result[[i]])
    for(j in all_geo){
      cluster_id_2 = keys(result[[j]])
      ss = 0
      for(c1 in cluster_id_1){ ## for each cluster 1, find max
        max_score = 0
        for(c2 in cluster_id_2){
          n1_arr = keys(result[i,c1])
          n2_arr = keys(result[j,c2])
          ## count
          # NOTE(review): Perl hash + map block, not valid R.
          tmp <- hash()
          map{tmp{$_}=1}n1_arr
          D=0
          # NOTE(review): Perl foreach and ++ operator, not valid R.
          foreach n (n2_arr){
            if(tmp[[n]]){
              D++
            }
          }
          B = length(n1_arr)
          C = length(n2_arr)
          #A = B+C-D
          # Overlap score: shared count over the smaller set size.
          if(B<C){
            tmp_score = D/B
          }else{
            tmp_score = D/C
          }
          t1 = 0
          for(n1 in n1_arr){
            tmp_max_score = 0
            for(n2 in n2_arr){
              if(score[n1,n2]){
                # NOTE(review): C-style ternary, not valid R.
                tmp_max_score = (tmp_max_score>score[n1,n2])?tmp_max_score:score[n1,n2]
              }
              if(n1 == n2){
                tmp_max_score = 1
              }
            }
            # NOTE(review): += is not an R operator.
            t1 += tmp_max_score
          }
          tmp_score = t1/(length(n1_arr))
          tmp_max_score = (max_score>tmp_score)?max_score:tmp_score
          max_score = (max_score>tmp_score)?max_score:tmp_score
        }
        #print i."\t".j."\t".max_score."\n"
        ss+=max_score
      }
      score[i,j] = ss/(length(cluster_id_1))
    }
  }
  ## final score
  # Symmetrise and print the pairwise similarity between GEO data sets.
  for(i in all_geo){
    for(j in all_geo){
      final = (score[i,j]+score[j,i])/2
      cat(i, "\t", j, "\t", final, "\n")
    }
  }
}
|
68db491727b87cbd7bc17c3209f19a1426f73f22
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/patentsview/vignettes/getting-started.R
|
18c2ca1094948e64486998f2bc723cef0011d49c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,701
|
r
|
getting-started.R
|
## ---- echo = FALSE, message = FALSE--------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ------------------------------------------------------------------------
library(patentsview)
search_pv(
query = '{"_gte":{"patent_date":"2007-01-01"}}',
endpoint = "patents"
)
## ------------------------------------------------------------------------
qry_funs$gte(patent_date = "2007-01-01")
## ------------------------------------------------------------------------
with_qfuns(
and(
gte(patent_date = "2007-01-01"),
text_phrase(patent_abstract = c("computer program", "dog leash"))
)
)
## ------------------------------------------------------------------------
search_pv(
query = '{"_gte":{"patent_date":"2007-01-01"}}',
endpoint = "patents",
fields = c("patent_number", "patent_title")
)
## ------------------------------------------------------------------------
retrvble_flds <- get_fields(endpoint = "patents")
head(retrvble_flds)
## ------------------------------------------------------------------------
search_pv(
query = qry_funs$eq(inventor_last_name = "chambers"),
page = 2, per_page = 150 # gets records 150 - 300
)
## ------------------------------------------------------------------------
search_pv(
query = qry_funs$eq(inventor_last_name = "chambers"),
all_pages = TRUE
)
## ------------------------------------------------------------------------
# Here we are using the patents endpoint
search_pv(
query = qry_funs$eq(inventor_last_name = "chambers"),
endpoint = "patents",
fields = c("patent_number", "inventor_last_name", "assignee_organization")
)
## ------------------------------------------------------------------------
# While here we are using the assignees endpoint
search_pv(
query = qry_funs$eq(inventor_last_name = "chambers"),
endpoint = "assignees",
fields = c("patent_number", "inventor_last_name", "assignee_organization")
)
## ------------------------------------------------------------------------
res <- search_pv(
query = "{\"patent_number\":\"5116621\"}",
fields = c("patent_date", "patent_title", "patent_year")
)
# Right now all of the fields are stored as characters:
res
# Use more appropriate data types:
cast_pv_data(data = res$data)
## ------------------------------------------------------------------------
query <- with_qfuns(
text_any(patent_abstract = 'tool animal')
)
## ------------------------------------------------------------------------
query_1a <- with_qfuns(
and(
text_any(patent_abstract = 'tool animal'),
lte(patent_date = "2010-01-01")
)
)
query_1b <- with_qfuns(
and(
text_any(patent_abstract = 'tool animal'),
gt(patent_date = "2010-01-01")
)
)
## ------------------------------------------------------------------------
# Create field list
asgn_flds <- c("assignee_id", "assignee_organization")
subent_flds <- get_fields("assignees", c("applications", "gov_interests"))
fields <- c(asgn_flds, subent_flds)
# Pull data
res <- search_pv(
query = qry_funs$contains(inventor_last_name = "smith"),
endpoint = "assignees",
fields = fields
)
res$data
## ------------------------------------------------------------------------
library(tidyr)
# Get assignee/application data:
res$data$assignees %>%
unnest(applications) %>%
head()
# Get assignee/gov_interest data:
res$data$assignees %>%
unnest(gov_interests) %>%
head()
## ------------------------------------------------------------------------
unnest_pv_data(data = res$data, pk = "assignee_id")
|
b3a880249e7a18eb42af25e67576126995b855c0
|
6feaf86d6b090b2bfce36ded29df837e2355a65c
|
/plot6.R
|
1f4ce1b960f071ded97cdaff7378c8a6dacbff30
|
[] |
no_license
|
Arijit-Nath/ExData_Plotting2
|
e20c5d01976942bf183688d923a94d1d6247ab21
|
11463ad9c0f0b12cafce3e2a62c86f5c30381d07
|
refs/heads/master
| 2020-03-31T17:57:04.099289
| 2018-10-10T16:11:08
| 2018-10-10T16:11:08
| 152,440,170
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,138
|
r
|
plot6.R
|
# Plot 6: compare PM2.5 motor-vehicle ("ON-ROAD") emissions between
# Baltimore City (fips 24510) and Los Angeles County (fips 06037).
#Load libraries
library(ggplot2)
library(stringr)
library(dplyr)  # was missing: provides %>%, filter(), group_by(), summarise()
#Data files are stored in /DATA/ folder
#No need to read if it has already been read
if (!exists("NEI")) {
  NEI <- readRDS("./data/summarySCC_PM25.rds")
}
if (!exists("SCC")) {
  SCC <- readRDS("./data/Source_Classification_Code.rds")
}
#Comparison between Baltimore City vs LA County based on vehicle emission(PM2.5)
comp_data <- NEI %>%
  filter(fips %in% c("24510", "06037"), type == "ON-ROAD") %>%
  group_by(year, fips) %>%
  summarise(total = sum(Emissions))
# Replace with actual city names, so that city name comes instead of county number
loc <- str_replace_all(comp_data$fips, c("06037" = "LA County", "24510" = "Baltimore City"))
# Convert the year into a factor so each survey year gets its own discrete bar.
# (The original transform(comp_data, factor(comp_data$year)) only appended an
# unnamed extra column and left `year` numeric.)
comp_data$year <- factor(comp_data$year)
png("Plot6.png")
# Renamed from `plot` to avoid masking base::plot().
p <- ggplot(comp_data, aes(x = year, y = total)) +
  geom_bar(stat = "identity", position = "dodge", aes(fill = loc)) +
  ggtitle("On Road Emissions(PM2.5) Baltimore City vs LA County") +
  labs(x = "Year", y = "Emissions PM2.5 (tons)")
print(p)
dev.off()
|
ebf48c871f4027449f2969820b0a6d0a200e7eee
|
5149701f86a0cfcc16724af6d3133d404e61cf23
|
/SPP by IP (pointer).R
|
49f444e89e2706316336530b70e2e72aec71866c
|
[] |
no_license
|
takanari-seito/R-codes
|
bc39880dc2b2a0977bc587b0a1be9970a9b0830c
|
14e90e70bca2e858245248f2b18fb59a62e30e92
|
refs/heads/master
| 2020-12-28T22:46:46.195011
| 2015-09-29T08:56:57
| 2015-09-29T08:56:57
| 37,835,712
| 0
| 0
| null | null | null | null |
SHIFT_JIS
|
R
| false
| false
| 1,727
|
r
|
SPP by IP (pointer).R
|
#@@@ Convert graph data (edge-list / "pointer" form) into integer-programming
#    input, and solve the shortest-path problem via integer programming @@@
### Read the edge data frame (edge v -> u with weight w). ###
### The file must be pre-sorted (e.g. in Excel) before use. ###
data <-read.table("graph3(pointer).txt") # change as needed
names(data) <-c("v","u","w")
# number of vertices n
n <-max(data$v,data$u)
# number of edges ne
ne <-nrow(data)
# source vertex s
s <-1 # change as needed
# sink vertex t
t <-18 # change as needed
### LP matrix m (one column per edge, plus "=<>" relation and RHS "y") ###
#objective function
d <-matrix(0:0,ncol=(ne+2))
for(i in 1:ne){d[i] <-data[i,3]}
m <-data.frame(d)
names(m) <-c(1:ne,"=<>","y")
#subject to
d <-m
d[,] <-0
for(i in 1:(n+ne)){m <-rbind(m,d)}
# Flow-conservation rows: edge leaving v contributes -1, entering u +1;
# the last ne rows force each edge variable to be >= 0.
for(i in 1:ne){
v <-data[i,1]
u <-data[i,2]
if(v!=t){m[(v+1),i] <-(-1)}
if(u!=s){m[(u+1),i] <-1}
m[(i+n+1),i] <-1
m[(i+n+1),(ne+1)] <-">="
}
# RHS: -1 at the source, +1 at the sink, 0 elsewhere.
# NOTE(review): `v` is assigned here but never used in this loop -- verify.
for(i in 1:n){
v <-data[i,1]
m[(i+1),(ne+1)] <-"="
m[(i+1),(ne+2)] <-if(i==s){-1}else{if(i==t){1}else{0}}
}
### Convert to lpSolve input format ###
f.obj <-c(m[1,1:ne])
f.con <-m[2:(n+ne+1),1:ne]
f.dir <-m[2:(n+ne+1),(ne+1)]
f.rhs <-m[2:(n+ne+1),(ne+2)]
# Solve the integer program.
library(lpSolve)
result <-lp("min",f.obj,f.con,f.dir,f.rhs,int.vec=1:ne)$solution # change "min" to "max" for the longest-path problem (only when the graph has no cycles)
data <-cbind(data,result)
# shortest distance d
d=0
for(i in 1:ne){d <-d+data[i,3]*data[i,4]}
### Adjacency-matrix representation of the graph and of the solution ###
wm <-matrix(nrow=n,ncol=n)
wm[,] <-Inf
for(i in 1:ne){wm[(data[i,1]),(data[i,2])] <-data[i,3]}
em <-matrix(nrow=n,ncol=n)
em[,] <-0
for(i in 1:ne){if(data[i,4]==1){em[(data[i,1]),(data[i,2])] <-1}}
# Path reconstruction: from s, repeatedly follow the selected edge (em == 1).
path <-s
v <-s
while(v!=t){
i <-1
while(em[v,i]!=1){
i=i+1
}
v <-i
path <-paste(path,v)
}
### Display results ###
m
data
wm
em
d
path
|
bc1c1949900233fc2da5c2d44c4209ecfa76c809
|
1d9019bf76931acd5a25ff83af59fadf49a41767
|
/R/0_functions.R
|
b7ef89e0cb8c114301560dff09ad8cdbef3b4177
|
[] |
no_license
|
GatesDupont/scr_design_sims
|
55b0c8a6f1fab770f143252c6cfd4253cd56c318
|
6e620d229b17dba95520b0dc495d228441a4c377
|
refs/heads/master
| 2023-01-20T08:24:09.016921
| 2020-11-19T21:39:50
| 2020-11-19T21:39:50
| 249,788,566
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,773
|
r
|
0_functions.R
|
# Gates Dupont #
# gdupont@umass.edu #
# Sept. '19 - July '20 #
# # # # # # # # # # # #
require(oSCR)
require(dplyr)
require(NLMR)
require(raster)
require(viridis)
require(stringr)
require(landscapetools)
select = dplyr::select
"RIGHT"
# Return the rightmost `num_char` characters of each element of `text`.
right <- function(text, num_char) {
  len <- nchar(text)
  substr(text, len - num_char + 1, len)
}
"SIMPLE SS PLOT"
# Plot to check designs
# Quick visual check of a sampling design: state-space pixels (SS) in gray,
# all candidate trap locations (TT) in orange, and the selected trap
# locations (design) in blue, drawn on a common 1:1-aspect plot.
plot_design = function(SS, TT, design){
  plot(SS, asp=1, col="gray80", cex=0.2)
  points(TT, pch=20, col="orange", cex=2)
  points(design, pch=20, col="blue", cex=2.5)
}
"PLANAR GRADIENT"
# Fixed version of nlm_planargradient
# Fixed version of NLMR's nlm_planargradient: a planar gradient raster of
# dimension nrow x ncol whose slope points in `direction` (degrees; random
# when NA). Rescaled to [0, 1] via util_rescale() unless rescale = FALSE.
r.nlm_planargradient <- function(ncol, nrow, resolution = 1, direction = NA, rescale = TRUE) {
  # Same input validation as the upstream function.
  checkmate::assert_count(ncol, positive = TRUE)
  checkmate::assert_count(nrow, positive = TRUE)
  checkmate::assert_numeric(direction)
  checkmate::assert_logical(rescale)

  # Draw a random gradient direction when none was supplied.
  if (is.na(direction)) {
    direction <- stats::runif(1, 0, 360)
  }

  # Decompose the compass direction into east/south components.
  east <- sin((pi / 180) * direction)
  south <- -cos((pi / 180) * direction)

  # Column- and row-index grids, combined into the planar surface.
  col_idx <- matrix(0:(ncol - 1), nrow, ncol, byrow = TRUE)
  row_idx <- matrix(0:(nrow - 1), nrow, ncol, byrow = FALSE)
  gradient <- raster::raster(south * row_idx + east * col_idx)
  raster::extent(gradient) <- c(0, ncol(gradient) * resolution,
                                0, nrow(gradient) * resolution)

  if (rescale == TRUE) {
    gradient <- util_rescale(gradient)
  }
  gradient
}
"DENSIFY"
# Attach a density surface to a state-space grid.
#
# SS: grid with columns X, Y. Returns SS with two added columns:
#   density -- multinomial cell probabilities, softmax of (b0 + b1*surface)
#   surface -- the underlying covariate value for each cell
# landscape: "uniform", "directional" (smooth gradient) or "patchy".
# d.beta: coefficient linking the surface to density.
# NOTE(review): argument N is never used inside this function -- confirm it
# is only kept for call-site compatibility.
densify = function(SS, N = 300, landscape = NA, d.beta = 3, seed){
  set.seed(seed)
  SS = SS[,c("X", "Y")]
  # Check parameters (NA landscape also fails here, since NA %in% ... is FALSE)
  if(!(landscape %in% c("uniform", "directional", "patchy"))){
    stop("Density must be one of: uniform, directional, patchy")
  }
  # Get resolution: X-difference between the two lowest-Y cells
  rr = SS %>%
    as.data.frame %>%
    arrange(Y) %>%
    select(X) %>%
    slice(1:2) %>%
    pull(X) %>%
    diff %>%
    as.numeric
  # Calculate number of pixels in each direction (rectangular over the area)
  l.nrow = SS %>% as.data.frame %>% pull(Y) %>% unique %>% sort %>% length
  l.ncol = SS %>% as.data.frame %>% pull(X) %>% unique %>% sort %>% length
  # Pull the max n pixels for square
  nside = max(l.ncol, l.nrow)
  # Pull the length of the longer side
  lside = max(
    max(SS[,"X"]) - min(SS[,"X"]),
    max(SS[,"Y"]) - min(SS[,"Y"]))
  # Generate the full extent rectangle (might not work for negative axes)
  full = expand.grid(X = seq(min(SS[,"X"]), min(SS[,"X"]) + lside, rr),
                     Y = seq(min(SS[,"Y"]), min(SS[,"Y"]) + lside, rr))
  # Generate densities
  if(landscape == "uniform"){
    surface0 = 1
    surface = 1
  }
  if(landscape == "directional"){
    # Generate random landscape; long autocorrelation range -> smooth gradient
    l = nlm_gaussianfield(ncol = nside, nrow = nside, resolution = rr, nug=0,
                          user_seed = seed, rescale = TRUE,
                          autocorr_range = round(nside))
    # Assign the values to the full extent
    full$Z = l@data@values
    # Extract raster values from the full extent rectangle to the actual SS and scale
    surface = raster::extract(x = rasterFromXYZ(full), y = as.data.frame(SS))
  }
  if(landscape == "patchy"){
    # Simulating the landscape; short autocorrelation range -> patchiness
    l = nlm_gaussianfield(ncol = nside, nrow = nside, resolution = rr, nug=0,
                          user_seed = seed, rescale = TRUE,
                          autocorr_range = round(nside * 0.06))
    # Assign the values to the full extent
    full$Z = l@data@values
    # Extract raster values from the full extent rectangle to the actual SS and scale
    surface = raster::extract(x = rasterFromXYZ(full), y = as.data.frame(SS))
  }
  # Parameters (note: local `pi` shadows the base constant inside this scope)
  b0 = -1
  b1 = d.beta
  pi = exp(b0 + b1*surface) / sum(exp(b0 + b1*surface)) # Calculating probabilities
  # Final SS
  SS = cbind(SS, pi, surface)
  colnames(SS) = c("X", "Y", "density", "surface")
  return(SS)
}
"LOAD ALL FILES"
# Load every *.csv file under wd/folder and assign each one into the global
# environment, named after the file (without its ".csv" extension).
#
# Fixes two defects in the previous version:
#  * object names were taken from list.files(folder) -- a *relative* path
#    with no pattern filter -- while the data came from list.files(dir,
#    pattern = "*.csv"), so names and data could mismatch or be misaligned;
#    names are now derived directly from the loaded file paths.
#  * the pattern "*.csv" was a glob, not a regex; "\\.csv$" matches only
#    files that actually end in ".csv".
#
# Args:
#   wd     root directory
#   folder subdirectory of wd containing the csv files
# Side effect: one object per csv assigned in .GlobalEnv.
# Returns (invisibly) the vector of object names created.
load_files = function(wd, folder){
  src_dir = paste0(wd, "/", folder)
  # Load files
  filenames = list.files(src_dir, pattern = "\\.csv$", full.names = TRUE)
  objs = lapply(filenames, read.csv)
  # Derive names from the very files that were loaded (keeps order aligned)
  obj.names = tools::file_path_sans_ext(basename(filenames))
  # Assign names; seq_along() is safe when no csv files were found
  for(i in seq_along(objs)){
    assign(obj.names[i], objs[[i]], envir = .GlobalEnv)
  }
  invisible(obj.names)
}
"RMSE CALCULATION"
# Root-mean-square error of `estimates` around the true `parameter`:
# sqrt(mean((estimate - parameter)^2)).
#
# Replaces the previous element-by-element loop that grew a vector with the
# equivalent vectorized expression (same value, including NaN for empty input).
calc_rmse = function(estimates, parameter){
  sqrt(mean((estimates - parameter)^2))
}
"SRMSE CALCULATION"
# Scaled RMSE: RMSE of `estimates` around `parameter`, divided by the
# parameter value -- i.e. (1/parameter) * sqrt(mean((estimate - parameter)^2)).
#
# Vectorized replacement for the previous grow-a-vector loop. The guard below
# preserves the old behavior of returning NaN when the arithmetic yields a
# zero-length result (e.g. parameter = numeric(0)); empty `estimates` yields
# NaN via mean(numeric(0)).
calc_srmse = function(estimates, parameter){
  result = sqrt(mean((estimates - parameter)^2)) / parameter
  if(length(result) == 0){
    result = NaN
  } # Handling degenerate inputs
  result
}
"SIMULATOR"
#----SIMULATOR----
# Simulate SCR (spatial capture-recapture) data for a trap design and fit
# oSCR models to each simulated dataset.
#
# Args:
#   traps         trap coordinates (columns X, Y)
#   ss            state-space grid (columns X, Y); passed to densify()
#   N             true number of activity centers
#   p0, sigma     detection parameters (baseline probability, spatial scale)
#   K             number of sampling occasions
#   nsim          number of *accepted* simulations (must have spatial recaps)
#   it            scenario index, used in output file/directory names
#   landscape     "uniform", "directional", or "patchy" (see densify())
#   d.beta        density-covariate coefficient passed to densify()
#   plot          if TRUE, save one PNG spiderplot per accepted simulation
#   it.out.dir    optional subdirectory (under wd) for per-scenario tables
#   plots.out.dir subdirectory (under wd) for plot PNGs
#   wd            root directory for all output
#
# Returns: matrix with one row per fitted model (D~1 always; also D~surface
# when landscape != "uniform"), with the columns named below.
simulator<- function(traps, ss, N, p0, sigma, K, nsim, it = 1,
                     landscape = NA, d.beta = 3, plot = TRUE,
                     it.out.dir = NA, plots.out.dir = NA, wd = getwd()) {
  # Assign and/or create plotting directory
  plots.out.subdir = paste0(wd, "/", plots.out.dir, "/", "scenario_", it)
  if(!dir.exists(plots.out.subdir)){
    dir.create(plots.out.subdir)
  }
  # Initialize data-collection matrix
  simout1 <- matrix(NA, nrow=0, ncol=18) # create empty matrix for output
  colnames(simout1)<- c("p0","sig","d0", "d.beta", # estimates
                        "nind", # number of individuals (length of first dimmension of y)
                        "nind.c", "nind.r", "r_s", "avg.dets", "avg.r", "avg.nlocs", "avg.nspatcaps", # manually calculated summary stats
                        "avg.caps","avg.spatial","mmdm", # summary stats from oSCR (but see metric definitions)
                        "failures","EN","oSCR_model") # other components
  # Initialize while loop starting values
  sim = 1
  sim_try = 0
  total_its = 0
  # Get nsim acceptable simulations
  while(sim < (nsim + 1)){
    # Update loop
    total_its = total_its + 1
    # Tell the user what's going on:
    print(paste("Simulation Number", sim_try + 1, sep = " ")) # keep track
    cat("size of state-space: ", nrow(ss), " pixels", fill=TRUE)
    cat(paste0("\n Try ", sim_try + 1, "\n"))
    # Adding density surface to statespace
    statespace = densify(SS = ss, landscape = landscape, d.beta = d.beta, seed = sim)
    # Set seed after internal use of set.seed() in densify()
    seed = total_its
    set.seed(seed)
    # Sampling activity centers, weighted by the density surface
    ac = as.numeric()
    for(i in 1:N){
      ac[i] = base::sample(x = nrow(statespace), size = 1, prob = statespace[,"density"])
    }
    s = statespace[ac, c("X", "Y") ]
    # Make the state space data frame
    myss <- as.data.frame(statespace)[,c("X", "Y", "surface")]
    myss$Tr <- 1
    myss <- list(myss)
    class(myss) <- "ssDF"
    # individual-trap distance matrix
    D <- e2dist(s,traps)
    # Compute detection probabilities (half-normal detection function):
    pmat <- p0*exp(-D*D/(2*sigma*sigma)) # p for all inds-traps p_ij
    ntraps <- nrow(traps)
    y <- array(0, dim=c(N, ntraps, K)) # empty 3D array (inds by traps by occ)
    for(i in 1:N){# loop through each individual/activity center
      for(j in 1:ntraps){# loop through each trap
        y[i,j,1:K]<- rbinom(K, 1, pmat[i,j]) # y ~ binomial(p_ijk)
      }
    }
    ncap <- apply(y,c(1), sum) # sum of captures for each individual
    y.all = y # for summary stats
    y <- y[ncap>0,,] # reduce the y array to include only captured individuals
    # Some summary information, that is actually printed for you later with "print(scrFrame)"
    caps.per.ind.trap <- apply(y,c(1,2),sum) #shows # capts for each indv across all traps
    # Check for captures (y keeps 3 dimensions only when >1 individual was captured)
    check.y = length(dim(y)) %>%
      if(. > 2){return(TRUE)} else {return(FALSE)}
    # Check for spatial recaps
    check.sp_recaps = as.matrix((caps.per.ind.trap > 0) + 0) %>%
      rowSums() %>%
      c(.,-1) %>% # This is just to avoid warning messages due to empty lists
      max %>%
      if(. > 1){return(TRUE)} else {return(FALSE)}
    check = 0 # Clear from previous iteration
    check = check.y + check.sp_recaps
    # Checking for sp.recaps implies getting caps,
    # but for troubleshooting good to keep both checks
    if(check != 2){
      #plot(rasterFromXYZ(statespace[,c(1,2,4)]), col = rev(viridis(1000))) # plot the state space
      #points(s, pch = 20, col = "white")
      #points(s)
      #simout1 = rbind(simout1, rep(NA, ncol(simout1))) # Turn this off for the big run
    } else if(check ==2){
      # Make the SCRframe
      colnames(traps)<- c("X","Y")
      sf <- make.scrFrame(caphist=list(y), traps=list(traps))
      # Plotting
      if(plot == TRUE){
        # Create individual plot output file
        plots.out.dir.file = paste0(plots.out.subdir, "/", "sim_", it, "_", sim, ".png")
        png(filename = plots.out.dir.file, width = 500, height = 500)
        # Make plot
        plot(rasterFromXYZ(statespace[,c(1,2,4)]), col = rev(viridis(1000))) # plot the state space
        points(s, pch = 20, col = "white")
        points(s)
        spiderplot(sf, add=TRUE)
        dev.off()
      }
      #----SCR SUMMARY STATS----
      # Collapse occasion dim
      collap.y.tot = apply(y.all, 1:2, sum)
      # Copy (for later)
      collap.y.sum = collap.y.tot
      # Number of captures per individual
      ncaps.per.ind = rowSums(collap.y.tot)
      # Convert this to binary and count ntraps per individual
      collap.y.sum[collap.y.sum > 0] = 1
      ntraps.per.ind = rowSums(collap.y.sum)
      ntraps.per.capInd = ntraps.per.ind[ntraps.per.ind>0]
      # Basics, n and r
      nind.c = length(ncaps.per.ind[ncaps.per.ind>0]) # number of individuals with captures
      nind.r = length(ncaps.per.ind[ncaps.per.ind>1]) # number of individuals with recaptures
      # For only captured/detected individuals...
      r_s = sum(ntraps.per.capInd - 1) # total sp recaps... sum (number of traps per detected ind - 1)
      avg.dets = mean(ncaps.per.ind[ncaps.per.ind>0]) # avg. of the number of caps per detected individual
      avg.r = mean(ntraps.per.capInd - 1) # avg of the number of recaps per detected ind
      avg.nlocs = mean(ntraps.per.capInd) # avg of the number of unique locations per individual
      avg.nspatcaps = mean(ntraps.per.capInd - 1) # avg of the number of spatial captures per individual
      # Finally summary stats object
      SCR_summary_stats = list(nind.c, nind.r, r_s, avg.dets, avg.r, avg.nlocs, avg.nspatcaps)
      names(SCR_summary_stats) = c("nind.c", "nind.r", "r_s", "avg.dets", "avg.r", "avg.nlocs", "avg.nspatcaps")
      #----Continuing to model fitting----
      # Fit a basic model SCR0
      out1 <- oSCR.fit(model=list(D~1,p0~1,sig~1), scrFrame = sf, ssDF=myss, trimS = 4*sigma)
      # UNIFORM DENSITY Estimates: d_.
      stats <- print(sf)[[1]] # pulls avg caps, avg spatial caps, and mmdm
      est <- out1$outStats$mle # pulls p0, sigma, and d0 estimates from the model
      en = get.real(out1, type="dens", newdata=data.frame(session=factor(1)),
                    d.factor=nrow(out1$ssDF[[1]]))[1,1] # Total abundance
      # Append to data-collection matrix
      sim_vals = c(plogis(est[1]), exp(est[2]), exp(est[3]), NA, dim(y)[1], SCR_summary_stats, stats, sim_try, en, 1)
      simout1 = rbind(simout1, sim_vals)
      # INHOMOGENOUS DENSITY Estimate: d_s
      if(landscape != "uniform"){
        out2 <- oSCR.fit(model=list(D~surface,p0~1,sig~1), scrFrame = sf, ssDF=myss, trimS = 4*sigma)
        est2 = out2$outStats$mle
        en2 = sum(get.real(out2, type="dens")[[1]]$estimate)
        # NOTE(review): the p0 entry below uses est[1] (from the D~1 fit)
        # rather than est2[1]; this looks like a copy-paste slip -- confirm.
        sim_vals2 = c(plogis(est[1]), exp(est2[2]), exp(est2[3]), est2[4], dim(y)[1], SCR_summary_stats, stats, sim_try, en2, 2)
        simout1 = rbind(simout1, sim_vals2)
      }
    }
    # Write partial results every iteration so long runs can be inspected/resumed
    if(!is.na(it.out.dir) & is.character(it.out.dir)){
      it.out.dir.file = paste0(wd, "/", it.out.dir, "/", "sim", it, ".txt")
      write.table(simout1, file = it.out.dir.file)
    }
    # Updating while() loop: only advance `sim` when the simulation was accepted
    if(check != 2){
      sim_try <- sim_try + 1
    } else {
      sim <- sim + 1
      sim_try = 0
    }
  }
  return(simout1)
}
|
e7e51af64ceeea3d4e541b9e586fddfaa990f5d2
|
7d821006d3c3664c5426e6a488eeb445f587a591
|
/code/create-exons-ercc.R
|
1beb07f1f79e4f5fd43aab7ae1f2d45f8b55a2a3
|
[
"MIT",
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
jdblischak/singlecell-qtl
|
04e82fcadeecbb2edc1cb6c4181078ccd0becbc4
|
34fcec0eec5ada409c725c56634517903d6a8818
|
refs/heads/master
| 2022-01-24T00:57:35.897712
| 2022-01-06T20:32:30
| 2022-01-06T20:32:30
| 99,834,237
| 10
| 5
|
NOASSERTION
| 2019-10-21T15:26:19
| 2017-08-09T17:18:38
|
Python
|
UTF-8
|
R
| false
| false
| 2,379
|
r
|
create-exons-ercc.R
|
#!/usr/bin/env Rscript
# Build a Simplified Annotation Format (SAF) exon table for the ERCC
# spike-in controls, for mapping reads to genes with featureCounts.
#
# Usage:
#   Rscript create-exons-ercc.R path/to/ERCC92.gtf > file.saf
#
# ERCC92.gtf is distributed by Invitrogen:
#   http://media.invitrogen.com.edgesuite.net/softwares/ERCC92.gtf
#
# SAF output columns: GeneID, Chr, Start, End, Strand (plus Name);
# coordinates are 1-based, inclusive on both ends.

# Validate the single expected command-line argument.
args <- commandArgs(trailingOnly = TRUE)
stopifnot(length(args) == 1)
ercc <- args[1]
stopifnot(basename(ercc) == "ERCC92.gtf")

# Read the GTF. Column definitions follow the UCSC GTF/GFF description:
# http://www.genome.ucsc.edu/FAQ/FAQformat.html#format3
ercc_gtf <- read.table(ercc, sep = "\t", stringsAsFactors = FALSE)
colnames(ercc_gtf) <- c("seqname", "source", "feature", "start", "end",
                        "score", "strand", "frame", "group")

# For ERCC spike-ins the sequence name doubles as both the gene ID and the
# human-readable name, so "seqname" fills three of the SAF columns.
ercc_saf <- data.frame(
  GeneID = ercc_gtf$seqname,
  Chr    = ercc_gtf$seqname,
  Start  = ercc_gtf$start,
  End    = ercc_gtf$end,
  Strand = ercc_gtf$strand,
  Name   = ercc_gtf$seqname,
  stringsAsFactors = FALSE
)

# Write tab-separated SAF to stdout (redirect to a file when invoking).
write.table(ercc_saf, "", quote = FALSE, sep = "\t",
            row.names = FALSE)
|
361f19906982a05e5a4f3ec7fa01c03431b0e0e4
|
ba2845eadc8880147e906ab727d322d875226efa
|
/Analyses/expdesigns.R
|
5ae1bc28716c3bca2a7518ab982c5d9995e2ec8c
|
[] |
no_license
|
AileneKane/radcliffe
|
80e52e7260195a237646e499bf4e3dad4af55330
|
182cd194814e46785d38230027610ea9a499b7e8
|
refs/heads/master
| 2023-04-27T19:55:13.285880
| 2023-04-19T15:15:02
| 2023-04-19T15:15:02
| 49,010,639
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,442
|
r
|
expdesigns.R
|
### Started 28 June 2016 ###
### By Lizzie (so far) ###
## Looking at simple ANOVA results of warming experiments ##
## Given different estimates of warming and given exact design (e.g., blocks) ##
################################
## Read me, notes from Lizzie ##
################################
# Merge in a file that has the different target/reported temperatures by Year
# Think about default contrasts (options(contrasts=c("contr.sum", "contr.poly"))
# Species .... just treating it as ranef at intercept, definitely not ideal but easy, really need to get into slopes I think.
# 104 BACE observations with no block: Ailene says these are plots where phonology data were collected, but there is no climate data. they are separate from the experimental setup (maybe remove?)
# Watch out on including block as not all studies have it (you thus lose lots of data)
# Year ... think more, continuous variable? Fixef or Ranef?
# And don't forget! Events (BBD or FLD, for example). Need to consider. ##
## housekeeping
# NOTE(review): rm(list=ls()) plus the hard-coded setwd() below make this a
# personal, non-portable analysis script; edit the path before running elsewhere.
rm(list=ls())
options(stringsAsFactors = FALSE)
library(plyr)
library(dplyr)
library(ggplot2)
library(lme4)
setwd("~/Documents/git/projects/meta_ep2/radcliffe/analyses")
## get the data
# Christy says (early June 2016): I just pushed two sets of calculations: one at the plot level (EffectiveWarming_Plot.csv) and one at the treatment level (EffectiveWarming_Treatment.csv). Because not all treatments have above or below ground temperature, I went ahead and calculated the deviation from control/ambient for aboveground and belowground max, min, and mean temperature.
effwarm <- read.csv("EffectiveWarming_Treatment.csv", header=TRUE)
effwarm.plot <- read.csv("EffectiveWarming_Plot.csv", header=TRUE)
treats <- read.csv("treats_detail.csv", header=TRUE)
expphen <- read.csv("exppheno.csv", header=TRUE)
expsite <- read.csv("expsiteinfo.csv", header=TRUE)
# summaries: cross-tabulate sites against treatments to see coverage
table(effwarm.plot$site, effwarm.plot$temptreat)
table(effwarm.plot$site, effwarm.plot$preciptreat)
table(effwarm.plot$site, effwarm.plot$temptreat, effwarm.plot$preciptreat)
# sites that manipulated precipitation in any year
ayprecip <- c("bace", "chuine", "cleland", "force", "sherry")
# these need to match ...
sort(unique(paste(effwarm.plot$site, effwarm.plot$temptreat)))
sort(unique(paste(effwarm$site, effwarm$temptreat)))
sort(unique(paste(treats$site, treats$temptreat)))
# just looking at treatements ...
effwarm.plot.2temp <- subset(effwarm.plot, temptreat<3)
table(effwarm.plot.2temp$site, effwarm.plot.2temp$temptreat)
# just looking around some more
chuine.clim <- subset(effwarm.plot, site=="chuine")
chuine.phen <- subset(expphen, site=="chuine")
unique(chuine.clim$plot)
unique(chuine.phen$plot) # these don't merge!
## merge the data! Thank you Ailene
phendat.simple <- join(expphen, treats, by=c("site", "block", "plot"))
phendat <- join(phendat.simple, effwarm.plot, by=c("site", "block", "plot","temptreat",
    "preciptreat"), match="first")
phendat$target <- as.numeric(phendat$target)
phendat$latbi <- paste(phendat$genus, phendat$species)
phendat$yr <- phendat$year-1980 # a little scaling
## ask a few questions
whoblocks <- ddply(phendat, c("site"), count,
    block=block)
noblocks <- c("clarkduke", "clarkharvard", "dunne", "ellison", "marchin", "price", "sherry")
specreps <- ddply(phendat, c("latbi"), count,
    site=site)
## need to clean up ...
phendat$reported <- as.numeric(phendat$reported)
phendat.noprecip <- subset(phendat, preciptreat==0 | is.na(preciptreat)==TRUE)
phendat.noprecip.wrep <- subset(phendat.noprecip, is.na(reported)==FALSE)
# Remember (maybe better to think of as a factor at some point?)
mode(phendat$year)
##
## plot
ggplot(phendat, aes(x=year, y=doy, color=genus)) +
    facet_wrap(~site, scales="free_x") +
    geom_point()
##
## First, let's look at Cleland, the only split-plot (the plot is quadrant-within-plot in this case)
# Based on Zavaleta et al. 2003 (PNAS):
# Warming applied at plot level (as was CO2)
# Precip (and N dep) were applied within plots at the quadrant level
jasper <- subset(phendat, site=="cleland")
jr.split <- lmer(doy~temptreat*preciptreat + (1|temptreat:block) + (1|yr) +
    (1|latbi), data=jasper)
jr.block <- lmer(doy~temptreat*preciptreat + (1|block) + (1|yr) + (1|latbi), data=jasper)
anova(jr.block, jr.split)
# but does appropriately handle the repeated measures aspect? Below are not very happy
jr.split.rm <- lmer(doy~temptreat*preciptreat*yr + (1|temptreat:block) + (1|block/plot) +
    (1|latbi), data=jasper) # cannot nest plot in block here: (1|temptreat:block/plot)
jr.block.rm <- lmer(doy~temptreat*preciptreat*yr + (1|block/plot) +
    (1|latbi), data=jasper)
anova(jr.split.rm, jr.block.rm)
##
## Does including block impact findings?
phenblock <- phendat[which(!phendat$site %in% noblocks),]
phenblock.noprecip <- subset(phenblock, preciptreat==0 | is.na(preciptreat)==TRUE)
phenblock.wprecip <- phenblock[which(phenblock$site %in% ayprecip),]
# start with temp only, yr as fixed
ignoreblock <- lmer(doy~temptreat*yr + (1|site/plot) + (1|latbi), data=phenblock.noprecip,
    na.action=na.exclude)
block <- lmer(doy~temptreat*yr + (1|site/block/plot) + (1|latbi), data=phenblock.noprecip,
    na.action=na.exclude)
block.ranefyr <- lmer(doy~temptreat + (1|site/block/plot) + (1|latbi) + (1|yr), data=phenblock.noprecip,
    na.action=na.exclude)
summary(ignoreblock) # plot:site = 3.7
summary(block) # variance explained by block and plot = 3.76
summary(block.ranefyr) # hmm .. (note: 7K obs)
anova(ignoreblock)
anova(block)
anova(block.ranefyr)
# Take homes: blocking doesn't do much for you. Yr is a huge effect.
# same for precip*temp studies
ignoreblock.p <- lmer(doy~temptreat*preciptreat*yr + (1|site/plot) + (1|latbi), data=phenblock.wprecip,
    na.action=na.exclude)
block.p <- lmer(doy~temptreat*preciptreat*yr + (1|site/block/plot) + (1|latbi), data=phenblock.wprecip,
    na.action=na.exclude)
block.ranefyr.p <- lmer(doy~temptreat*preciptreat + (1|site/block/plot) + (1|latbi) + (1|yr), data=phenblock.wprecip,
    na.action=na.exclude)
# note the warnings!
summary(ignoreblock.p) # plot:site = 3.8
summary(block.p) # 3.8 again, really not much added by block I don't think?
summary(block.ranefyr.p) # hmm
anova(ignoreblock.p)
anova(block.p)
anova(block.ranefyr.p)
# stop(print("stop, the below code is in progress!"))
mode(phendat$target)
mode(phendat$reported)
# effects of target versus reported temp? Not much.
temp.target <- lmer(doy~target + (1|site/block/plot) + (1|latbi) + (1|yr), data=phendat.noprecip.wrep,
    na.action=na.exclude)
temp.reported <- lmer(doy~reported + (1|site/block/plot) + (1|latbi) + (1|yr), data=phendat.noprecip.wrep,
    na.action=na.exclude)
summary(temp.target)
summary(temp.reported)
# super similar estimates of -3.6 days (in my 2012 paper I got -3 (2.9 something), see in Fig S8 in my paper for above-canopy heaters) ... but based on 3K obs because of block ...
# effects of target versus reported temp? Not much.
temp.target.noblock <- lmer(doy~target + (1|site/plot) + (1|latbi) + (1|yr), data=phendat.noprecip.wrep,
    na.action=na.exclude)
temp.reported.noblock <- lmer(doy~reported + (1|site/plot) + (1|latbi) + (1|yr), data=phendat.noprecip.wrep,
    na.action=na.exclude)
summary(temp.target.noblock) # -0.37
summary(temp.reported.noblock) # -0.57
unique(phendat.noprecip.wrep$site)
## Real temperatures ...
phendat.air <- subset(phendat.noprecip, is.na(AGtemp_mean_dev)==FALSE)
phendat.soil <- subset(phendat.noprecip, is.na(BGtemp_mean_dev)==FALSE)
unique(phendat.air$site)
unique(phendat.soil$site)
## soil temps from 9 studies
ggplot(phendat.soil, aes(x=yr, y=doy, color=genus)) +
    facet_wrap(~site, scales="free_x") +
    geom_point()
## air temps from 6 studies
ggplot(phendat.air, aes(x=yr, y=doy, color=genus)) +
    facet_wrap(~site, scales="free_x") +
    geom_point()
temp.tar.air <- lmer(doy~target + (1|site/plot) + (1|latbi) + (1|yr),
    data=phendat.air, na.action=na.exclude) # block not sampled enough in this data subset
temp.tar.soil <- lmer(doy~target + (1|site/plot) + (1|latbi) + (1|yr),
    data=phendat.soil, na.action=na.exclude)
temp.rep.air <- lmer(doy~reported + (1|site/plot) + (1|latbi) + (1|yr),
    data=phendat.air, na.action=na.exclude) # block not sampled enough in this data subset
temp.rep.soil <- lmer(doy~reported + (1|site/plot) + (1|latbi) + (1|yr),
    data=phendat.soil, na.action=na.exclude) # block not sampled enough in this data subset
temp.real.air <- lmer(doy~AGtemp_mean_dev + (1|site/plot) + (1|latbi) + (1|yr),
    data=phendat.air, na.action=na.exclude) # block not sampled enough in this data subset
temp.real.soil <- lmer(doy~BGtemp_mean_dev + (1|site/plot) + (1|latbi) + (1|yr),
    data=phendat.soil, na.action=na.exclude)
summary(temp.tar.air) # 0.05 (22K obs)
summary(temp.rep.air) # 0.71 (22K obs)
summary(temp.real.air) # -2.3 (48K obs)
summary(temp.tar.soil) # 0.7 (26K obs)
summary(temp.rep.soil) # 0.09 (23K obs)
summary(temp.real.soil) # -2.514 (55K obs)
# does max air or min matter more?
temp.real.air.min <- lmer(doy~AGtemp_min_dev + (1|site/plot) + (1|latbi) + (1|yr),
    data=phendat.air, na.action=na.exclude)
temp.real.air.max <- lmer(doy~AGtemp_max_dev + (1|site/plot) + (1|latbi) + (1|yr),
    data=phendat.air, na.action=na.exclude)
summary(temp.real.air) # -2.33
summary(temp.real.air.min) # -1.9
summary(temp.real.air.max) # -1
# Hmm, the mean is so much more meaningful perhaps?
4ec33d3048cb52918da80dfc801877e127bd5d55
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MSPRT/examples/ump.match.ber.Rd.R
|
c539b5940ce4034696307419fd5fed67e8c9ee66
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 195
|
r
|
ump.match.ber.Rd.R
|
library(MSPRT)
### Name: ump.match.ber
### Title: Finding the "evidence threshold (delta)" in a proportion test
### Aliases: ump.match.ber
### ** Examples

# Extracted package example: find the evidence threshold for a one-sample
# proportion test with n.obs = 60 planned observations and null p0 = 0.2.
ump.match.ber(n.obs= 60, p0= .2)
|
4405ae0fe5a0a7920aa4a5df20b46206fbc2bdf8
|
4458625f2049529b04909571b0cd7477ab988964
|
/tests/testthat/test_cache_rfsrc_datasets.R
|
6c3c006137c128758677dced3da5b0c0351063c6
|
[] |
no_license
|
ehrlinger/ggRFVignette
|
22622b90d728cf2985cb2b60d62d4e32c65a8b27
|
501fbba6c2ad33625cad1732481cf48961f1296c
|
refs/heads/master
| 2020-05-22T01:33:27.901068
| 2018-06-12T18:03:36
| 2018-06-12T18:03:36
| 61,134,330
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 523
|
r
|
test_cache_rfsrc_datasets.R
|
# testthat for gg_error function
context("cache_rfsrc_dataset tests")
test_that("cache_rfsrc_dataset",{
  # NOTE(review): every expectation in this test is commented out, so it
  # currently passes vacuously; re-enable once cache_rfsrc_datasets() can be
  # run in the test environment.
  # # Check the default set of data
  # expect_output(cache_rfsrc_datasets(test=TRUE),
  #               "iris: randomForest")
  #
  # # If we have a bad path...
  # expect_error(cache_rfsrc_datasets(pth="nothing"))
  #
  # # If we want the alternative sets
  # expect_output(cache_rfsrc_datasets(set=c("airq"),
  #                                    test=TRUE),
  #               "airq: randomForest")
  # #
})
|
58cea4e51e90820998f7458707d57442713a0907
|
0beecb08c0462c9ef02076d811fc37a185af18c3
|
/sim1/epiHistory2tree0.R
|
82a24b831d844d0c1646ba411af8866c443abafc
|
[
"MIT"
] |
permissive
|
emvolz/PhyDyn-simulations
|
08fc23e66c19c3136a6396bd35ec2426556771c8
|
694e5cddae08aa7962ce462b91c9dcb6e79be39f
|
refs/heads/master
| 2021-01-22T12:35:27.752379
| 2018-07-04T09:16:40
| 2018-07-04T09:16:40
| 102,350,223
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,705
|
r
|
epiHistory2tree0.R
|
require(ape)

# Convert a completed epidemic/transmission history into an ape "phylo" tree.
#
# transmission_table: data.frame with columns `donor`, `recip`, `t` (the time
#   of each transmission); an NA donor marks infection from the external
#   source ('src').
# removal_table: data.frame with columns `pids` (person ids), `tremoval`
#   (sampling/removal times) and `tiplab` (tip labels for the output tree).
#
# Returns a binary tree (polytomies resolved via multi2di()); internal nodes
# are transmission events and branch lengths are the times between events.
#
# Changes from the previous revision: the leftover interactive browser()
# call now fails loudly with stop(), and the index loops use seq_along() so
# empty inputs do not iterate over c(1, 0).
epi2tree <- function( transmission_table, removal_table )
{
  print(date())  # progress logging; the loops below can be slow
  donor = transmission_table$donor
  recip = transmission_table$recip
  t = transmission_table$t
  donor[is.na(donor)] <- 'src'
  # ~ pids, tremoval, tiplab
  pids = removal_table$pids
  tremoval = removal_table$tremoval
  tiplab = removal_table$tiplab
  #TODO should validate input
  N <- length(pids)
  Nnode <- N - 1 + 1 #include source +1 #N + Nnode
  edge <- matrix( NA, nrow = length(t) + length(pids) , ncol = 2)
  edge.length <- rep(NA, length(t) + length(pids))
  recip2donor <- setNames( recip, donor )
  # process transmissions in chronological order
  i <- order(t)
  donor <- donor[i]
  recip <- recip[i]
  t <- t[i]
  print(c(date(), 'sorted inputs'))
  recip2donorNode <- setNames( rep(NA, length(pids)), pids)
  recip2donorNode[ recip2donor[pids]=='src' ] <- 'src'
  names(tremoval) <- pids
  # add transm edges; node ids are strings "<pid>_<k>" for the k-th
  # transmission by pid, remapped to ape integer ids below
  donor2trCounter <- setNames(rep(0, length(pids)+1), c('src', pids) )
  donor2ntr <- sapply( pids, function(pid) sum( donor== pid ))
  print(c(date(), 'donor2ntr'))
  node2time <- list()
  node2time[['src']] <- t[1]-1
  k <- 1
  print( c(date(), 'frontmatter' ))
  for ( i in seq_along(t)){
    u <- donor[i]
    v <- recip[i]
    donor2trCounter[u] <- donor2trCounter[u] + 1
    trcount <- donor2trCounter[u]
    # An NA counter means the donor id is absent from removal_table$pids;
    # fail loudly rather than dropping into the debugger (was: browser()).
    if (is.na(trcount)) {
      stop("epi2tree: donor '", u, "' does not appear in removal_table$pids")
    }
    if (u == 'src'){
      donornode <- u
    } else{
      donornode <- paste(u, trcount, sep='_')
    }
    recip2donorNode[v] <- donornode
    if (u != 'src'){
      edge[k,2] <- donornode
      node2time[[ donornode ]] <- t[i]
      if (trcount==1){
        edge[k, 1] <- recip2donorNode[u]
      } else{
        edge[k,1] <- paste(u, trcount-1, sep='_')
      }
      k <- k + 1
    }
    if (i %% 500 == 0) print(c( date(), i))
  }
  # add terminals: each sampled individual hangs off its last event node
  names(tiplab) <- pids
  for ( i in seq_along( pids )){
    pid <- pids[i]
    tl <- tiplab[pid]
    node2time[[tl]] <- tremoval[i]
    if ( donor2trCounter[pid] == 0 ){
      lastnode <- recip2donorNode[pid]
    } else{
      trcount <- donor2trCounter[pid]
      lastnode <- paste(pid, trcount, sep='_')
    }
    edge[k,1] <- lastnode
    edge[k,2] <- tl
    k <- k + 1
  }
  # remap string node ids to the integer ids ape expects:
  # tips get 1..N, internal nodes get N+1, N+2, ...
  internalNodes <- setdiff( unique( as.vector( edge ) ), tiplab)
  i_internalNodes <- setNames( N + seq_along(internalNodes), internalNodes )
  i_tips <- setNames( seq_len(N), tiplab)
  nodemap <- c( i_internalNodes, i_tips)
  edge <- edge[!is.na(edge[,1]), ]
  edge2 <- matrix(NA, nrow =nrow(edge), ncol =ncol(edge))
  edge2[,1] <- nodemap[ edge[,1] ]
  edge2[,2] <- nodemap[ edge[,2] ]
  edge.length <- rep(NA, nrow(edge2))
  # branch length = child event time minus parent event time
  edge.length <- unname( unlist( node2time[edge[,2]] ) - unlist(node2time[ edge[,1] ] ) )
  o <- list( edge = edge2, tip.label = tiplab, edge.length = edge.length, Nnode = Nnode )
  class(o) <- 'phylo'
  # round-trip through Newick to normalize, then resolve polytomies
  tre <- multi2di( read.tree(text= write.tree( o )) )
}
|
7dd2a49aace89c623abcc46a336c1e2adf788d7f
|
86a282f2e03d0d8e64127bfe2aa4be6d968d24b4
|
/man/puffin.Rd
|
9cefa2d054d29018253e7902878dd246eb157ea5
|
[] |
no_license
|
u44027388/LearnBayes
|
fc57e5689c9619de966f4b9e0210bb3aa078ec8f
|
f7722076d01768bb845bfe9bed78c365fcf292df
|
refs/heads/master
| 2021-09-14T19:23:22.283849
| 2018-05-17T21:04:10
| 2018-05-17T21:04:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 728
|
rd
|
puffin.Rd
|
\name{puffin}
\alias{puffin}
\docType{data}
\title{Bird measurements from British islands}
\description{
Measurements on breeding of the common puffin in different
habitats at Great Island, Newfoundland.
}
\usage{
puffin
}
\format{
A data frame with 38 observations on the following 5 variables.
\describe{
\item{Nest}{nesting frequency (burrows per 9 square meters)}
\item{Grass}{grass cover (percentage)}
\item{Soil}{mean soil depth (in centimeters)}
\item{Angle}{angle of slope (in degrees)}
\item{Distance}{distance from cliff edge (in meters)}
}
}
\source{Peck, R., Devore, J., and Olsen, C. (2005), Introduction to Statistics
And Data Analysis, Thomson Learning.}
\keyword{datasets}
|
b15c3436a442ccabe1603aa2a4d3da01176dd4f8
|
a2099edf0795624d588faae2095f6104944977ab
|
/R/utils.R
|
5487b5316b8af411b060abfa9d63a24b4cc56bfa
|
[] |
no_license
|
ijlyttle/shinychord
|
00343338bb7e0fdf33ee703201bc89ecf82e4726
|
71c6abd6b05d012f1fb54a227f6ca24adb760911
|
refs/heads/master
| 2020-05-17T03:58:11.967469
| 2016-01-29T23:52:00
| 2016-01-29T23:52:00
| 40,910,215
| 14
| 2
| null | 2016-01-29T23:52:00
| 2015-08-17T16:27:03
|
HTML
|
UTF-8
|
R
| false
| false
| 3,869
|
r
|
utils.R
|
#' Closure for file-downloading
#'
#' A closure to provide a function to download a file,
#' by copying it from the specified place. Useful for
#' \code{shiny::downloadHandler()}.
#'
#' @param file_source character describing path to file to be copied
#'
#' @return function
#' @export
#
cl_file_copy <- function(file_source){
  # Capture `file_source` in a closure; the returned function copies that
  # file to whatever destination path the caller (e.g. shiny's
  # downloadHandler) supplies.
  function(file) {
    file.copy(from = file_source, to = file)
  }
}
#' Get the names of all the columns of the dataframe
#' that inherit from the supplied class name
#'
#' @param df dataframe
#' @param what character vector of class we wish to find
#'
#' @return character vector
#' @export
#
df_names_inherits <- function(df, what){
  # Flag each column whose class inherits from `what`, then return the
  # names of the flagged columns.
  is_match <- vapply(df, inherits, logical(1), what = what)
  names(df)[is_match]
}
#' Sets the timezone of all time-based columns in a dataframe
#'
#' @param df dataframe
#' @param tz timezone, an Olson timezone or "UTC" (default)
#'
#' @return dataframe
#'
#' @examples
#' df_with_tz(coltypes_sample, tz = "America/Chicago")
#'
#' @export
#
df_with_tz <- function(df, tz = "UTC"){
  # Retarget the display timezone of every POSIXct column. Only the "tzone"
  # attribute changes; the underlying instants are untouched.
  datetime_cols <- vapply(df, inherits, logical(1), "POSIXct")
  df[datetime_cols] <- lapply(df[datetime_cols], function(col) {
    attr(col, "tzone") <- tz
    col
  })
  df
}
#
#' function for scrollable pre-formatted text
#'
#' This is used as the \code{container} argument in \code{shiny::htmlOutput}
#'
#' @param ... expression used to fill text
#'
#' @source \url{http://stackoverflow.com/questions/10374171/how-to-make-twitter-bootstraps-pre-blocks-scroll-horizontally}
#' @export
#
pre_scroll <- function(...){
  # Wrap content in a <pre> tag whose inline CSS scrolls horizontally
  # instead of wrapping long lines (useful for wide console-style output
  # in Shiny); `...` is passed straight through to htmltools::pre().
  htmltools::pre(
    ...,
    style = "overflow: auto; word-wrap: normal; white-space: pre;"
  )
}
# returns TRUE if dataframe has any numeric columns
df_has_numeric <- function(df){
x <- lapply(df, dplyr::type_sum)
x <- unlist(x)
x <- any(x %in% c("dbl", "int"))
x
}
# returns TRUE if dataframe has any POSIXct columns
df_has_time <- function(df){
x <- lapply(df, dplyr::type_sum)
x <- unlist(x)
x <- any(x %in% c("time"))
x
}
# returns TRUE if the dataframe parsed using the text has any POSIXct columns
# not parsed from ISO-8601
#
df_has_time_non_8601 <- function(df, txt, delim){
if (df_has_time(df)) {
# identify time columns of dataframe
col_sum <- lapply(df, dplyr::type_sum)
col_sum <- unlist(col_sum)
# turn this into a col_types specification
col_types <- ifelse(col_sum == "time", "c", "_")
col_types <- paste0(col_types, collapse = "")
# parse the text into character
df_txt <- readr::read_delim(txt, delim = delim, col_types = col_types)
# put into a matrix (limit to first 1000 rows)
mat_txt <- as.matrix(head(df_txt, 1000))
# test for iso_8601 pattern
all_8601 <- all(is_time_8601(mat_txt), na.rm = TRUE)
x <- !all_8601
} else {
x <- FALSE
}
x
}
is_time_8601 <- function(x){
# \\d{4} exactly 4 digits
# -? optional "-"
# \\d{2} exactly 2 digits
# -? optional "-"
# \\d{2} exactly 2 digits
regex_8601_date <- "\\d{4}-?\\d{2}-?\\d{2}"
# \\d{2} exactly 2 digits
# (:?\\d{2})? optional (optional ":", exactly 2 digits)
# (:?\\d{2})? optional (optional ":", exactly 2 digits)
# (\\.\\d{3})? optional (".", exactly 3 digits)
regex_8601_time <- "\\d{2}(:?\\d{2})?(:?\\d{2})?(\\.\\d{3})?"
# Z "Z"
# | or
# ([+-]\\d{2}(:?\\d{2})?) (one of "+,-", exactly 2 digits,
# optional (optional ":", exactly 2 digits))
regex_8601_zone <- "Z|([+-]\\d{2}(:?\\d{2})?)"
# ^ beginning of string
# [T ] "T" or " "
# $ end of string
regex_8601 <- paste0("^", regex_8601_date, "[T ]", regex_8601_time, regex_8601_zone, "$")
stringr::str_detect(x, regex_8601)
}
|
50250fb7d49c57875efcdf5ceecc98f253022b52
|
fbacfe68bd4fd04b2d1e980679b8521bdc6d2fa1
|
/man/tweet.Rd
|
9a6c4e7dbdb753c0985d4f8e58d964a62ce6de1f
|
[] |
no_license
|
cran/seismic
|
107f8486f0f3aaaa8e17a899783c9e432021e8a4
|
b1bc6c9f978369f0b11f91c89c2c5b62a87b038e
|
refs/heads/master
| 2022-06-18T09:18:16.150815
| 2022-05-20T20:30:02
| 2022-05-20T20:30:02
| 36,954,497
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 490
|
rd
|
tweet.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tweetfunctions.R
\name{tweet}
\alias{tweet}
\title{An example information cascade}
\format{
A data frame with 15563 rows and 2 columns
}
\description{
A dataset containing all the (relative) resharing time and node degree of a tweet. The original Twitter ID is 127001313513967616.
}
\details{
\itemize{
\item relative_time_second. resharing time in seconds
\item number_of_followers. number of followers
}
}
|
e1fae9ccdb8dc1985cf44656aa4a6625cdfc896a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/UsingR/examples/sp500.excess.Rd.R
|
616dc9a9b42ee4ee4451913531e33f58e9139d02
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 186
|
r
|
sp500.excess.Rd.R
|
library(UsingR)
### Name: sp500.excess
### Title: Excess returns of S&P 500
### Aliases: sp500.excess
### Keywords: datasets
### ** Examples
data(sp500.excess)
plot(sp500.excess)
|
2e7a8e8453d7cbf6444bafa46ca147bf10cc46dc
|
87fdb51b3b0e92f42a3e33dbf07d0c01628d2aaa
|
/man/possol.Rd
|
7c6a59dda58c7f926bd3804a917ee82d5e6a8901
|
[] |
no_license
|
belasi01/Cops
|
0b5e0f04c46a639dfe5b8716199abf32757732f4
|
5cd0fa2f5fedb338cc063d1af21cf22ead0f9c5f
|
refs/heads/master
| 2023-07-25T16:07:36.614152
| 2023-07-12T01:08:54
| 2023-07-12T01:08:54
| 72,561,413
| 9
| 8
| null | 2022-08-31T16:13:21
| 2016-11-01T17:49:49
|
R
|
UTF-8
|
R
| false
| true
| 750
|
rd
|
possol.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/possol.R
\name{possol}
\alias{possol}
\title{Solar position as a function of date and time UTC and position (lat, lon)}
\usage{
possol(month, day, tu, xlon, xlat)
}
\arguments{
\item{month}{is the month (1 to 12)}
\item{day}{is the day of the month (1-31)}
\item{tu}{is the time UTC in decimal format (0.0 to 23.999)}
\item{xlon}{is the longitude in decimal degrees}
\item{xlat}{is the latitude in decimal degrees}
}
\value{
Returns a vector os two numeric for the zenithal and azimuthal angles in degrees
day is the number of the day in the month
}
\description{
Solar position as a function of date and time UTC and position (lat, lon)
}
\author{
Bernard Gentilly
}
|
5ecde356695f1bd7cd6babf96803661a3c2219b0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SimCorrMix/examples/calc_mixmoments.Rd.R
|
c7a02511978289efdfa74d9c56e1f7b42bb94604
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 450
|
r
|
calc_mixmoments.Rd.R
|
library(SimCorrMix)
### Name: calc_mixmoments
### Title: Find Standardized Cumulants of a Continuous Mixture Distribution
### by Method of Moments
### Aliases: calc_mixmoments
### Keywords: cumulants mixture
### ** Examples
# Mixture of Normal(-2, 1) and Normal(2, 1)
calc_mixmoments(mix_pis = c(0.4, 0.6), mix_mus = c(-2, 2),
mix_sigmas = c(1, 1), mix_skews = c(0, 0), mix_skurts = c(0, 0),
mix_fifths = c(0, 0), mix_sixths = c(0, 0))
|
130aad06679ef0a0db53ba6f3a20fd1d92251eae
|
16ba38ef11b82e93d3b581bbff2c21e099e014c4
|
/haohaninfo/交易實務案例/63.R
|
b6859e49497292f458f7b24e7798d708b4d13e71
|
[] |
no_license
|
penguinwang96825/Auto-Trading
|
cb7a5addfec71f611bdd82534b90e5219d0602dd
|
a031a921dbc036681c5054f2c035f94499b95d2e
|
refs/heads/master
| 2022-12-24T21:25:34.835436
| 2020-09-22T09:59:56
| 2020-09-22T09:59:56
| 292,052,986
| 2
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,038
|
r
|
63.R
|
#取得即時報價,詳細在技巧51
source("function.R")
#設定初始的資料格式
Qty <- matrix(, nrow = 0, ncol = 2)
MAnum <- 5
QMA <- NA
while(TRUE){
#取得成交資訊
Mdata<-GetMatchData(DataPath,Date)
MatchAmount <- as.numeric(Mdata[[1]][4])
HMTime <- as.numeric(paste0(substr(Mdata[[1]][1],1,2),substr(Mdata[[1]][1],4,5)))
#若為初始值,即更新最新一筆資訊
if(nrow(Qty)==0){
lastQty <- MatchAmount
Qty <- rbind(Qty,c(HMTime,0))
}else{
#若非初始值,即更新最新一筆資訊
#換分鐘則新增一筆資料
if(HMTime > Qty[nrow(Qty),1]){
if (nrow(Qty) > MAnum){
Qty <- rbind(Qty[-1,] ,c(HMTime,MatchAmount-lastQty))
lastQty <- MatchAmount
QMA <- sum(Qty[1:MAnum,2])/MAnum
}else{
Qty <- rbind(Qty ,c(HMTime,MatchAmount-lastQty))
lastQty <- MatchAmount
}
}else{
Qty[nrow(Qty),2] <- MatchAmount-lastQty
}
}
#顯示參數,確認程式是否正確
print(Qty)
if(!is.na(QMA)){
print(QMA)
}
}
|
4ded814ba2965e49184077dd42e38acb595e8792
|
be7f65793d79b77b520739cf7aa24008829ab92f
|
/hw3/My code and submission/hw3.R
|
04e7b797f89f824f2a8d759ccdf91fd7e550e283
|
[] |
no_license
|
aten2001/ISYE-6503
|
5506a8ba5d3c6ef4357f23e65db3acac10f7cd42
|
18824dc41b8ac381b551db041ba24e0fedaf2aad
|
refs/heads/master
| 2022-01-27T17:07:50.912732
| 2019-07-01T20:51:32
| 2019-07-01T20:51:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,347
|
r
|
hw3.R
|
library(data.table)
library("smooth")
setwd("//cdc.gov/private/L137/yks5/OMSA/ISYE6501/hw3")
setwd("G:/a-XiaoWang/OMSA/ISYE-6501/hw2")
#Question 7.2
Using the 20 years of daily high temperature data for Atlanta (July through October) from Question 6.2
(file temps.txt), build and use an exponential smoothing model to help make a judgment of whether
the unofficial end of summer has gotten later over the 20 years.
temps<-read.table("//cdc.gov/private/L137/yks5/OMSA/ISYE6501/hw3/temps.txt",header=TRUE)
#convert to time series
temps<-read.table("temps.txt",header=TRUE)
temps_vec<-as.vector(unlist(temps[,2:21]))
temps_vec
plot(temps_vec)
temps_ts<-ts(temps_vec,start=1996,frequency = 123)
temps_ts
plot(temps_ts)
plot(decompose(temps_ts))
temps_hw<-HoltWinters(temps_ts,alpha = NULL, beta= NULL, gamma = NULL,seasonal = "multiplicative")
temps_hw
summary(temps_hw)
plot(temps_hw)
#We can see from the picture that the in-sample forecasts agree pretty well with the observed values
#going to look at seasonal factors
head(temps_hw$fitted)
tail(temps_hw$fitted)
temps_hw_sf<-matrix(temps_hw$fitted[,4],nrow=123)
head(temps_hw_sf)
temps_hw_smoothed<-matrix(temps_hw$fitted[,1],nrow=123)
temps_hw_smoothed
#I am using the predicted value (Xhat) and the observed value (data from original temps.txt) to see if summer has gotten later over the 20 years.
#When the difference between the predicted value and the observed value reached max, which means predicted value is much higher than the observed value,
#among the whole period, I assume that day is the summer end date (the model thought the temperature was still high, while the actual temperature already went down).
#Then I compare the date from year 1997 to year 2015 to see how the date varies.
fit<-temps_hw$fitted
y1997<- data.frame(fit = fit[1:123,1], obs = temps[,2])
y1997$dif <-y1997$fit - y1997$obs
head(y1997)
a<-which.max(y1997$dif)
y1997[which.max(y1997$dif),]
#the 100th day is the day where predicted tempererature is much higher than the actual temperature.
y1998<- data.frame(fit = fit[124:246,1], obs = temps[,3])
y1998$dif <-y1998$fit - y1998$obs
b<-which.max(y1998$dif)
y1999<- data.frame(fit = fit[247:369,1], obs = temps[,3])
y1999$dif <-y1999$fit - y1999$obs
c<-which.max(y1999$dif)
y2000<- data.frame(fit = fit[370:492,1], obs = temps[,3])
y2000$dif <-y2000$fit - y2000$obs
d<-which.max(y2000$dif)
y2001<- data.frame(fit = fit[493:615,1], obs = temps[,3])
y2001$dif <-y2001$fit - y2001$obs
e<-which.max(y2001$dif)
y2002<- data.frame(fit = fit[616:738,1], obs = temps[,3])
y2002$dif <-y2002$fit - y2002$obs
f<-which.max(y2002$dif)
y2003<- data.frame(fit = fit[739:861,1], obs = temps[,3])
y2003$dif <-y2003$fit - y2003$obs
g<-which.max(y2003$dif)
y2004<- data.frame(fit = fit[862:984,1], obs = temps[,3])
y2004$dif <-y2004$fit - y2004$obs
h<-which.max(y2004$dif)
y2005<- data.frame(fit = fit[985:1107,1], obs = temps[,3])
y2005$dif <-y2005$fit - y2005$obs
i<-which.max(y2005$dif)
y2006<- data.frame(fit = fit[1108:1230,1], obs = temps[,3])
y2006$dif <-y2006$fit - y2006$obs
j<-which.max(y2006$dif)
y2007<- data.frame(fit = fit[1231:1353,1], obs = temps[,3])
y2007$dif <-y2007$fit - y2007$obs
k<-which.max(y2007$dif)
y2008<- data.frame(fit = fit[1354:1476,1], obs = temps[,3])
y2008$dif <-y2008$fit - y2008$obs
l<-which.max(y2008$dif)
y2009<- data.frame(fit = fit[1477:1599,1], obs = temps[,3])
y2009$dif <-y2009$fit - y2009$obs
m<-which.max(y2009$dif)
y2010<- data.frame(fit = fit[1600:1722,1], obs = temps[,3])
y2010$dif <-y2010$fit - y2010$obs
n<-which.max(y2010$dif)
y2011<- data.frame(fit = fit[1723:1845,1], obs = temps[,3])
y2011$dif <-y2011$fit - y2011$obs
o<-which.max(y2011$dif)
y2012<- data.frame(fit = fit[1846:1968,1], obs = temps[,3])
y2012$dif <-y2012$fit - y2012$obs
p<-which.max(y2012$dif)
y2013<- data.frame(fit = fit[1969:2091,1], obs = temps[,3])
y2013$dif <-y2013$fit - y2013$obs
q<-which.max(y2013$dif)
y2014<- data.frame(fit = fit[2092:2214,1], obs = temps[,3])
y2014$dif <-y2014$fit - y2014$obs
r<-which.max(y2014$dif)
y2015<- data.frame(fit = fit[2215:2337,1], obs = temps[,3])
y2015$dif <-y2015$fit - y2015$obs
s<-which.max(y2015$dif)
end<-c(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s)
plot(end)
#The plot shows the end days stays within a range of 100 -120 days from July 1st. So I would
#say the summer does not get later over the 20 years.
#Question 8.2
Using crime data from http://www.statsci.org/data/general/uscrime.txt
(file uscrime.txt,description at http://www.statsci.org/data/general/uscrime.html ),
use regression (a useful R function is lm or glm) to predict the observed crime rate in a city with the following data:
M = 14.0
So = 0
Ed = 10.0
Po1 = 12.0
Po2 = 15.5
LF = 0.640
M.F = 94.0
Pop = 150
NW = 1.1
U1 = 0.120
U2 = 3.6
Wealth = 3200
Ineq = 20.1
Prob = 0.04
Time = 39.0
#Show your model (factors used and their coefficients), the software output, and the quality of fit.
crime<- read.table("http://www.statsci.org/data/general/uscrime.txt",header=TRUE)
head(crime)
model1<-lm(Crime~.,crime)
summary(model1)
#I am using 0.05 as the threshold. Based on the p-value, I will keep M,Ed,Ineq,Prob,to fit a new model.
model2<-lm(Crime~M+Ed+Ineq+Prob,crime)
summary(model2)
#Now only the Ed and Prob still remains significant. The adjusted R-squred dropped from 0.7078 to 0.1927. I will fit a new model to see what happens.
model3<-lm(Crime~Ed+Prob,crime)
summary(model3)
#Now only Prob remains significant. The adjusted R-squred dropped from 0.1927. to 0.1756. So the first model with all the variables included truns out to be
#the best model so far. Next, I will try to use every combination to find the "best" model.
# create a NULL vector called model so we have something to add our layers to
model=NULL
# create a vector of the dataframe column names used to build the formula
vars = names(crime)
# remove the response variable (it's in the 16th column)
vars = vars[-16]
# the combn function will run every different combination of variables and then run the lm
for(i in 1:length(vars)){
xx = combn(vars,i)
if(is.null(dim(xx))){
fla = paste("Crime ~", paste(xx, collapse="+"))
model[[length(model)+1]]=lm(as.formula(fla),data=crime)
} else {
for(j in 1:dim(xx)[2]){
fla = paste("Crime ~", paste(xx[1:dim(xx)[1],j], collapse="+"))
model[[length(model)+1]]=lm(as.formula(fla),data=crime)
}
}
}
# see how many models were build using the loop above
length(model)
# create a vector to extract AIC and BIC values from the model variable
AICs = NULL
BICs = NULL
for(i in 1:length(model)){
AICs[i] = AIC(model[[i]])
BICs[i] = BIC(model[[i]])
}
#see which models were chosen as best by AIC and BIC
which(AICs==min(AICs))
which(BICs==min(BICs))
#see which variables are in those models, and the corresponding adjusted R-squared.
summary(model[[18494]])
summary(model[[24966]])
summary(model[[5817]])
summary(model[[11564]])
#From the output, we can see the first two are the same model, and the last two are the same model. I will compare these two models with the model1, which has all the variables included
AIC(model1,model[[18494]],model[[5817]])
BIC(model1,model[[18494]],model[[5817]])
data.table(model1=0.7078,model18494=0.7444,model5817=0.7307)
#In conclusion, I would say model18494 is the best model I can found. The equation of the model is:
#crime= -6426.10+93.32*M+180.12*Ed+102.65*Po1+ 22.34*M.F-6086.63*U1+187.35*U2+61.33*Ineq-3796.03*Prob
#The corresponding AIC is 639.3151, BIC is 657.8166, and the adjusted R-squared is 0.7444
#Use the seleted model to find the crime rate in the city with data provided:
crimerate=-6426.10+93.32*14.0+180.12*10.0+102.65*12.0+ 22.34*94.0-6086.63*0.120+187.35*3.6+61.33*20.1-3796.03*0.04
crimerate
testpoint<-data.frame(M = 14.0,
So = 0,
Ed = 10.0,
Po1 = 12.0,
Po2 = 15.5,
LF = 0.640,
M.F = 94.0,
Pop = 150,
NW = 1.1,
U1 = 0.120,
U2 = 3.6,
Wealth = 3200,
Ineq = 20.1,
Prob = 0.04,
Time = 39.0)
preditc<-predict(model[[18494]],testpoint)
#perfrom 4-fold cross validation with the linear models that was choosen;
qqnorm(crime$Crime)
set.seed(1234)
lm1<-cv.lm(crime,model1,m=4)
lml18494<-cv.lm(crime,model[[18494]],m=4)
# We can calculate the R-squared values directly.
# R-squared = 1 - SSEresiduals/SSEtotal
#
# total sum of squared differences between data and its mean
SStot <- sum((crime$Crime - mean(crime$Crime))^2)
SStot
# for model, model2, and cross-validation, calculated SEres
SSres_model <- sum(model$residuals^2)
SSres_model2 <- sum(model2$residuals^2)
SSres_c <- attr(c,"ms")*nrow(dat) # mean squared error, times number of data points, gives sum of squared errors
|
55b8150ddb89f16e8396aa1e3fb17a20c2f03353
|
afe4c3518d472c82479f9ed646491545e1ee6718
|
/2.scripts/4.all.figures.R
|
dbab795be889d1b82049f1cfb70252a4ca496e0f
|
[] |
no_license
|
gmkov/microclimate.thermal.tolerance.Heliconius-JEB-2020
|
fdeec8271e2e623138d646c55bec7d1b9221dc40
|
795646343b2f8a082628e1221017f1e62b5be0d2
|
refs/heads/master
| 2020-12-27T08:45:34.752930
| 2020-02-02T21:25:30
| 2020-02-02T21:25:30
| 237,835,187
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 73,168
|
r
|
4.all.figures.R
|
### Montejo-Kovacevich, G., Martin, S.H., Meier, J.I., Bacquet, C.N., Monllor, M., Jiggins, C.D. and Nadeau, N.J., 2020. ###
### Microclimate buffering and thermal tolerance across elevations in a tropical butterfly. ###
### Journal of Experimental Biology. ######
##################### ALL FIGURES PREP ######################
##### packages ######
rm(list=ls())
dev.off()
library(dplyr)
library(tidyr)
library(reshape2)
library(vegan)
library(grid)
library(cowplot)
library(gridExtra)
library(RColorBrewer)
library(viridis)
library(ggrepel)
library(ggplot2)
library(RColorBrewer)
library(lemon)
library(multcompView)
library(pwr)
library(egg)
##### data ######
setwd("microclimate.thermal.tolerance.Heliconius-JEB-2020/")
logger.info <- read.csv("1.data/logger.info.csv")
wild.temp.all <- read.csv("1.data/wild.temp.data.csv")
wild.humi.all <- read.csv("1.data/wild.humi.data.csv")
logger_all_temp_mean_date.time <- read.csv("1.data/fig1.1.logger.hourly.means.csv")
month.wc <-read.csv("1.data/fig1.1.monthly.raw.temps.wc.csv")
daily_temp_mean<-read.csv("1.data/fig1.2.daily.mean.alllogger.daily.csv")
daily_temp_max<-read.csv("1.data/fig1.2.daily.max.alllogger.daily.csv")
daily_temp_min<-read.csv("1.data/fig1.2.daily.min.alllogger.daily.csv")
summ.temp.hours.side.alt.height <- read.csv("1.data/fig2.A.summ.temp.hours.side.alt.height.csv")
yearly.daily_temp_max.min.spread.area <- read.csv("1.data/fig2.B.yearly.daily_temp_max.min.spread.area.csv")
daily.annual.means.per.logger <- read.csv( "1.data/fig2.C.daily.annual.means.per.logger.wc.comb.csv")
summ.localities <- read.csv("1.data/summ.localities.csv")
coll <- read.csv("1.data/cg.coll.reach.adult.csv")
ther <- read.csv("1.data/ther.ALL.csv")
##### prep #####
# create datetime variable for yearly plots
wild.temp.all$date.time <- paste(wild.temp.all$Date, wild.temp.all$Time)
wild.humi.all$date.time <- paste(wild.humi.all$Date, wild.humi.all$Time)
# summary stats logger height. altitude
mean(logger.info$logger.height[logger.info$canopy_understory=="c"&!(is.na(logger.info$logger.height))])
mean(logger.info$logger.height[logger.info$canopy_understory=="u"&!(is.na(logger.info$logger.height))])
mean(logger.info$point_altitude[logger.info$alt_type=="high"&!(is.na(logger.info$point_altitude))])
mean(logger.info$point_altitude[logger.info$alt_type=="low"&!(is.na(logger.info$point_altitude))])
month$alt.type_height <- paste(month$alt_type, month$height, sep="_")
month.daily$alt.type_height <- paste(month.daily$alt_type, month.daily$height, sep="_")
month.logger$alt.type_height <- paste(month.logger$alt_type, month.logger$height, sep="_")
# plotting variables
cols1 <- alpha(c("#009E73","#009E73","#D55E00","#D55E00"), 0.9)
month.names <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
ylab <- "Temperature (°C)"
##### functions #####
# modified so that the order of tukey groupings matches order of
generate_label_df <- function(TUKEY, variable){
# Extract labels and factor levels from Tukey post-hoc
Tukey.levels <- TUKEY[[variable]][,4]
Tukey.labels <- data.frame(multcompLetters4(alt.aov, TUKEY)$alt_height_slope$Letters)
names(Tukey.labels) <- 'Letters'
#I need to put the labels in the same order as in the boxplot :
Tukey.labels$treatment=rownames(Tukey.labels)
Tukey.labels=Tukey.labels[order(Tukey.labels$treatment) , ]
return(Tukey.labels)
}
capitalize <- function(string) {
substr(string, 1, 1) <- toupper(substr(string, 1, 1))
string
}
# change facet wrap titles
alt_names <- list('high'="Highlands", 'low'="Lowlands")
alt_labeller <- function(variable,value){
return(alt_names[value])}
########################################## FIGURE 1 #####################
############### 1. yearly raw temp ###########
### a1 west temp low #####
#data prep
x.axis.order <- c("low_c_west","low_u_west")
a1.dat <- subset(logger_all_temp_mean_date.time, side_andes=="west"&alt_type=="low")
a1.dat$date.time <- as.POSIXct(a1.dat$date.time, "%Y-%m-%d %H:%M:%S", tz="Europe/London")
a1.dat.wild <- subset(wild.temp.all, side_andes=="west"&alt_type=="low")
a1.dat.wild$date.time <- as.POSIXct(a1.dat.wild$date.time, "%Y-%m-%d %H:%M:%S", tz="Europe/London")
a1.wc.dat <- subset(month.wc, side_andes=="west"&alt_type=="low")
a1.wc.dat$date.time <- paste(a1.wc.dat$date.wc.plot, "12:00:00", sep = " ")
a1.wc.dat$date.time <- as.POSIXct(a1.wc.dat$date.time, "%d/%m/%Y %H:%M:%S", tz="Europe/London")
a1 <- ggplot(data=a1.dat, aes(x=date.time, y=value, color=alt_height_slope)) + # the variables of interest
geom_line(inherit.aes = FALSE,aes(x=date.time, y=value), data=a1.dat.wild,size=.7, color="lightgrey")+
geom_line(size=.7) +
xlab("") +
ylab("Temperature (°C)") +
#geom_text(size=5,color="black",aes(x=as.POSIXct("2017-02-07 00:00:00", "%Y-%m-%d %H:%M:%S"),y=42, label="A1"))+
coord_cartesian(ylim=c(10,42))+
scale_color_manual(limits=x.axis.order,
values=alpha(c( "#D55E00","#0072B2"), 0.9))+
theme_classic()+
geom_point(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(a1.wc.dat, type=="tmean"), colour="black",size=.8,alpha=.5,,alpha=.5)+
geom_line(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(a1.wc.dat, type=="tmean"), colour="black", linetype="dashed",alpha=.5,size=.5)+
geom_point(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(a1.wc.dat, type=="tmax"), colour="black",size=.8,alpha=.5,,alpha=.5)+
geom_line(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(a1.wc.dat, type=="tmax"), colour="black", linetype="dashed",alpha=.5,size=.5)+
geom_point(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(a1.wc.dat, type=="tmin"), colour="black",size=.8,alpha=.5,,alpha=.5)+
geom_line(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(a1.wc.dat, type=="tmin"), colour="black", linetype="dashed",alpha=.5,size=.5)+
theme(legend.position="none")+ # Remove legend
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text = element_text(size=10)); a1
### a2 west temp high #####
#data prep
x.axis.order <- c("high_c_west","high_u_west")
a2.dat <- subset(logger_all_temp_mean_date.time, side_andes=="west"|alt_type=="high")
a2.dat$date.time <- as.POSIXct(a2.dat$date.time, "%Y-%m-%d %H:%M:%S", tz="Europe/London")
a2.dat.wild <- subset(wild.temp.all, side_andes=="west"&alt_type=="high")
a2.dat.wild$date.time <- as.POSIXct(a2.dat.wild$date.time, "%Y-%m-%d %H:%M:%S", tz="Europe/London")
a2.wc.dat <- subset(month.wc, side_andes=="west"&alt_type=="high")
a2.wc.dat$date.time <- paste(a2.wc.dat$date.wc.plot, "12:00:00", sep = " ")
a2.wc.dat$date.time <- as.POSIXct(a2.wc.dat$date.time, "%d/%m/%Y %H:%M:%S", tz="Europe/London")
a2 <- ggplot(data=a2.dat, aes(x=date.time, y=value, color=alt_height_slope)) + # the variables of interest
geom_line(inherit.aes = FALSE,aes(x=date.time, y=value), data=a2.dat.wild,size=.7, color="lightgrey")+
geom_line(size=.7) +
#geom_text(size=5,color="black",aes(x=as.POSIXct("2017-02-07 00:00:00", "%Y-%m-%d %H:%M:%S"),y=42, label="A2"))+
xlab("") + ylab("") +
coord_cartesian(ylim=c(10,42))+
scale_color_manual(limits=x.axis.order, values=alpha(c("#E69F00","#56B4E9"), 0.9))+
geom_point(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(a2.wc.dat, type=="tmean"), colour="black",size=.8,alpha=.5)+
geom_line(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(a2.wc.dat, type=="tmean"), colour="black", linetype="dashed",alpha=.5,size=.5)+
geom_point(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(a2.wc.dat, type=="tmax"), colour="black",size=.8,alpha=.5)+
geom_line(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(a2.wc.dat, type=="tmax"), colour="black", linetype="dashed",alpha=.5,size=.5)+
geom_point(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(a2.wc.dat, type=="tmin"), colour="black",size=.8,alpha=.5)+
geom_line(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(a2.wc.dat, type=="tmin"), colour="black", linetype="dashed",alpha=.5,size=.5)+
theme_classic()+
theme(legend.position="none")+ # Remove legend
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text = element_text(size=10)); a2
### b1 raw east temp low #####
x.axis.order <- c("low_c_east","low_u_east")
b1.dat <- subset(logger_all_temp_mean_date.time, (side_andes=="east"|alt_type=="low"))
b1.dat$date.time <- as.POSIXct(b1.dat$date.time, "%Y-%m-%d %H:%M:%S", tz="Europe/London")
b1.dat.wild <- subset(wild.temp.all, side_andes=="east"&alt_type=="low")
b1.dat.wild$date.time <- as.POSIXct(b1.dat.wild$date.time, "%Y-%m-%d %H:%M:%S", tz="Europe/London")
b1.wc.dat <- subset(month.wc, side_andes=="east"&alt_type=="low")
b1.wc.dat$date.time <- paste(b1.wc.dat$date.wc.plot, "12:00:00", sep = " ")
b1.wc.dat$date.time <- as.POSIXct(b1.wc.dat$date.time, "%d/%m/%Y %H:%M:%S", tz="Europe/London")
b1 <- ggplot(data=b1.dat, aes(x=date.time, y=value, color=alt_height_slope)) + # the variables of interest
geom_line(inherit.aes = FALSE,aes(x=date.time, y=value), data=b1.dat.wild,size=.7, color="lightgrey")+
geom_line(size=.7) +
xlab("") +
ylab("Temperature (°C)") +
#geom_text(size=5,color="black",aes(x=as.POSIXct("2017-02-07 00:00:00", "%Y-%m-%d %H:%M:%S"),y=42, label="B1"))+
coord_cartesian(ylim=c(10,42))+
scale_color_manual(limits=x.axis.order,values=alpha(c( "#D55E00","#0072B2"), 0.9))+
geom_point(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(b1.wc.dat, type=="tmean"), colour="black",size=.8,alpha=.5)+
geom_line(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(b1.wc.dat, type=="tmean"), colour="black", linetype="dashed",alpha=.5,size=.5)+
geom_point(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(b1.wc.dat, type=="tmax"), colour="black",size=.8,alpha=.5)+
geom_line(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(b1.wc.dat, type=="tmax"), colour="black", linetype="dashed",alpha=.5,size=.5)+
geom_point(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(b1.wc.dat, type=="tmin"), colour="black",size=.8,alpha=.5)+
geom_line(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(b1.wc.dat, type=="tmin"), colour="black", linetype="dashed",alpha=.5,size=.5)+
theme_classic()+
theme(legend.position="none")+ # Remove legend
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text = element_text(size=10)); b1
### b2 raw east temp high #####
#data prep
x.axis.order <- c("high_c_east","high_u_east")
b2.dat <- subset(logger_all_temp_mean_date.time, side_andes=="east"|alt_type=="high")
b2.dat$date.time <- as.POSIXct(b2.dat$date.time, "%Y-%m-%d %H:%M:%S", tz="Europe/London")
b2.dat.wild <- subset(wild.temp.all, side_andes=="east"&alt_type=="high")
b2.dat.wild$date.time <- as.POSIXct(b2.dat.wild$date.time, "%Y-%m-%d %H:%M:%S", tz="Europe/London")
b2.wc.dat <- subset(month.wc, side_andes=="east"&alt_type=="high")
b2.wc.dat$date.time <- paste(b2.wc.dat$date.wc.plot, "12:00:00", sep = " ")
b2.wc.dat$date.time <- as.POSIXct(b2.wc.dat$date.time, "%d/%m/%Y %H:%M:%S", tz="Europe/London")
b2 <- ggplot(data=b2.dat, aes(x=date.time, y=value, color=alt_height_slope)) + # the variables of interest
geom_line(inherit.aes = FALSE,aes(x=date.time, y=value), data=b2.dat.wild,size=.7, color="lightgrey")+
geom_line(size=.7) +
#geom_text(size=5,color="black",aes(x=as.POSIXct("2017-02-07 00:00:00", "%Y-%m-%d %H:%M:%S"),y=42, label="B2"))+
xlab("") +
ylab("") +
coord_cartesian(ylim=c(10,42))+
scale_color_manual(limits=x.axis.order,values=alpha(c("#E69F00","#56B4E9"), 0.9))+
theme_classic()+
geom_point(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(b2.wc.dat, type=="tmean"), colour="black",size=.8,alpha=.5)+
geom_line(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(b2.wc.dat, type=="tmean"), colour="black", linetype="dashed",alpha=.5,size=.5)+
geom_point(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(b2.wc.dat, type=="tmax"), colour="black",size=.8,alpha=.5)+
geom_line(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(b2.wc.dat, type=="tmax"), colour="black", linetype="dashed",alpha=.5,size=.5)+
geom_point(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(b2.wc.dat, type=="tmin"), colour="black",size=.8,alpha=.5)+
geom_line(inherit.aes = FALSE, aes(x=date.time, y=mean.value),data=subset(b2.wc.dat, type=="tmin"), colour="black", linetype="dashed",alpha=.5,size=.5)+
theme(legend.position="none")+ # Remove legend
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text = element_text(size=10)); b2
############## 2 BOXPLOT MEANS #########
##### MAX a3, b3 ####
### a3 max west temp ####
a3.dat <- subset(daily_temp_max, side_andes=="west")
names(a3.dat)
a3.dat$alt_height_slope <- factor(a3.dat$alt_height_slope, levels = c("low_c_west", "low_u_west","high_c_west", "high_u_west"))
a3.dat <- a3.dat[order(factor(a3.dat$alt_height_slope, levels = c("low_c_west", "low_u_west","high_c_west", "high_u_west"))),]
#prep
x.axis.order <- c("low_c_west","low_u_west","high_c_west","high_u_west")
text_high <- textGrob("High A", gp=gpar(fontsize=10, fontface="bold"))
text_low <- textGrob("Low A", gp=gpar(fontsize=10, fontface="bold"))
# use this tukey function, as they are all significant anyways
# but real tukey tests are in 6.clim.an- based on LMM
alt.aov <-aov(value ~ alt_height_slope, data=a3.dat); summary(alt.aov) #correct order
TUKEY <- TukeyHSD(x=alt.aov, 'alt_height_slope' , conf.level=0.95);TUKEY
labels<- generate_label_df(TUKEY, "alt_height_slope")
names(labels)<-c('Letters', 'alt_height_slope')
yvalue<-dplyr::summarise(group_by(a3.dat, alt_height_slope),
mean=max(value))
final<-merge(labels,yvalue)
#plot
a3 <- ggplot(a3.dat, aes(y=value, x=alt_height_slope,color=alt_height_slope)) +
geom_boxplot(aes(fill=alt_height_slope))+labs(y="")+labs(x="")+
coord_cartesian(ylim = c(14, 36))+
geom_text(data = final, aes(x = alt_height_slope, y = mean, label = Letters),vjust=-1,hjust=0) +
ylab(c("Temperature (°C)"))+
scale_x_discrete(labels=c("", "", "", ""))+
scale_color_manual(limits=x.axis.order,
values=c( "#D55E00","#0072B2", "#E69F00","#56B4E9"))+
scale_fill_manual(limits=x.axis.order,
values = alpha(c( "#D55E00","#0072B2", "#E69F00","#56B4E9"), 0.6))+
theme_classic()+
theme(legend.position="none")+ # Remove legend
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text = element_text(size=10)); a3
### b3 max east temp ####
# Same recipe as a3 but for the EAST slope: factor-releveled so boxes appear
# low-canopy, low-understory, high-canopy, high-understory left to right.
b3.dat <- subset(daily_temp_max, side_andes=="east")
names(b3.dat)
b3.dat$alt_height_slope <- factor(b3.dat$alt_height_slope, levels = c("low_c_east", "low_u_east","high_c_east", "high_u_east"))
b3.dat <- b3.dat[order(factor(b3.dat$alt_height_slope, levels = c("low_c_east", "low_u_east","high_c_east", "high_u_east"))),]
#prep
x.axis.order <- c("low_c_east","low_u_east","high_c_east","high_u_east")
text_high <- textGrob("High A", gp=gpar(fontsize=10, fontface="bold"))
text_low <- textGrob("Low A", gp=gpar(fontsize=10, fontface="bold"))
# One-way ANOVA + Tukey HSD -> compact letter display placed at group maxima.
alt.aov <-aov(value ~ alt_height_slope, data=b3.dat); summary(alt.aov) #correct order
TUKEY <- TukeyHSD(x=alt.aov, 'alt_height_slope' , conf.level=0.95);TUKEY
labels<- generate_label_df(TUKEY, "alt_height_slope")
names(labels)<-c('Letters', 'alt_height_slope')
yvalue<-dplyr::summarise(group_by(b3.dat, alt_height_slope),
mean=max(value))
final<-merge(labels,yvalue)
#plot
# Bottom-row panel: this one carries the Canopy/Understory x labels.
b3 <- ggplot(b3.dat, aes(y=value, x=alt_height_slope,color=alt_height_slope)) +
geom_boxplot(aes(fill=alt_height_slope))+labs(y="")+labs(x="")+
coord_cartesian(ylim = c(14, 36))+
geom_text(data = final, aes(x = alt_height_slope, y = mean, label = Letters),vjust=-1,hjust=0) +
ylab(c("Temperature (°C)"))+
scale_x_discrete(labels=c("Canopy", "Understory", "Canopy", "Understory"))+
#geom_text(size=4.5, color="black", aes(x=(1), y=24.5, label="b"), fontface="plain")+ #adding the superscrpts
#geom_text(size=4.5, color="black", aes(x=(2), y=24.5, label="ab"), fontface="plain")+
#geom_text(size=4.5, color="black", aes(x=(3), y=24.5, label="a"), fontface="plain")+
#geom_text(size=5,color="black",aes(x=0.9,y=35.5, label="b3"))+
scale_color_manual(limits=x.axis.order,
values=c( "#D55E00","#0072B2", "#E69F00","#56B4E9"))+
scale_fill_manual(limits=x.axis.order,
values = alpha(c( "#D55E00","#0072B2", "#E69F00","#56B4E9"), 0.6))+
theme_classic()+
theme(legend.position="none")+ # Remove legend
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text.x = element_text(size=10, angle = 25, hjust = 1)); b3
# b3 <- ggplot_gtable(ggplot_build(b3))
# b3$layout$clip[b3$layout$name == "panel"] <- "off"
# grid.draw(b3)
##### MEAN a4, b4 ####
### a4 mean west temp ####
# Daily MEAN temperature, west slope. Labels are placed at group MEANS here
# (the a3/b3 max panels use group maxima).
a4.dat <- subset(daily_temp_mean, side_andes=="west")
names(a4.dat)
a4.dat$alt_height_slope <- factor(a4.dat$alt_height_slope, levels = c("low_c_west", "low_u_west","high_c_west", "high_u_west"))
a4.dat <- a4.dat[order(factor(a4.dat$alt_height_slope, levels = c("low_c_west", "low_u_west","high_c_west", "high_u_west"))),]
#prep
x.axis.order <- c("low_c_west","low_u_west","high_c_west","high_u_west")
text_high <- textGrob("High A", gp=gpar(fontsize=10, fontface="bold"))
text_low <- textGrob("Low A", gp=gpar(fontsize=10, fontface="bold"))
alt.aov <-aov(value ~ alt_height_slope, data=a4.dat); summary(alt.aov) #correct order
TUKEY <- TukeyHSD(x=alt.aov, 'alt_height_slope' , conf.level=0.95);TUKEY
labels<- generate_label_df(TUKEY, "alt_height_slope")
names(labels)<-c('Letters', 'alt_height_slope')
yvalue<-dplyr::summarise(group_by(a4.dat, alt_height_slope),
mean=mean(value))
final<-merge(labels,yvalue)
#plot
a4 <- ggplot(a4.dat, aes(y=value, x=alt_height_slope,color=alt_height_slope)) +
geom_boxplot(aes(fill=alt_height_slope))+labs(y="")+labs(x="")+
coord_cartesian(ylim = c(14, 36))+
geom_text(data = final, aes(x = alt_height_slope, y = mean, label = Letters),vjust=-2,hjust=0) +
ylab(c(""))+
scale_x_discrete(labels=c("", "", "", ""))+
#geom_text(size=4.5, color="black", aes(x=(1), y=24.5, label="b"), fontface="plain")+ #adding the superscrpts
#geom_text(size=4.5, color="black", aes(x=(2), y=24.5, label="ab"), fontface="plain")+
#geom_text(size=4.5, color="black", aes(x=(3), y=24.5, label="a"), fontface="plain")+
#geom_text(size=5,color="black",aes(x=0.9,y=35.5, label="a4"))+
scale_color_manual(limits=x.axis.order,
values=c( "#D55E00","#0072B2", "#E69F00","#56B4E9"))+
scale_fill_manual(limits=x.axis.order,
values = alpha(c( "#D55E00","#0072B2", "#E69F00","#56B4E9"), 0.6))+
theme_classic()+
theme(legend.position="none")+ # Remove legend
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text = element_text(size=10)); a4
# a4 <- ggplot_gtable(ggplot_build(a4))
# a4$layout$clip[a4$layout$name == "panel"] <- "off"
# grid.draw(a4)
### b4 mean east temp ####
# Daily MEAN temperature, east slope; bottom-row panel with x tick labels.
b4.dat <- subset(daily_temp_mean, side_andes=="east")
names(b4.dat)
b4.dat$alt_height_slope <- factor(b4.dat$alt_height_slope, levels = c("low_c_east", "low_u_east","high_c_east", "high_u_east"))
b4.dat <- b4.dat[order(factor(b4.dat$alt_height_slope, levels = c("low_c_east", "low_u_east","high_c_east", "high_u_east"))),]
#prep
x.axis.order <- c("low_c_east","low_u_east","high_c_east","high_u_east")
text_high <- textGrob("High A", gp=gpar(fontsize=10, fontface="bold"))
text_low <- textGrob("Low A", gp=gpar(fontsize=10, fontface="bold"))
alt.aov <-aov(value ~ alt_height_slope, data=b4.dat); summary(alt.aov) #correct order
TUKEY <- TukeyHSD(x=alt.aov, 'alt_height_slope' , conf.level=0.95);TUKEY
labels<- generate_label_df(TUKEY, "alt_height_slope")
names(labels)<-c('Letters', 'alt_height_slope')
yvalue<-dplyr::summarise(group_by(b4.dat, alt_height_slope),
mean=mean(value))
final<-merge(labels,yvalue)
#plot
b4 <- ggplot(b4.dat, aes(y=value, x=alt_height_slope,color=alt_height_slope)) +
geom_boxplot(aes(fill=alt_height_slope))+labs(y="")+labs(x="")+
coord_cartesian(ylim = c(14, 36))+
geom_text(data = final, aes(x = alt_height_slope, y = mean, label = Letters),vjust=-3,hjust=0) +
ylab(c(""))+
scale_x_discrete(labels=c("Canopy", "Understory", "Canopy", "Understory"))+
#geom_text(size=4.5, color="black", aes(x=(1), y=24.5, label="b"), fontface="plain")+ #adding the superscrpts
#geom_text(size=4.5, color="black", aes(x=(2), y=24.5, label="ab"), fontface="plain")+
#geom_text(size=4.5, color="black", aes(x=(3), y=24.5, label="a"), fontface="plain")+
#geom_text(size=5,color="black",aes(x=0.9,y=35.5, label="b4"))+
scale_color_manual(limits=x.axis.order,
values=c( "#D55E00","#0072B2", "#E69F00","#56B4E9"))+
scale_fill_manual(limits=x.axis.order,
values = alpha(c( "#D55E00","#0072B2", "#E69F00","#56B4E9"), 0.6))+
theme_classic()+
theme(legend.position="none")+ # Remove legend
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text = element_text(size=10),
axis.text.x = element_text(size=10, angle = 25, hjust = 1)); b4
# b4 <- ggplot_gtable(ggplot_build(b4))
# b4$layout$clip[b4$layout$name == "panel"] <- "off"
# grid.draw(b4)
##### MIN a5, b5, c5, d5, ####
### a5 min west temp ####
# Daily MIN temperature, west slope. NOTE(review): yvalue uses mean(value)
# (not min) to position the letters — presumably intentional so the letters
# sit above the boxes rather than at the minima; confirm with the authors.
a5.dat <- subset(daily_temp_min, side_andes=="west")
names(a5.dat)
a5.dat$alt_height_slope <- factor(a5.dat$alt_height_slope, levels = c("low_c_west", "low_u_west","high_c_west", "high_u_west"))
a5.dat <- a5.dat[order(factor(a5.dat$alt_height_slope, levels = c("low_c_west", "low_u_west","high_c_west", "high_u_west"))),]
#prep
x.axis.order <- c("low_c_west","low_u_west","high_c_west","high_u_west")
text_high <- textGrob("High A", gp=gpar(fontsize=10, fontface="bold"))
text_low <- textGrob("Low A", gp=gpar(fontsize=10, fontface="bold"))
alt.aov <-aov(value ~ alt_height_slope, data=a5.dat); summary(alt.aov) #correct order
TUKEY <- TukeyHSD(x=alt.aov, 'alt_height_slope' , conf.level=0.95);TUKEY
labels<- generate_label_df(TUKEY, "alt_height_slope")
names(labels)<-c('Letters', 'alt_height_slope')
yvalue<-dplyr::summarise(group_by(a5.dat, alt_height_slope),
mean=mean(value))
final<-merge(labels,yvalue)
#plot
a5 <- ggplot(a5.dat, aes(y=value, x=alt_height_slope,color=alt_height_slope)) +
geom_boxplot(aes(fill=alt_height_slope))+labs(y="")+labs(x="")+
coord_cartesian(ylim = c(14, 36))+
geom_text(data = final, aes(x = alt_height_slope, y = mean, label = Letters),vjust=-2,hjust=0) +
ylab(c(""))+
scale_x_discrete(labels=c("", "", "", ""))+
#geom_text(size=4.5, color="black", aes(x=(1), y=24.5, label="b"), fontface="plain")+ #adding the superscrpts
#geom_text(size=4.5, color="black", aes(x=(2), y=24.5, label="ab"), fontface="plain")+
#geom_text(size=4.5, color="black", aes(x=(3), y=24.5, label="a"), fontface="plain")+
#geom_text(size=5,color="black",aes(x=0.9,y=35.5, label="a5"))+
scale_color_manual(limits=x.axis.order,
values=c( "#D55E00","#0072B2", "#E69F00","#56B4E9"))+
scale_fill_manual(limits=x.axis.order,
values = alpha(c( "#D55E00","#0072B2", "#E69F00","#56B4E9"), 0.6))+
theme_classic()+
theme(legend.position="none")+ # Remove legend
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text = element_text(size=10)); a5
# a5 <- ggplot_gtable(ggplot_build(a5))
# a5$layout$clip[a5$layout$name == "panel"] <- "off"
# grid.draw(a5)
### b5 min east temp ####
# Daily MIN temperature, east slope; bottom-row panel with x tick labels.
b5.dat <- subset(daily_temp_min, side_andes=="east")
names(b5.dat)
b5.dat$alt_height_slope <- factor(b5.dat$alt_height_slope, levels = c("low_c_east", "low_u_east","high_c_east", "high_u_east"))
b5.dat <- b5.dat[order(factor(b5.dat$alt_height_slope, levels = c("low_c_east", "low_u_east","high_c_east", "high_u_east"))),]
#prep
x.axis.order <- c("low_c_east","low_u_east","high_c_east","high_u_east")
text_high <- textGrob("High A", gp=gpar(fontsize=10, fontface="bold"))
text_low <- textGrob("Low A", gp=gpar(fontsize=10, fontface="bold"))
alt.aov <-aov(value ~ alt_height_slope, data=b5.dat); summary(alt.aov) #correct order
TUKEY <- TukeyHSD(x=alt.aov, 'alt_height_slope' , conf.level=0.95);TUKEY
labels<- generate_label_df(TUKEY, "alt_height_slope")
names(labels)<-c('Letters', 'alt_height_slope')
yvalue<-dplyr::summarise(group_by(b5.dat, alt_height_slope),
mean=mean(value))
final<-merge(labels,yvalue)
#plot
b5 <- ggplot(b5.dat, aes(y=value, x=alt_height_slope,color=alt_height_slope)) +
geom_boxplot(aes(fill=alt_height_slope))+labs(y="")+labs(x="")+
coord_cartesian(ylim = c(14, 36))+
geom_text(data = final, aes(x = alt_height_slope, y = mean, label = Letters),vjust=-2,hjust=0) +
ylab(c(""))+
scale_x_discrete(labels=c("Canopy", "Understory", "Canopy", "Understory"))+
#geom_text(size=4.5, color="black", aes(x=(1), y=24.5, label="b"), fontface="plain")+ #adding the superscrpts
#geom_text(size=4.5, color="black", aes(x=(2), y=24.5, label="ab"), fontface="plain")+
#geom_text(size=4.5, color="black", aes(x=(3), y=24.5, label="a"), fontface="plain")+
#geom_text(size=5,color="black",aes(x=0.9,y=35.5, label="b5"))+
scale_color_manual(limits=x.axis.order,
values=c( "#D55E00","#0072B2", "#E69F00","#56B4E9"))+
scale_fill_manual(limits=x.axis.order,
values = alpha(c( "#D55E00","#0072B2", "#E69F00","#56B4E9"), 0.6))+
theme_classic()+
theme(legend.position="none")+ # Remove legend
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text = element_text(size=10),
axis.text.x = element_text(size=10, angle = 25, hjust = 1)); b5
# b5 <- ggplot_gtable(ggplot_build(b5))
# b5$layout$clip[b5$layout$name == "panel"] <- "off"
# grid.draw(b5)
### A6 min west hum ####
# Daily MIN relative humidity, west slope. No ANOVA/Tukey letters here;
# ordering is imposed via scale limits instead of refactoring the data.
c5.dat <- subset(daily_hum_min, side_andes=="west")
names(c5.dat)
#prep
x.axis.order <- c("low_c_west","low_u_west","high_c_west","high_u_west")
text_high <- textGrob("High A", gp=gpar(fontsize=10, fontface="bold"))
text_low <- textGrob("Low A", gp=gpar(fontsize=10, fontface="bold"))
#plot
c5 <- ggplot(c5.dat, aes(y=value, x=alt_height_slope,color=alt_height_slope)) +
geom_boxplot(aes(fill=alt_height_slope))+labs(y="")+labs(x="")+
coord_cartesian(ylim = c(40, 115))+
scale_x_discrete(limits=x.axis.order,
labels=c("", "", "", ""))+
#geom_text(size=4.5, color="black", aes(x=(1), y=24.5, label="b"), fontface="plain")+ #adding the superscrpts
#geom_text(size=4.5, color="black", aes(x=(2), y=24.5, label="ab"), fontface="plain")+
#geom_text(size=4.5, color="black", aes(x=(3), y=24.5, label="a"), fontface="plain")+
#geom_text(size=5,color="black",aes(x=0.9,y=105, label="C5"))+
scale_color_manual(limits=x.axis.order,
values=c("cadetblue4", "darkorchid4", "violet", "cadetblue1"))+
scale_fill_manual(limits=x.axis.order,
values = alpha(c("cadetblue4", "darkorchid4", "violet", "cadetblue1"), 0.6))+
theme_classic()+
theme(legend.position="none")+ # Remove legend
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text = element_text(size=10))
# annotation_custom(text_low,xmin=1,xmax=2,ymin=25,ymax=25) +
# annotation_custom(text_high,xmin=3,xmax=4,ymin=25,ymax=25)
c5
# Convert to gtable and turn off panel clipping so annotations outside the
# panel (when enabled) remain visible.
c5 <- ggplot_gtable(ggplot_build(c5))
c5$layout$clip[c5$layout$name == "panel"] <- "off"
grid.draw(c5)
### B6 min east hum ####
# Daily MIN relative humidity, east slope; companion panel to c5 with
# Canopy/Understory x labels and "Low A"/"High A" annotations drawn below
# the panel (clipping is disabled after building the gtable).
d5.dat <- subset(daily_hum_min, side_andes=="east")
names(d5.dat)
#prep
x.axis.order <- c("low_c_east","low_u_east","high_c_east","high_u_east")
text_high <- textGrob("High A", gp=gpar(fontsize=12, fontface="bold"))
text_low <- textGrob("Low A", gp=gpar(fontsize=12, fontface="bold"))
#plot
d5 <- ggplot(d5.dat, aes(y=value, x=alt_height_slope,color=alt_height_slope)) +
geom_boxplot(aes(fill=alt_height_slope))+labs(y="")+labs(x="")+
coord_cartesian(ylim = c(40, 115))+
scale_x_discrete(limits=x.axis.order,
# FIX: was `labels=labels=c(...)` (duplicated `labels=`), a parse error
# that stopped the whole script from running.
labels=c("Canopy", "Understory", "Canopy", "Understory"))+
#geom_text(size=4.5, color="black", aes(x=(1), y=24.5, label="b"), fontface="plain")+ #adding the superscrpts
#geom_text(size=4.5, color="black", aes(x=(2), y=24.5, label="ab"), fontface="plain")+
#geom_text(size=4.5, color="black", aes(x=(3), y=24.5, label="a"), fontface="plain")+
#geom_text(size=5,color="black",aes(x=0.9,y=105, label="D5"))+
scale_color_manual(limits=x.axis.order,
values=c("cadetblue4", "darkorchid4", "violet", "cadetblue1"))+
scale_fill_manual(limits=x.axis.order,
values = alpha(c("cadetblue4", "darkorchid4", "violet", "cadetblue1"), 0.6))+
theme_classic()+
theme(legend.position="none")+ # Remove legend
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text = element_text(size=10))+
annotation_custom(text_low,xmin=1,xmax=2,ymin=25,ymax=25) +
annotation_custom(text_high,xmin=3,xmax=4,ymin=25,ymax=25)
d5
d5 <- ggplot_gtable(ggplot_build(d5))
d5$layout$clip[d5$layout$name == "panel"] <- "off"
grid.draw(d5)
##### combine plots ######
# Assemble Figure 1: 5x2 grid (west row a1-a5, east row b1-b5); wider columns
# for the time-series panels (a1/a2, b1/b2), narrower for the boxplots.
a<- plot_grid(a1,a2,a3,a4,a5,b1,b2,b3,b4,b5,rel_widths=c(2,2,1,1,1,2,2,1,1,1),
labels = c("A1 Lowlands","A2 Highlands","A3 Daily Max.","A4 Daily Mean","A5 Daily Min.",
"B1","B2","B3","B4", "B5"), label_size = 12,
ncol = 5, nrow = 2,hjust = 0, label_x = 0.1, label_y=1.12, align = "hv")+
theme(plot.margin = unit(c(1, 0, 0, 0), "cm")); a
ggsave2("../figures/fig1.temp.year.mean.min.max.wc.png",a, width = 13, height = 6, dpi = 300 )
########################################## FIGURE 2 #####################
###### 1. fig2A daily hours wild ######
# Mean hourly temperature across the day, by altitude x canopy group,
# faceted by Andean slope (one column, facet strips hidden).
# NOTE(review): the top-level aes maps y=mean but every layer overrides it
# with y=value — verify which column summ.temp.hours.side.alt.height
# actually carries; `ylab` must also be defined earlier in the script.
cols1 <- alpha(c("#E69F00","#56B4E9","#D55E00","#0072B2"), 0.9)
a <- ggplot(summ.temp.hours.side.alt.height, aes(x=Time, y=mean, shape=height))+
#geom_smooth(method = "auto" )+
geom_errorbar(aes(ymin =value-sd,ymax = value+sd,y=value,colour=alt.type_height),width=0.3, size=0.4,alpha=.6)+
geom_point(size=3,aes(y=value, colour=alt.type_height , group=alt.type_height)) +
geom_line(aes(y=value,colour=alt.type_height,x=Time, group=alt.type_height))+
ylab(ylab)+ xlab("Time of the day")+
theme_classic()+
scale_colour_manual(name= "Altitude" , values = cols1,labels=c("Highlands\ncanopy", "Highlands\nunderstory","Lowlands\ncanopy","Lowlands\nunderstory"))+
scale_shape_manual(name="Forest layer", labels=c("Canopy", "Understory"), values = c(17,16))+
# The second facet call supersedes the first (facet_rep_wrap from lemon).
facet_wrap(~side_andes, labeller=labeller(side_andes = capitalize), strip.position=NULL, ncol = 1)+
facet_rep_wrap(~side_andes, repeat.tick.labels = FALSE,labeller=labeller(side_andes = capitalize), strip.position="right", ncol = 1)+
# Alternating 1/0 colour vector hides every second x tick label.
theme(axis.text.x = element_text(angle = 35,size=10,hjust =1,color=c(1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1)),
axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text.y = element_text(size=10), legend.position = "none",
strip.text = element_blank(),
axis.title=element_text(size=14,face="bold", colour="black")); a
###### 2. fig. 2B BIO2 diurnal range ######
# Diurnal temperature range per group vs the WorldClim2 BIO2 estimate
# (black asterisks; /10 converts WorldClim's tenths of °C to °C).
high.more35<-subset(wild.temp.all,value>=35&alt_type=="high")
low.more35<- subset(wild.temp.all,value>=35&alt_type=="low")
# NOTE(review): the errorbar maps y=temp.range.area.mean.sd — presumably this
# should be y=temp.range.area.mean (the point/line y); confirm before reuse.
b <- ggplot(yearly.daily_temp_max.min.spread.area, aes(x=alt_type, y=temp.range.area.mean, shape=height, colour=alt.type_height))+
geom_line(inherit.aes = FALSE, aes(x=alt_type, y=temp.range.area.mean, shape=height, group=height),linetype = "dashed", colour="grey") +
geom_point(size=3) +facet_wrap(~side_andes)+
geom_point(inherit.aes = FALSE, data=summ.localities, aes(x=alt_type, y=(wc.bio2.annual.diurnal.range/10)),
colour="black", shape=8, size=2, position = position_nudge(x = 0, y = 0))+
geom_errorbar(aes(ymin =temp.range.area.mean-temp.range.area.mean.sd,ymax = temp.range.area.mean+temp.range.area.mean.sd,y=temp.range.area.mean.sd),width=0.03, size=0.3,alpha=.6)+
ylab("Diurnal temperature range (°C)")+ xlab("")+
#scale_colour_manual(name= "Altitude" , values = cols1, labels=c("Highlands","Highlands","Lowlands", "Lowlands"))+
scale_colour_manual(name= "" , values = cols1,labels=c("Highlands\ncanopy\n", "Highlands\nunderstory\n","Lowlands\ncanopy\n","Lowlands\nunderstory\n"))+
scale_shape_manual(name="Forest layer", labels=c("Canopy", "Understory"), values = c(17,16,17,16))+
theme_classic()+
ylim(2.5,11)+
scale_x_discrete(labels=c("Highlands", "Lowlands"))+
# This facet_wrap (free scales, one column) supersedes the earlier call.
facet_wrap(~side_andes, ncol = 1, scales = "free")+
#facet_wrap(~side_andes, labeller=labeller(side_andes = capitalize), strip.position="right", ncol = 1)+
#facet_rep_wrap(~side_andes, repeat.tick.labels = FALSE,labeller=labeller(side_andes = capitalize), strip.position="right", ncol = 1)+
theme(axis.text.x = element_text(angle = 0,size=12,hjust =.5),
axis.line.x = element_line(color="black", size = 0.5),
#strip.text = element_text(size=12, face="bold"),
strip.background = element_blank(),
strip.text.x = element_blank(),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text.y = element_text(size=10),axis.title=element_text(size=12,face="bold", colour="black")); b
###### 3. fig. 2C WC2 gets it wrong #####
# Density curves of per-logger annual mean daily-max temperature vs the
# (grey) WorldClim2 estimate, faceted by slope x altitude; dashed lines mark
# group means computed with ddply below.
cols.low <- alpha(c("grey", "#E69F00","#56B4E9"))
cols.high <- alpha(c("grey","#D55E00", "#0072B2"))
mu <- plyr::ddply(subset(daily.annual.means.per.logger, type=="temp.max"), c("height", "side_andes", "alt_type","alt.type_height"), summarise, grp.mean=mean(mean.value))
c <- ggplot(subset(daily.annual.means.per.logger, type=="temp.max"), aes(x=mean.value, colour=alt.type_height, fill=alt.type_height)) +
geom_vline(data=mu, aes(xintercept=grp.mean, color=alt.type_height),
linetype="dashed", alpha=.6, size=1)+
geom_density(adjust = 1.5, alpha=.6)+
xlab("Maximum temperature (°C)")+
ylab("Probability density")+
facet_rep_wrap(~side_andes+alt_type, scales = "fixed")+
scale_fill_manual(values = c("grey", "#E69F00","#56B4E9","grey", "#D55E00","#0072B2"),
labels=c("WorldClim2","Highlands\ncanopy", "Highlands\nunderstory","WorldClim2","Lowlands\ncanopy","Lowlands\nunderstory"))+
scale_colour_manual(values = c("grey", "#E69F00","#56B4E9","grey", "#D55E00","#0072B2"),
labels=c("WorldClim2","Highlands\ncanopy", "Highlands\nunderstory","WorldClim2","Lowlands\ncanopy","Lowlands\nunderstory"))+
#scale_colour_manual(name= "" , values = ,labels=c("Highlands\ncanopy\n", "Highlands\nunderstory\n","Lowlands\ncanopy\n","Lowlands\nunderstory\n"))+
scale_y_continuous(expand = c(0, 0)) +
xlim(17.8,34)+
#scale_x_continuous(expand = c(0, 0)) +
#facet_rep_wrap(~side_andes+alt_type, repeat.tick.labels = FALSE,labeller=labeller(side_andes = capitalize), strip.position="right", ncol = 2)+
theme_classic()+
theme(axis.text.x = element_text(angle = 0,size=12,hjust =.5),
axis.line.x = element_line(color="black", size = 0.5),
#strip.text = element_text(size=12, face="bold"),
strip.background = element_blank(),
strip.text.x = element_blank(),legend.title = element_blank(),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text.y = element_text(size=10),axis.title=element_text(size=12,face="bold", colour="black")); c
###### combine ######
# Empty spacer plots pad the left margin of panels A and B.
null <- ggplot()+theme_nothing()
plot_grid(null, a, null, b,c, nrow = 1, rel_widths = c(.05,.9,.05,.75,1.4), rel_heights = c(1,1,1,1,1),
axis = 'b',align = "hv", labels=c("","A","","B", "C"))
ggsave("../figures/fig2.png", width = 14, height = 6, dpi = 300)
########################################## FIGURE 3 #####################
##### prep humi data #######
# create datetime variable for yearly plots
wild.temp.all$date.time <- paste(wild.temp.all$Date, wild.temp.all$Time)
wild.humi.all$date.time <- paste(wild.humi.all$Date, wild.humi.all$Time)
# add variables
# Look up logger metadata (altitude, sampling point) by datalogger id.
wild.temp.all$altitude <- logger.info$point_altitude[match(wild.temp.all$datalogger_id, logger.info$code_mine)]
wild.temp.all$point <- logger.info$point[match(wild.temp.all$datalogger_id, logger.info$code_mine)]
# NOTE(review): the line below duplicates the previous assignment verbatim.
wild.temp.all$point <- logger.info$point[match(wild.temp.all$datalogger_id, logger.info$code_mine)]
wild.temp.all$side.alt <- paste(wild.temp.all$side_andes,wild.temp.all$alt_type, sep = ".")
# add variables
wild.humi.all$altitude <- logger.info$point_altitude[match(wild.humi.all$datalogger_id, logger.info$code_mine)]
wild.humi.all$point <- logger.info$point[match(wild.humi.all$datalogger_id, logger.info$code_mine)]
wild.humi.all$point <- logger.info$point[match(wild.humi.all$datalogger_id, logger.info$code_mine)]
wild.humi.all$side.alt <- paste(wild.humi.all$side_andes,wild.humi.all$alt_type, sep = ".")
# summary stats logger height. altitude
mean(logger.info$logger.height[logger.info$canopy_understory=="c"&!(is.na(logger.info$logger.height))])
mean(logger.info$logger.height[logger.info$canopy_understory=="u"&!(is.na(logger.info$logger.height))])
mean(logger.info$point_altitude[logger.info$alt_type=="high"&!(is.na(logger.info$point_altitude))])
mean(logger.info$point_altitude[logger.info$alt_type=="low"&!(is.na(logger.info$point_altitude))])
# NOTE(review): "cbp1" is not an RColorBrewer palette name, so this call
# errors at runtime; cbp1 is the custom vector defined just below.
display.brewer.pal(n = 8, name ="cbp1")
# NOTE(review): `" #D55E00"` contains a stray leading space — invalid as a
# colour string if this vector is ever passed to a scale.
cbp1 <- c("#999999", "#E69F00", "#D55E00", "#009E73",
"#F0E442", "#0072B2"," #D55E00", "#CC79A7")
##### daily means ####
# Collapse the raw logger readings to one row per logger per day:
# daily mean / min / max temperature, plus the within-day sd of the readings.
#
# FIX: the original computed `value = mean(value)` FIRST and then
# `sd = mean(sd(value))` — but dplyr::summarise() evaluates expressions
# sequentially, so sd() was applied to the already-collapsed scalar and
# always returned NA. Computing `sd` before overwriting `value` uses the raw
# readings. (Column order within each frame changes slightly; all three
# frames stay identical in structure, so the rbind below is unaffected.)
names(wild.temp.all)
daily_temp_mean <- dplyr::summarise(group_by(wild.temp.all, Date, alt_height_slope, alt_type, side_andes, height, datalogger_id,point,side.alt),
sd=sd(value),
value=mean(value),
type=paste("temp.mean"))
daily_temp_min <- dplyr::summarise(group_by(wild.temp.all, Date, alt_height_slope, alt_type, side_andes, height, datalogger_id,point,side.alt),
sd=sd(value),
value=min(value),
type=paste("temp.min"))
daily_temp_max <- dplyr::summarise(group_by(wild.temp.all, Date, alt_height_slope, alt_type, side_andes, height, datalogger_id,point,side.alt),
sd=sd(value),
value=max(value),
type=paste("temp.max"))
# Combined altitude x forest-layer factor used for colouring/grouping.
daily_temp_mean$alt.type_height <- paste(daily_temp_mean$alt_type,daily_temp_mean$height, sep="_")
daily_temp_max$alt.type_height <- paste(daily_temp_max$alt_type,daily_temp_max$height, sep="_")
daily_temp_min$alt.type_height <- paste(daily_temp_min$alt_type,daily_temp_min$height, sep="_")
daily_temp_mean$Date <- as.POSIXct(daily_temp_mean$Date, "%Y-%m-%d", tz="Europe/London")
daily_temp_max$Date <- as.POSIXct(daily_temp_max$Date, "%Y-%m-%d", tz="Europe/London")
daily_temp_min$Date <- as.POSIXct(daily_temp_min$Date, "%Y-%m-%d", tz="Europe/London")
# Long format: max, mean and min stacked, distinguished by `type`.
daily <- rbind(daily_temp_max,daily_temp_mean,daily_temp_min)
##### table1 daily #####
# Build Table 1: mean ± SE of daily max/mean/min per altitude x slope x layer,
# then spread to wide format and write to CSV.
head(daily)
daily.summ <- summarise(group_by(daily, alt_type, side_andes, type, height),
tmean=mean(value),
tmean.se= sd(value) / sqrt(length(value))); daily.summ
neworder1 <- c("high","low"); neworder2 <- c("west","east"); neworder3 <- c("temp.max","temp.mean","temp.min")
daily.summ <- plyr::arrange(transform(daily.summ, alt_type=factor(alt_type,levels=neworder1)),
alt_type,side_andes,height,type);daily.summ
daily.summ$tmean <- round(daily.summ$tmean, 2)
daily.summ$tmean.se <- round(daily.summ$tmean.se, 2)
daily.summ$value.c <- paste(daily.summ$tmean, daily.summ$tmean.se, sep="±")
daily.summ$alt.height <- paste(daily.summ$alt_type, daily.summ$height, sep=".")
daily.summ$type.height <- paste(daily.summ$type, daily.summ$height, sep=".")
names(daily.summ)
#daily.summ <- daily.summ[,-c(1,4,5,6)];daily.summ
# NOTE(review): dropping columns by position is fragile — if the column set
# of daily.summ ever changes this silently keeps the wrong columns.
daily.summ <- daily.summ[,-c(4,3,5,6,8)];daily.summ
daily.summ.spread <- spread(daily.summ, type.height, value.c); daily.summ.spread
write.csv(daily.summ.spread,"data/daily.summ.table.csv", row.names = FALSE)
# humidity
# Daily minimum relative humidity per group (used by the c5/d5 panels).
names(wild.humi.all)
daily_hum_min <- dplyr::summarise(group_by(wild.humi.all, Date, alt_height_slope, alt_type, side_andes, height, alt.type_height),
value=min(value),
# NOTE(review): summarise() evaluates sequentially, so sd(value) here sees
# the scalar daily minimum computed above and returns NA — confirm intent.
sd=min(sd(value)),
type=paste("hum.min"))
##### estimate VPD and annual mean / daily max ###############
# we need RH and T in the same place
head(wild.humi.all)
wild.humi.all$value.type
# so add T to RH dataset (and not the other way around, remember we have many more Tonly loggers)
# Match temperature readings onto the humidity rows by (timestamp, logger id).
wild.humi.all$temp <- wild.temp.all$value[match(paste(wild.humi.all$date.time,wild.humi.all$datalogger_id), paste(wild.temp.all$date.time,wild.temp.all$datalogger_id))]
names(wild.humi.all)[4] <- "humi"
# Kept for backward compatibility in case `e` is referenced later in the file.
e <- exp(1) ; e
# Vapour pressure deficit (hPa): saturation vapour pressure from a
# Magnus-type formula (constants 6.112 / 17.67 / 243.5, T in °C), scaled by
# the saturation shortfall (100 - RH)/100.
# Uses exp() directly instead of the original e^(...) — mathematically the
# same quantity, and the idiomatic R form.
wild.humi.all$vpd <- ((100-wild.humi.all$humi)/100)*
(6.112*exp((17.67*wild.humi.all$temp)/(wild.humi.all$temp+243.5)))
##### get yearly mean and daily max mean #####
# max, mean min per datalogger per day
# Step 1: collapse to one row per logger per day for vpd / temp / humi.
daily_humi_mean <- dplyr::summarise(group_by(wild.humi.all, Date, alt_height_slope, alt_type, side_andes, height, datalogger_id),
vpd.mean=mean(vpd),
temp.mean=mean(temp),
humi.mean=mean(humi),
type=paste("humi.mean"))
daily_humi_min <- dplyr::summarise(group_by(wild.humi.all, Date, alt_height_slope, alt_type, side_andes, height, datalogger_id),
vpd=min(vpd),
temp.min=min(temp),
humi.min=min(humi),
type=paste("humi.min"))
daily_humi_max <- dplyr::summarise(group_by(wild.humi.all, Date, alt_height_slope, alt_type, side_andes, height, datalogger_id),
vpd=max(vpd),
temp.max=max(temp),
humi.max=max(humi),
type=paste("humi.max"))
# now do year vpdmax, mean, min, per logger
# Step 2: average the daily statistics over the year per logger.
yearly_daily_humi_mean <- dplyr::summarise(group_by(daily_humi_mean, alt_height_slope, alt_type, side_andes, height, datalogger_id),
vpd.mean.year=mean(vpd.mean),
temp.mean.year=mean(temp.mean),
humi.mean.year=mean(humi.mean),
type=paste("humi.mean"))
yearly_daily_humi_max <- dplyr::summarise(group_by(daily_humi_max, alt_height_slope, alt_type, side_andes, height, datalogger_id),
vpd.max.year=mean(vpd),
temp.max.year=mean(temp.max),
humi.max.year=mean(humi.max),
type=paste("humi.max"))
#mostly 0 for VPD, not very relevant, but RH interesting
yearly_daily_humi_min <- dplyr::summarise(group_by(daily_humi_min, alt_height_slope, alt_type, side_andes, height, datalogger_id),
vpd.min.year=mean(vpd),
temp.min.year=mean(temp.min),
humi.min.year=mean(humi.min),
type=paste("humi.min"))
##### Fig. 3 daily max/mean VPD ###############
# Figure 3: boxplots of yearly-averaged daily mean / daily max VPD by
# altitude, split by forest layer, with a t-test p-value annotation
# (ggpubr::stat_compare_means).
names(yearly_daily_humi_mean)
cols1 <- alpha(c("#E69F00","#56B4E9","#D55E00","#0072B2"), 0.9)
yearly_daily_humi_mean$alt_height <- paste(yearly_daily_humi_mean$alt_type,yearly_daily_humi_mean$height, sep="_")
yearly_daily_humi_max$alt_height <- paste(yearly_daily_humi_max$alt_type,yearly_daily_humi_max$height, sep="_")
vpd.mean.p <- ggplot(aes(y=vpd.mean.year, x=alt_type, fill=alt_height), data=yearly_daily_humi_mean)+
#geom_point(aes(fill = alt_height), size = 1, shape = 21, position = position_jitterdodge()) +
geom_boxplot(aes(fill=alt_height))+
stat_cor(show.legend = FALSE)+
stat_compare_means(aes(label = sprintf("p = %5.3f", as.numeric(..p.format..))),method = "t.test")+
#geom_rug(alpha=.05)+
ylim(0,9.5)+
xlab("Altitude")+ ylab(expression(bold("VPD"["mean"])))+
#scale_colour_manual(name="Altitude", values = cols1)+
scale_fill_manual(name="", values = cols1)+
#facet_wrap(~side_andes, labeller=labeller(side_andes = capitalize), strip.position=NULL, ncol = 2)+
# facet_rep_wrap(~side_andes, repeat.tick.labels = FALSE,labeller=labeller(side_andes = capitalize), strip.position="right", ncol = 1)+
theme_classic()+
theme( axis.line=element_blank(),
axis.text.x = element_text(size=14),
axis.text.y = element_text(size=12),
axis.title.x = element_text(face="bold", size=14),
axis.title.y = element_text(face="bold", size=14),
panel.border = element_rect( fill = NA, size = 1),
strip.text = element_text(size=14, face="italic"),
legend.position = "bottom"); vpd.mean.p
# Same layout for daily-max VPD (right-hand panel).
vpd.max.p <- ggplot(aes(y=vpd.max.year, x=alt_type, fill=alt_height), data=yearly_daily_humi_max)+
#geom_point(aes(fill = alt_height), size = 1, shape = 21, position = position_jitterdodge()) +
geom_boxplot(aes(fill=alt_height))+
stat_cor(show.legend = FALSE)+
# stat_compare_means(label="p.signif", method = "t.test")+
stat_compare_means(aes(label = sprintf("p = %5.3f", as.numeric(..p.format..))) ,method = "t.test")+
ylim(0,9.5)+
xlab("Altitude")+ ylab(expression(bold("VPD"["max"])))+
#scale_colour_manual(name="Altitude", values = cols1)+
scale_fill_manual(name="", values = cols1)+
#facet_wrap(~side_andes, labeller=labeller(side_andes = capitalize), strip.position=NULL, ncol = 2)+
# facet_rep_wrap(~side_andes, repeat.tick.labels = FALSE,labeller=labeller(side_andes = capitalize), strip.position="right", ncol = 1)+
theme_classic()+
theme( axis.line=element_blank(),
axis.text.x = element_text(size=14),
axis.text.y = element_text(size=12),
axis.title.x = element_text(face="bold", size=14),
axis.title.y = element_text(face="bold", size=14),
panel.border = element_rect( fill = NA, size = 1),
strip.text = element_text(size=14, face="italic"),
legend.position = "bottom"); vpd.max.p
plot_grid(vpd.max.p, vpd.mean.p)
ggsave("../figures/fig3.VPD.pdf", width = 7, height = 4, dpi = 300)
########################################## FIGURE 4 #####################
#### data prep ####
# Coerce columns read in as factors/characters to numeric
ther$altitude <- as.numeric(as.character(ther$altitude))
coll$adult.mass <- as.numeric(as.character(coll$adult.mass))
coll$pupa.clean.mass <- as.numeric(as.character(coll$pupa.clean.mass ))
# Helper: parse a "HH:MM:SS" duration (stored as factor/character) via hms and
# return it as minutes (numeric). Replaces six copy-pasted as.hms/difftime pairs;
# results are identical to the original two-step conversion.
mins_from_time <- function(x) as.double.difftime(as.hms(as.character(x))) / 60
# convert assay durations to minutes
coll$mins.40.to.ko <- mins_from_time(coll$time.40.to.ko)
coll$mins.to.40    <- mins_from_time(coll$time.to.40)
coll$mins.ko.total <- mins_from_time(coll$time.ko.total)
ther$mins.40.to.ko <- mins_from_time(ther$time.40.to.ko)
ther$mins.to.40    <- mins_from_time(ther$time.to.40)
ther$mins.ko.total <- mins_from_time(ther$time.ko.total)
# European decimal commas -> points before numeric coercion
ther$temp.at.ko <- as.numeric(sub(",", ".", ther$temp.at.ko))
# make mid as low: classify individuals as high (>700 m) vs low altitude
ther$type.alt1 <- if_else(ther$altitude>700, "high", "low")
coll$type.alt1 <- if_else(coll$mother.alt>700, "high", "low")
#### wild subset #####
# subset by number of indivs, remove species with less than 5 indivs, n=14
ther.wild <- subset(ther, type.reared=="wild")
sp.no <- dplyr::summarise(group_by(ther.wild, species, type.alt1),
                          mean.alt=mean(altitude),
                          n=n())
sp.more.5 <- subset(sp.no, n>4)
# keep only species x altitude-band combinations with at least 5 individuals
ther.wild <- subset(ther.wild, paste(species, type.alt1) %in% paste(sp.more.5$species, sp.more.5$type.alt1))
# subset by exp success
ther.wild.tt <- subset(ther.wild, type.reared=="wild"& exp.success=="yes")
######### era for SIZE
# erato residuals controlling for mins to ko, do models SEPARATELY
era.size <- subset(ther, species=="erato"&type.alt1!="")
######### era for TT
# H. erato knockout-time subsets: drop missing KO times and runs where the chamber
# overshot 41.1 C. (A dead era.tt assignment that was immediately overwritten has
# been removed.) 402-316/329 indivs kept; 86/73 above 41
era.tt <- subset(ther, species=="erato"&type.alt1!=""&!(is.na(mins.40.to.ko))&temp.at.ko<41.1)
era.tt.res <- subset(ther, species=="erato"&type.alt1!=""&!(is.na(mins.40.to.ko))&!(is.na(mins.to.40))&temp.at.ko<41.1)
# erato reared residuals -sig: KO time controlling for warm-up time
lm1 <- lm(mins.40.to.ko ~ mins.to.40, data=subset(era.tt.res, type.reared=="reared")); summary(lm1)
era.tt.res$mins.ko.residuals[era.tt.res$type.reared=="reared"] <- residuals(lm1)
# erato wild residuals -sig
lm1 <- lm(mins.40.to.ko ~ mins.to.40, data=subset(era.tt.res, type.reared=="wild")); summary(lm1)
era.tt.res$mins.ko.residuals[era.tt.res$type.reared=="wild"] <- residuals(lm1)
######### coll era for TT
# common-garden (collection) erato data, same exclusion criteria; n=203
coll.era.tt <- subset(coll, exp.success=="yes"&temp.at.ko<41)
# erato sub residuals -sig
str(coll.era.tt)  # FIX: was str(coll.era.sub) — that object is never defined and errored
lm1 <- lm(mins.40.to.ko ~ mins.to.40, data=coll.era.tt); summary(lm1)
# NOTE(review): residuals(lm1) drops rows with NA in EITHER variable; the mask below
# only filters NA mins.to.40 — confirm mins.40.to.ko has no NAs in coll.era.tt,
# otherwise the lengths will mismatch.
coll.era.tt$mins.ko.residuals[!(is.na(coll.era.tt$mins.to.40))] <- residuals(lm1)
# mean sampling altitude per band (reported in text)
mean(ther$altitude[!(is.na(ther$altitude))&ther$type.alt1=="low"]); mean(ther$altitude[!(is.na(ther$altitude))&ther$type.alt1=="high"])
#### 1. Fig. 4A ####################
# Plot species in fixed order: four coloured focal species first, then grey specialists
neworder <- c("erato", "timareta", "melpomene", "sara", "clysonymus", "hierax", "telesiphe", "aoede", "elevatus","wallacei")
ther.wild.tt <- plyr::arrange(transform(ther.wild.tt,species=factor(species,levels=neworder)),species)
# Colours follow neworder: erato/timareta/melpomene/sara coloured, the rest grey
sp.ther.cols1 <- c("#E69F00", "#CC79A7", "#56B4E9", "#009E73", "#999999", "#999999", "#999999", "#999999", "#999999" ,"#999999")
## at what temperature do high altitude specialists collapse?
## what percentage of high altitude specialists collapse 35-39C?
# "wide-range" = the four focal species, all others are specialists
ther.wild.tt$range <- if_else((ther.wild.tt$species=="erato"|ther.wild.tt$species=="sara"|
ther.wild.tt$species=="melpomene"|ther.wild.tt$species=="timareta"), "wide-range","specialist")
ther.wild.tt$temp.ko.range <- if_else((ther.wild.tt$temp.at.ko<39&ther.wild.tt$temp.at.ko>35), "mid-hot-range","extreme")
# Summary of knockout temperature per altitude band x range class x temperature band
# (note: mean.temp.at.ko is actually the MEDIAN here)
mean.temps <- dplyr::summarise(group_by(ther.wild.tt, type.alt1,range,temp.ko.range ),
mean.temp.at.ko=median(temp.at.ko),
min.temp.at.ko=min(temp.at.ko),
max.temp.at.ko=max(temp.at.ko),
n=n(),
se= sd(temp.at.ko) / sqrt(n))
# mean and SE for plot (dashed line + red ribbon below)
mean.KO.times<- dplyr::summarise(group_by(ther.wild.tt, type.alt1),
mean.ko=mean(mins.40.to.ko),
sd.ko=sd(mins.40.to.ko),
mean.temp.at.ko=mean(temp.at.ko),
n=n(),
se= sd(mins.40.to.ko) / sqrt(n))
# Boxplots of knockout time per species, faceted by individual altitude band;
# dashed line + translucent red band = per-band mean knockout time +/- SE.
# alt_labeller is assumed to be defined earlier in this script.
a.wild.tt <- ggplot(ther.wild.tt , aes(x=species, y=mins.40.to.ko,fill=species)) +
geom_boxplot(outlier.shape = NA, coef=0)+
geom_hline(aes(yintercept = mean.ko), linetype="dashed", data=mean.KO.times, colour="darkgrey")+
geom_rect(inherit.aes = FALSE,aes(xmin=0.1,xmax=7.9, ymin=(mean.KO.times$mean.ko-mean.KO.times$se),
ymax=(mean.KO.times$mean.ko+mean.KO.times$se)), data=mean.KO.times, alpha=0.3, fill="red")+
stat_boxplot(geom ='errorbar', width = 0.2, alpha=.8) +
geom_boxplot(outlier.shape = NA, coef=0)+
facet_wrap(~type.alt1, scales = "free_x",labeller =labeller(type.alt1=alt_labeller))+
stat_n_text(size = 4) +
geom_beeswarm(width = 0.1, alpha=.35, size=1.5, cex = .75)+
scale_fill_manual(values = sp.ther.cols1)+
scale_colour_manual(values = sp.ther.cols1)+
scale_x_discrete(labels=c(erato="era", timareta="tim", melpomene="mel",sara="sar",clysonymus="cly", hierax="hie",telesiphe="tel",aoede= "aoe",elevatus="ele",wallacei="wall"))+
xlab("")+ ylab("Knockout time (minutes)")+
theme_classic()+
theme( axis.line=element_blank(),
axis.text.x = element_text(size=14, face="italic"),
axis.text.y = element_text(size=12),
axis.title.x = element_text(face="bold", size=14),
axis.title.y = element_text(face="bold", size=14),
panel.border = element_rect( fill = NA, size = 1),
strip.text = element_text(size=14, face="bold"),
legend.position = "none"); a.wild.tt
#### 4. Fig. 4B ####################
# Survival-analysis framing of knockout: status = 2 codes every individual as an
# observed event (knocked out) in Surv(), i.e. no censoring.
ther.wild.tt$status <- 2
min(ther.wild.tt$temp.at.ko)
# Kaplan-Meier-style curve: probability of still resisting knockout as chamber
# temperature rises, highland (dotted) vs lowland (solid). temp.at.ko < 49 drops outliers.
p.wild<- ggsurvplot(
fit = survfit(Surv(temp.at.ko, status) ~ type.alt1, data = subset(ther.wild.tt, temp.at.ko<49)),
xlab = "Temperature (°C)",
xlim=c(28.5,39),
axes.offset=FALSE,
ylim=c(0,1.03),
ylab = "Knockout resistance probability",
ggtheme = theme( panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
axis.line=element_blank(),panel.background = element_blank(),
axis.text.x = element_text(size=14),
axis.text.y = element_text(size=12),
axis.title.x = element_text(face="bold", size=14),
axis.title.y = element_text(face="bold", size=14),
panel.border = element_rect( fill = NA, size = 1),
strip.text = element_text(size=14, face="italic"),
legend.position = "none"),
conf.int=TRUE,
pval=TRUE,pval.coord = c(28.75,0.05),
palette=c("black", "black"),
linetype = c("dotted", "solid"),
legend.labs = c("Highland","Lowland"),
legend.title=c("Individual altitude"),
break.time.by = 1); p.wild
# Same comparison on the time axis: minutes from reaching 40 °C until knockout
p.wild.test <- ggsurvplot(
fit = survfit(Surv(as.numeric(mins.40.to.ko), status) ~ type.alt1, data = subset(ther.wild.tt, type.reared=="wild")),
xlab = "Time (minutes)",
#fun = "pct",
ylab = "", size=1,
conf.int=TRUE, pval=TRUE,pval.coord = c(1, .05),
xlim=c(0.3,60),
axes.offset=FALSE,
ylim=c(0,1.03),
ggtheme = theme( panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
axis.line=element_blank(),panel.background = element_blank(),
axis.text.x = element_text(size=14),
axis.text.y = element_text(size=12),
axis.title.x = element_text(face="bold", size=14),
axis.title.y = element_text(face="bold", size=14),
panel.border = element_rect( fill = NA, size = 1),
strip.text = element_text(size=14, face="italic")),
palette=c("black","black"),
linetype = c("dotted", "solid"),
legend.labs = c("Highland","Lowland"),
legend.title=c("Individual altitude"),
break.time.by = 5); p.wild.test
#### combine ###########
# Assemble Figure 4: species boxplots (A) above the two survival curves (B, C)
bottomrow <- plot_grid(p.wild$plot, p.wild.test$plot, labels = c("B","C"));bottomrow
plot_grid(a.wild.tt, bottomrow , nrow = 2,ncol=1, rel_widths = c(1,.8), rel_heights = c(1,.9), scale = c(1,0.99),
labels=c("A"))
########################################## FIGURE 5 #####################
# H. erato knockout time: wild-caught vs common-garden reared, high vs low populations
era.cols <- c("#E69F00","#D55E00")
my_comparisons <- list( c("high", "low"))
# facet order: wild panel first, then reared
neworder <- c("wild","reared")
era.tt <- plyr::arrange(transform(era.tt,type.reared=factor(type.reared,levels=neworder)),type.reared)
# remove mother 20? and mothers from baños butterfly house (1600m.a.s.l)- never included
era.wild.reared.tt.plot <- ggplot(subset(era.tt,mother.id!="20?"&altitude<1500), aes(x=type.alt1, y=mins.40.to.ko, fill=type.alt1)) +
stat_boxplot(geom ='errorbar', width = 0.2, alpha=.8) +
geom_boxplot(outlier.shape = NA, coef=0, alpha=1)+
facet_wrap(~type.reared, scales = "free_x", labeller =labeller(type.reared=capitalize))+
#geom_jitter(width = 0.1, alpha=.35, size=1)+
geom_beeswarm( alpha=.5, size=1.7, cex = 1.7, color="black", fill="black", shape=21)+
stat_n_text(size = 4) +
scale_fill_manual(values = era.cols)+
#stat_compare_means(method="t.test", label = "p.signif", comparisons = my_comparisons)+
xlab("Population")+ ylab("Knockout time (minutes)")+
theme_classic()+
theme( axis.line=element_blank(),
axis.text.x = element_text(size=12),
axis.text.y = element_text(size=12),
axis.title.x = element_text(face="bold", size=14),
axis.title.y = element_text(face="bold", size=14),
panel.border = element_rect( fill = NA, size = 1),
strip.text = element_text(size=14, face="bold"),
legend.position = "none"); era.wild.reared.tt.plot
# NOTE(review): path lacks the "../" prefix used by most other ggsave calls, and the
# file is named fig4 inside the FIGURE 5 section — confirm intended output location/name
ggsave("figures/fig4.KO.era.raw.png",width = 6,height = 5, dpi = 300)
########################################## FIGURE S1 ##########################################
#### packages ######
# Fresh session for the map figure. NOTE(review): rm(list=ls()) wipes every object
# created by the sections above — this section is meant to be run standalone.
rm(list=ls())
dev.off()
# Duplicate library() calls (sp, viridis) and a redundant require(mapdata) removed;
# one load per package is sufficient.
library(sp)
library(reshape2)
library(vegan)
library(gridExtra)
library(raster)
library(maps)
library(mapdata)
library(googleway)
library(rnaturalearth)
library(rnaturalearthdata)
library(viridis)
library(ggspatial)
library(ggrepel)
library(ggplot2)
library(elevatr)
library(sf)
library(ggmap)
#### 0. data ######
# NOTE(review): absolute setwd() makes the script machine-specific — consider an
# RStudio project or here::here() instead.
setwd("/Users/gabrielamontejokovacevich/Dropbox (Cambridge University)/PhD/21_paper2/1.microclimates")
logger.info <- read.csv("data/logger.info.csv")
#### 1. fig1 map ###########
world <- ne_countries(scale = "medium", returnclass = "sf")
theme_set(theme_bw())
### SA insert #####
# South America inset map; the rectangle marks the Ecuador study extent
# ec map coords xlim=c(-81,-76), ylim=c(-2,1.5),
ggplot(data = world) +
geom_sf(color="darkgrey") + ylab("Latitude") + xlab("Longitude")+
coord_sf( xlim=c(-88,-65), ylim=c(-10,10), expand = FALSE)+
theme(panel.grid.major = element_line(color = "white", linetype = "dashed", size = 0.5),
panel.background = element_rect(fill = "white"))+
scale_color_viridis(name="Altitude\n(m.a.s.l)")+
scale_y_continuous(breaks=c(-10,0,10))+
scale_x_continuous(breaks=c(-85, -70, -60))+
geom_rect(mapping = aes(xmin=-81, xmax=-76, ymin=-2, ymax=1.5),fill=NA,color="black", alpha=.5)+
annotation_north_arrow(location = "tr", which_north = "true", pad_x = unit(0.1, "in"),
pad_y = unit(0.1, "in"), height = unit(1, "cm"), width = unit(1, "cm"))+
annotation_scale(location = "bl", width_hint = 0.08, line_width = 1)
ggsave("../figures/SOM/SA.insert.png", width = 6, height = 4, dpi = 300)
### Ecuador map ####
head(logger.info)
# Mean coordinates per altitude band x Andean slope (one map point per sampling area)
loc <- dplyr::summarise(group_by(logger.info, alt_type, side_andes),
n=n(),
lat=mean(latitude),
lon=mean(longitude))
# create df input SpatialPoints (data.frame replaces the deprecated tibble::data_frame)
loc.df <- data.frame(x=loc$lon, y=loc$lat)
prj_dd <- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
# SpatialPoints
loc.sp <- SpatialPoints(loc.df, proj4string = CRS(prj_dd))
# Download the DEM around the points once at zoom 6. (A redundant z = 8 download
# whose result was immediately overwritten has been removed — it only wasted a
# second, slower network fetch.)
elevation_df <- get_elev_raster(loc.sp, prj = prj_dd, z = 6)
# NOTE(review): jpeg() device writing a .png-named file — confirm intended format.
jpeg("../figures/SOM/elev.map.points.far.png", width = 15, height = 11.5, units = "cm", res = 300)
plot(elevation_df, col = viridis(20, alpha =.9), xlim=c(-81,-76), ylim=c(-2,1.5),
xlab=c("Longitude"), ylab=c("Latitude"))
plot(loc.sp, add = TRUE, pch=c(24,24,21,21), col="black", bg=alpha("#E69F00",.9), cex=2)
dev.off()
########################################## FIGURE S2 ##########################################
#### 1. s2a - mean ####
# Group means of daily mean temperature, used for the dashed vertical reference lines
mu <- plyr::ddply(subset(daily.annual.means.per.logger, type=="temp.mean"), c("height", "side_andes", "alt_type","alt.type_height"), summarise, grp.mean=mean(mean.value))
# Density curves of daily MEAN temperature per logger group vs WorldClim2
s2.a <- ggplot(subset(daily.annual.means.per.logger, type=="temp.mean"), aes(x=mean.value, colour=alt.type_height, fill=alt.type_height)) +
geom_vline(data=mu, aes(xintercept=grp.mean, color=alt.type_height),
linetype="dashed", alpha=.6, size=1)+
geom_density(adjust = 1.5, alpha=.6)+
xlab("Mean temperature (°C)")+
ylab("Probability density")+
facet_rep_wrap(~side_andes+alt_type, scales = "fixed")+
scale_fill_manual(values = c("grey", "#56B4E9","#E69F00","grey", "#0072B2","#D55E00"),
labels=c("WorldClim2","Highlands\ncanopy", "Highlands\nunderstory","WorldClim2","Lowlands\ncanopy","Lowlands\nunderstory"))+
scale_colour_manual(values = c("grey", "#56B4E9","#E69F00","grey", "#0072B2","#D55E00"),
labels=c("WorldClim2","Highlands\ncanopy", "Highlands\nunderstory","WorldClim2","Lowlands\ncanopy","Lowlands\nunderstory"))+
scale_y_continuous(expand = c(0, 0)) +
# NOTE(review): xlim() REMOVES observations outside 18-25 °C before the density is
# computed (unlike coord_cartesian) — confirm no data are silently excluded.
xlim(18,25)+
theme_classic()+
theme(axis.text.x = element_text(angle = 0,size=12,hjust =.5),
axis.line.x = element_line(color="black", size = 0.5),
#strip.text = element_text(size=12, face="bold"),
strip.background = element_blank(),
strip.text.x = element_blank(),legend.title = element_blank(),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
legend.position = "none",
axis.text.y = element_text(size=10),axis.title=element_text(size=12,face="bold", colour="black")); s2.a
#### 2. s2b - min ####
# (section was mislabelled "mean"; this panel uses daily MINIMUM temperature)
mu <- plyr::ddply(subset(daily.annual.means.per.logger, type=="temp.min"), c("height", "side_andes", "alt_type","alt.type_height"), summarise, grp.mean=mean(mean.value))
s2.b <- ggplot(subset(daily.annual.means.per.logger, type=="temp.min"), aes(x=mean.value, colour=alt.type_height, fill=alt.type_height)) +
geom_vline(data=mu, aes(xintercept=grp.mean, color=alt.type_height),
linetype="dashed", alpha=.6, size=1)+
geom_density(adjust = 1.5, alpha=.6)+
xlab("Minimum temperature (°C)")+
ylab("Probability density")+
facet_rep_wrap(~side_andes+alt_type, scales = "fixed")+
scale_fill_manual(values = c("grey", "#56B4E9","#E69F00","grey", "#0072B2","#D55E00"),
labels=c("WorldClim2","Highlands\ncanopy", "Highlands\nunderstory","WorldClim2","Lowlands\ncanopy","Lowlands\nunderstory"))+
scale_colour_manual(values = c("grey", "#56B4E9","#E69F00","grey", "#0072B2","#D55E00"),
labels=c("WorldClim2","Highlands\ncanopy", "Highlands\nunderstory","WorldClim2","Lowlands\ncanopy","Lowlands\nunderstory"))+
scale_y_continuous(expand = c(0, 0)) +
theme_classic()+
theme(axis.text.x = element_text(angle = 0,size=12,hjust =.5),
axis.line.x = element_line(color="black", size = 0.5),
strip.background = element_blank(),
strip.text.x = element_blank(),legend.title = element_blank(),
axis.line.y = element_line(color="black", size = 0.5),
plot.margin = unit(c(0,0,1,0), "lines"),
axis.text.y = element_text(size=10),axis.title=element_text(size=12,face="bold", colour="black")); s2.b
########################################## FIGURE S3 ##########################################
# Daily minimum humidity by slope. The west and east panels used identical
# duplicated logic, so the Tukey-letter boxplot is now built by a single helper.
# (Two unused textGrob objects, text_high/text_low, were never added to any plot
# and have been removed.)
#
# dat:      daily minimum-humidity records for one slope, with alt_height_slope factor
# x_order:  display order of the altitude/height/slope groups
# x_labels: x-axis tick labels (blank for the top panel)
# Returns the ggplot object; prints the ANOVA summary and Tukey HSD as the
# original inline code did. generate_label_df() is defined earlier in this script.
make_s3_plot <- function(dat, x_order, x_labels) {
  # one-way ANOVA + Tukey HSD -> compact-letter display per group
  alt.aov <- aov(humi.min ~ alt_height_slope, data=dat); print(summary(alt.aov))
  TUKEY <- TukeyHSD(x=alt.aov, 'alt_height_slope', conf.level=0.95); print(TUKEY)
  labels <- generate_label_df(TUKEY, "alt_height_slope")
  names(labels) <- c('Letters', 'alt_height_slope')
  yvalue <- dplyr::summarise(group_by(dat, alt_height_slope),
                             mean=mean(humi.min))
  final <- merge(labels, yvalue)
  ggplot(dat, aes(y=humi.min, x=alt_height_slope, color=alt_height_slope)) +
    geom_boxplot(aes(fill=alt_height_slope))+labs(y="")+labs(x="")+
    coord_cartesian(ylim = c(14, 120))+
    # Tukey letters positioned just above each group mean
    geom_text(data = final, aes(x = alt_height_slope, y = mean, label = Letters),vjust=-2,hjust=0) +
    scale_color_manual(limits=x_order,
                       values=c( "#0072B2","#D55E00", "#56B4E9","#E69F00"))+
    scale_fill_manual(limits=x_order,
                      values = alpha(c( "#0072B2","#D55E00", "#56B4E9","#E69F00"), 0.6))+
    theme_classic()+
    ylab(c(""))+
    scale_x_discrete(labels=x_labels)+
    theme(legend.position="none")+ # Remove legend
    theme(axis.line.x = element_line(color="black", size = 0.5),
          axis.line.y = element_line(color="black", size = 0.5),
          plot.margin = unit(c(0,0,1,0), "lines"),
          axis.text = element_text(size=10),
          axis.text.x = element_text(size=10, angle = 25, hjust = 1))
}
## west slope (top panel: no x tick labels)
s3.west.dat <- subset(daily_humi_min, side_andes=="west")
s3.west.dat$alt_height_slope <- factor(s3.west.dat$alt_height_slope, levels = c("low_c_west", "low_u_west","high_c_west", "high_u_west"))
s3.west.dat$alt_height <- paste(s3.west.dat$alt_type, s3.west.dat$height, sep = "_")
s3.west <- make_s3_plot(s3.west.dat,
                        c("low_c_west","low_u_west","high_c_west","high_u_west"),
                        c("", "", "", "")); s3.west
## east slope (bottom panel: labelled ticks)
s3.east.dat <- subset(daily_humi_min, side_andes=="east")
s3.east.dat$alt_height_slope <- factor(s3.east.dat$alt_height_slope, levels = c("low_c_east", "low_u_east","high_c_east", "high_u_east"))
s3.east.dat$alt_height <- paste(s3.east.dat$alt_type, s3.east.dat$height, sep = "_")
s3.east <- make_s3_plot(s3.east.dat,
                        c("low_c_east","low_u_east","high_c_east","high_u_east"),
                        c("Canopy", "Understory", "Canopy", "Understory")); s3.east
## combine
plot_grid(NULL, s3.west, s3.east , nrow = 3,ncol=1, rel_widths = c(1,1,1), rel_heights = c(1,.9,1), scale = c(1,.95,.95),
          labels=c("","A West", "B East"), label_x = .04, label_y = 1.1)
ggsave("../figures/SOM/figs3.png", width = 3, height = 8, dpi = 300)
########################################## FIGURE S4 ##########################################
# relationship between temperature and vpd
# NOTE(review): this reuses (and overwrites) the cols1 palette defined for Figure 3.
cols1 <- alpha(c("gold4","deepskyblue4","gold4","deepskyblue4"), 0.9)
# Scatter + smoother of raw logger temperature vs VPD, coloured by altitude band
temp.vpd.p <- ggplot(aes(x=vpd, y=temp, colour=alt_type, fill=alt_type), data=wild.humi.all)+
geom_point(alpha=.05,size=.1)+
stat_cor(show.legend = FALSE)+
geom_smooth()+ ylab("Temperature (°C)")+ xlab("Vapour pressure deficit (VPD)")+
scale_colour_manual(name="Altitude", values = cols1)+
scale_fill_manual(name="Altitude", values = cols1)+
# facet_wrap(~side_andes, labeller=labeller(side_andes = capitalize), strip.position=NULL, ncol = 1)+
# facet_rep_wrap(~side_andes, repeat.tick.labels = FALSE,labeller=labeller(side_andes = capitalize), strip.position="right", ncol = 1)+
theme_classic()+
theme( axis.line=element_blank(),
axis.text.x = element_text(size=14),
axis.text.y = element_text(size=12),
axis.title.x = element_text(face="bold", size=14),
axis.title.y = element_text(face="bold", size=14),
panel.border = element_rect( fill = NA, size = 1),
strip.text = element_text(size=14, face="italic"),
legend.position = "bottom"); temp.vpd.p
ggsave("../figures/SOM/temp.vs.VPD.png")
########################################## FIGURE S5 ##########################################
# to check spread of temperature at KO
# Histogram of chamber temperature at knockout per altitude band; dashed lines mark
# the 39-41 °C window used as exclusion thresholds in the subsets above.
ggplot(aes(x=temp.at.ko),data=ther.wild.tt)+
geom_histogram(bins=20)+facet_wrap(~type.alt1, labeller=alt_labeller, scales = "free_y")+
geom_vline(xintercept = c(39,41), linetype="dashed", colour="blue")+
xlab("Temperature at K.O. (°C)")+ylab(c("Count"))+
theme_classic()+
theme( axis.line=element_blank(),
axis.text.x = element_text(size=12),
axis.text.y = element_text(size=12),
axis.title.x = element_text(face="bold", size=14),
axis.title.y = element_text(face="bold", size=14),
panel.border = element_rect( fill = NA, size = 1),
strip.text = element_text(size=14, face="bold"),
legend.position = "none")
# NOTE(review): path lacks the "../" prefix used elsewhere — confirm output location.
ggsave("figures/SOM/temp.at.KO.png", width = 8, height = 4, dpi = 300)
########################################## FIGURE S6 ##########################################
# Within-species high vs low knockout-time comparison for the four broad-range species
ther.wild.tt.within <- subset(ther.wild.tt, species=="erato"|species=="melpomene"|species=="sara"|species=="timareta")
sp.ther.cols <- c("#E69F00","#CC79A7","#56B4E9", "#009E73","#999999","#999999","#999999","#999999","#999999","#999999","#999999","#999999")
wild.within.sp.tt <- ggplot(ther.wild.tt.within , aes(x=type.alt1, y=mins.40.to.ko, fill=species)) +
geom_boxplot(outlier.shape = NA, coef=0)+
stat_boxplot(geom ='errorbar', width = 0.2, alpha=.8) +
geom_boxplot(outlier.shape = NA, coef=0)+
scale_fill_manual(values = sp.ther.cols)+
facet_wrap(.~species,labeller =labeller(type.alt1=alt_labeller),ncol = 4)+
#stat_n_text(size = 4, inherit.aes = FALSE, aes(x=type.alt1, y=mins.40.to.ko, fill=species))+
# per-facet t-test significance stars for the high vs low comparison
stat_compare_means(method="t.test", inherit.aes = FALSE, aes(x=type.alt1, y=mins.40.to.ko), label = "p.signif",
label.x.npc = 0.5, label.y.npc = .99)+
#stat_pvalue_manual(stat.test, label = "p.adj", y.position = 35)+
geom_beeswarm(width = 0.1, alpha=.35, size=1.5, cex = .75)+
scale_colour_manual(values = sp.ther.cols)+
#(labels=c("H. erato", "H. timareta", "H. melpomene", "H. sara"))+
xlab("Altitude")+ ylab("Knockout time (minutes)")+
theme_classic()+
theme( axis.text.x = element_text(size=14),
axis.text.y = element_text(size=12),
axis.title.x = element_text(face="bold", size=14),
axis.title.y = element_text(face="bold", size=14),
panel.border = element_blank(),
strip.text = element_text(size=14, face="italic"),
strip.background = element_blank(),
axis.line = element_line(colour = "black"),
legend.position = "none"); wild.within.sp.tt
ggsave("figures/SOM/within.sp.wild.TT.png", width = 8, height = 4, dpi = 300)
########################################## SOM
|
e9300e84c3155605c22bb0dbbeac10cc74419518
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/glamlasso/man/glamlassoRR.Rd
|
17ed7c5de034bb55246898a754241a2bfc1df86d
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 9,262
|
rd
|
glamlassoRR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glamlassoRR.R
\name{glamlassoRR}
\alias{glamlassoRR}
\title{Penalized reduced rank regression in a GLAM}
\usage{
glamlassoRR(X,
Y,
Z = NULL,
family = "gaussian",
penalty = "lasso",
intercept = FALSE,
weights = NULL,
thetainit = NULL,
alphainit = NULL,
nlambda = 100,
lambdaminratio = 1e-04,
lambda = NULL,
penaltyfactor = NULL,
penaltyfactoralpha = NULL,
reltolinner = 1e-07,
reltolouter = 1e-04,
reltolalt = 1e-04,
maxiter = 15000,
steps = 1,
maxiterinner = 3000,
maxiterouter = 25,
maxalt = 10,
btinnermax = 100,
btoutermax = 100,
iwls = "exact",
nu = 1)
}
\arguments{
\item{X}{A list containing the 3 tensor components of the tensor design matrix. These are matrices of sizes \eqn{n_i \times p_i}.}
\item{Y}{The response values, an array of size \eqn{n_1 \times n_2\times n_3}. For option
\code{family = "binomial"} this array must contain the proportion of successes and the
number of trials is then specified as \code{weights} (see below).}
\item{Z}{The non-tensor structured part of the design matrix. A matrix of size \eqn{n_1 n_2 n_3\times q}.
Is set to \code{NULL} as default.}
\item{family}{A string specifying the model family (essentially the response distribution). Possible values
are \code{"gaussian", "binomial", "poisson", "gamma"}.}
\item{penalty}{A string specifying the penalty. Possible values are \code{"lasso", "scad"}.}
\item{intercept}{Logical variable indicating if the model includes an intercept. When \code{intercept = TRUE} the first
column in the non-tensor design component \code{Z} is all 1s. Default is \code{FALSE}.}
\item{weights}{Observation weights, an array of size \eqn{n_1 \times \cdots \times n_d}. For option
\code{family = "binomial"} this array must contain the number of trials and must be provided.}
\item{thetainit}{A list (length 2) containing the initial parameter values for each of the parameter factors.
Default is NULL in which case all parameters are initialized at 0.01.}
\item{alphainit}{A \eqn{q\times 1} vector containing the initial parameter values for the non-tensor parameter.
Default is NULL in which case all parameters are initialized at 0.}
\item{nlambda}{The number of \code{lambda} values.}
\item{lambdaminratio}{The smallest value for \code{lambda}, given as a fraction of
\eqn{\lambda_{max}}; the (data derived) smallest value for which all coefficients are zero.}
\item{lambda}{The sequence of penalty parameters for the regularization path.}
\item{penaltyfactor}{A list of length two containing an array of size \eqn{p_1 \times p_2} and a \eqn{p_3 \times 1} vector.
Multiplied with each element in \code{lambda} to allow differential shrinkage on the (tensor) coefficients blocks.}
\item{penaltyfactoralpha}{A \eqn{q \times 1} vector multiplied with each element in \code{lambda} to allow differential shrinkage on the non-tensor coefficients.}
\item{reltolinner}{The convergence tolerance for the inner loop}
\item{reltolouter}{The convergence tolerance for the outer loop.}
\item{reltolalt}{The convergence tolerance for the alternation loop over the two parameter blocks.}
\item{maxiter}{The maximum number of inner iterations allowed for each \code{lambda}
value, when summing over all outer iterations for said \code{lambda}.}
\item{steps}{The number of steps used in the multi-step adaptive lasso algorithm for non-convex penalties. Automatically set to 1 when \code{penalty = "lasso"}.}
\item{maxiterinner}{The maximum number of inner iterations allowed for each outer iteration.}
\item{maxiterouter}{The maximum number of outer iterations allowed for each lambda.}
\item{maxalt}{The maximum number of alternations over parameter blocks.}
\item{btinnermax}{Maximum number of backtracking steps allowed in each inner iteration. Default is \code{btinnermax = 100}.}
\item{btoutermax}{Maximum number of backtracking steps allowed in each outer iteration. Default is \code{btoutermax = 100}.}
\item{iwls}{A string indicating whether to use the exact iwls weight matrix or use a tensor structured approximation to it.}
\item{nu}{A number between 0 and 1 that controls the step size \eqn{\delta} in the proximal algorithm (inner loop) by
scaling the upper bound \eqn{\hat{L}_h} on the Lipschitz constant \eqn{L_h} (see \cite{Lund et al., 2017}).
For \code{nu = 1} backtracking never occurs and the proximal step size is always \eqn{\delta = 1 / \hat{L}_h}.
For \code{nu = 0} backtracking always occurs and the proximal step size is initially \eqn{\delta = 1}.
For \code{0 < nu < 1} the proximal step size is initially \eqn{\delta = 1/(\nu\hat{L}_h)} and backtracking
is only employed if the objective function does not decrease. A \code{nu} close to 0 gives large step
sizes and presumably more backtracking in the inner loop. The default is \code{nu = 1} and the option is only
used if \code{iwls = "exact"}.}
}
\description{
Efficient design matrix free procedure for fitting large scale penalized reduced rank
regressions in a 3-dimensional generalized linear array model. To obtain a factorization of the parameter array,
the \code{glamlassoRR} function performs a block relaxation scheme within the gdpg algorithm, see \cite{Lund et al., 2017}.
}
\details{
Given the setting from \code{\link{glamlasso}} we place a reduced rank
restriction on the \eqn{p_1\times p_2\times p _3} parameter array \eqn{\Theta} given by
\deqn{\Theta=(\Theta_{i,j,k})_{i,j,k} = (\gamma_{k}\beta_{i,j})_{i,j,k}, \ \ \ \gamma_k,\beta_{i,j}\in \mathcal{R}.}
The \code{glamlassoRR} function solves the PMLE problem by combining a block relaxation scheme with the gdpg algorithm. This scheme alternates between optimizing over the first
parameter block \eqn{\beta=(\beta_{i,j})_{i,j}} and the second block \eqn{\gamma=(\gamma_k)_k} while fixing the second resp. first block. We note that the
individual parameter blocks are only identified up to a multiplicative constant.
}
\examples{
\dontrun{
##size of example
n1 <- 65; n2 <- 26; n3 <- 13; p1 <- 12; p2 <- 6; p3 <- 4
##marginal design matrices (tensor components)
X1 <- matrix(rnorm(n1 * p1), n1, p1)
X2 <- matrix(rnorm(n2 * p2), n2, p2)
X3 <- matrix(rnorm(n3 * p3), n3, p3)
X <- list(X1, X2, X3)
Beta12 <- matrix(rnorm(p1 * p2), p1, p2) * matrix(rbinom(p1 * p2, 1, 0.5), p1, p2)
Beta3 <- matrix(rnorm(p3) * rbinom(p3, 1, 0.5), p3, 1)
Beta <- outer(Beta12, c(Beta3))
Mu <- RH(X3, RH(X2, RH(X1, Beta)))
Y <- array(rnorm(n1 * n2 * n3, Mu), dim = c(n1, n2, n3))
system.time(fit <- glamlassoRR(X, Y))
modelno <- length(fit$lambda)
par(mfrow = c(1, 3))
plot(c(Beta), type = "h")
points(c(Beta))
lines(c(outer(fit$coef12[, modelno], c(fit$coef3[, modelno]))), col = "red", type = "h")
plot(c(Beta12), ylim = range(Beta12, fit$coef12[, modelno]), type = "h")
points(c(Beta12))
lines(fit$coef12[, modelno], col = "red", type = "h")
plot(c(Beta3), ylim = range(Beta3, fit$coef3[, modelno]), type = "h")
points(c(Beta3))
lines(fit$coef3[, modelno], col = "red", type = "h")
###with non tensor design component Z
q <- 5
alpha <- matrix(rnorm(q)) * rbinom(q, 1, 0.5)
Z <- matrix(rnorm(n1 * n2 * n3 * q), n1 * n2 * n3, q)
Y <- array(rnorm(n1 * n2 * n3, Mu + array(Z \%*\% alpha, c(n1, n2, n3))), c(n1, n2, n3))
system.time(fit <- glamlassoRR(X, Y, Z))
modelno <- length(fit$lambda)
par(mfrow = c(2, 2))
plot(c(Beta), type = "h")
points(c(Beta))
lines(c(outer(fit$coef12[, modelno], c(fit$coef3[, modelno]))), col = "red", type = "h")
plot(c(Beta12), ylim = range(Beta12,fit$coef12[, modelno]), type = "h")
points(c(Beta12))
lines(fit$coef12[, modelno], col = "red", type = "h")
plot(c(Beta3), ylim = range(Beta3, fit$coef3[, modelno]), type = "h")
points(c(Beta3))
lines(fit$coef3[, modelno], col = "red", type = "h")
plot(c(alpha), ylim = range(alpha, fit$alpha[, modelno]), type = "h")
points(c(alpha))
lines(fit$alpha[, modelno], col = "red", type = "h")
################ poisson example
Beta12 <- matrix(rnorm(p1 * p2, 0, 0.5), p1, p2) * matrix(rbinom(p1 * p2, 1, 0.1), p1, p2)
Beta3 <- matrix(rnorm(p3, 0, 0.5) * rbinom(p3, 1, 0.5), p3, 1)
Beta <- outer(Beta12, c(Beta3))
Mu <- RH(X3, RH(X2, RH(X1, Beta)))
Y <- array(rpois(n1 * n2 * n3, exp(Mu)), dim = c(n1, n2, n3))
system.time(fit <- glamlassoRR(X, Y, family = "poisson"))
modelno <- length(fit$lambda)
par(mfrow = c(1, 3))
plot(c(Beta), type = "h")
points(c(Beta))
lines(c(outer(fit$coef12[, modelno], c(fit$coef3[, modelno]))), col = "red", type = "h")
plot(c(Beta12), ylim = range(Beta12, fit$coef12[, modelno]), type = "h")
points(c(Beta12))
lines(fit$coef12[, modelno], col = "red", type = "h")
plot(c(Beta3), ylim = range(Beta3, fit$coef3[, modelno]), type = "h")
points(c(Beta3))
lines(fit$coef3[, modelno], col = "red", type = "h")
}
}
\references{
Lund, A. and N. R. Hansen (2017). Sparse Network Estimation for Dynamical Spatio-temporal Array Models.
\emph{ArXiv}.
}
\author{
Adam Lund
Maintainer: Adam Lund, \email{adam.lund@math.ku.dk}
}
|
fb9134c553977d5752630c7b51696fa553d5a50d
|
fc7a76422c87efc0b48ee58aa0bcb881f1511cbe
|
/R Code/Classification Model/Parkinsons Dataset/AllMethods.R
|
8558a33f8137d6cc60835558e38b371217329998
|
[] |
no_license
|
Sam-Malpass/Data-Analytics-and-Mining
|
9d551da5325ef9f17d05dc54f1370a20da0206a2
|
62a627bc39f4a959e6881f1a49056791e9a36cb7
|
refs/heads/master
| 2022-03-23T14:08:25.454576
| 2019-12-03T20:44:38
| 2019-12-03T20:44:38
| 225,706,639
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,900
|
r
|
AllMethods.R
|
# Resubstitution
# Compares four error-estimation schemes (resubstitution, holdout, 10-fold
# cross-validation, LOOCV) for an rpart decision tree on the UCI Parkinsons
# data, then bar-plots mean accuracy and SD per scheme.
# NOTE(review): `test_set[-17]` assumes `status` is column 17 after dropping
# `name` -- confirm against the CSV layout.
library(rpart)
# accumulators shared across all four sections below
all_accuracies<-c()
all_devs<-c()
input_data<-read.csv("C:/Users/sam/Desktop/DM Coursework Data/parkinsons.data", header=TRUE)
# drop the subject identifier; it is not a predictor
input_data<-subset(input_data, select=-c(name))
numRecords<-length(input_data[[1]])
numTrials<-20
accuracies<-c()
# --- Scheme 1: resubstitution (train and test on the same 90% sample) ---
for(i in 1:numTrials)
{
training_set<-input_data[sample(nrow(input_data), numRecords*0.9),]
# resubstitution: evaluate on the training rows themselves
test_set<-training_set
decision_tree = rpart(status~., data=training_set, method='class')
prediction<-predict(decision_tree, newdata=test_set[-17], type='class')
confMat<-table(test_set$status, prediction)
# accuracy = trace of the confusion matrix over total predictions
accuracies<-append(accuracies, sum(diag(confMat))/sum(confMat))
}
mean_accuracy<-sum(accuracies)/length(accuracies)
std_deviation<-sd(accuracies)
all_accuracies<-append(all_accuracies, mean_accuracy)
all_devs<-append(all_devs, std_deviation)
# --- Scheme 2: holdout (train on a random 90%, test on the remaining 10%) ---
input_data<-read.csv("C:/Users/sam/Desktop/DM Coursework Data/parkinsons.data", header=TRUE)
input_data<-subset(input_data, select=-c(name))
numRecords<-length(input_data[[1]])
numTrials<-20
accuracies<-c()
for(i in 1:numTrials)
{
sample<-sample.int(n=nrow(input_data), size=numRecords*0.9, replace=FALSE)
training_set<-input_data[sample,]
test_set<-input_data[-sample,]
decision_tree = rpart(status~., data=training_set, method='class')
prediction<-predict(decision_tree, newdata=test_set[-17], type='class')
confMat<-table(test_set$status, prediction)
accuracies<-append(accuracies, sum(diag(confMat))/sum(confMat))
}
mean_accuracy<-sum(accuracies)/length(accuracies)
std_deviation<-sd(accuracies)
all_accuracies<-append(all_accuracies, mean_accuracy)
all_devs<-append(all_devs, std_deviation)
# --- Scheme 3: 10-fold cross-validation, repeated numTrials times ---
input_data<-read.csv("C:/Users/sam/Desktop/DM Coursework Data/parkinsons.data", header=TRUE)
input_data<-subset(input_data, select=-c(name))
numRecords<-length(input_data[[1]])
numTrials<-20
numFolds<-10
accuracies<-c()
for(val in 1:numTrials)
{
# reshuffle rows each trial so the folds differ between repetitions
shuffled_indices<-sample(nrow(input_data))
shuffled_data<-input_data[shuffled_indices,]
correct_predictions<-0
# contiguous fold labels over the shuffled rows
folds<-cut(seq(1,numRecords), breaks=numFolds, labels=FALSE)
for(i in 1:numFolds)
{
sample<-which(folds==i, arr.ind=TRUE)
test_set<-shuffled_data[sample,]
training_set<-shuffled_data[-sample,]
decision_tree = rpart(status~., data=training_set, method='class')
prediction<-predict(decision_tree, newdata=test_set[-17], type='class')
confMat<-table(test_set$status, prediction)
correct_predictions<-correct_predictions + sum(diag(confMat))
}
accuracies<-append(accuracies, correct_predictions / numRecords)
}
mean_accuracy<-sum(accuracies)/length(accuracies)
std_deviation<-sd(accuracies)
all_accuracies<-append(all_accuracies, mean_accuracy)
all_devs<-append(all_devs, std_deviation)
# --- Scheme 4: leave-one-out CV on a 90% subsample, repeated numTrials times ---
input_data<-read.csv("C:/Users/sam/Desktop/DM Coursework Data/parkinsons.data", header=TRUE)
input_data<-subset(input_data, select=-c(name))
numRecords<-length(input_data[[1]])
numTrials<-20
# NOTE(review): the "+ 1" makes the subsample one row larger than 90% --
# presumably to land on a whole number of rows; confirm this is intentional
sample_size<-numRecords * 0.9 + 1
accuracies<-c()
for(trial in 1:numTrials)
{
sample<-sample.int(n=nrow(input_data), size=sample_size, replace=FALSE)
main_training_set<-input_data[sample,]
correct_predictions<-0
for(i in 1:sample_size)
{
# hold out row i, train on the rest of the subsample
row<-main_training_set[i,]
training_set<-main_training_set[-i,]
decision_tree = rpart(status~., data=training_set, method='class')
prediction<-predict(decision_tree, newdata=row[-17], type='class')
# `predict` below is a plain 0/1 hit flag; it shadows the name of the
# predict() generic but does not break the call above (R skips
# non-function objects when resolving a function call)
predict<-0
if(row$status == prediction)
{
predict<-1
}
correct_predictions<- correct_predictions + predict
}
accuracies<-append(accuracies, (correct_predictions / sample_size))
}
mean_accuracy<-sum(accuracies)/length(accuracies)
std_deviation<-sd(accuracies)
all_accuracies<-append(all_accuracies, mean_accuracy)
all_devs<-append(all_devs, std_deviation)
# --- Summary plot: grouped bars of mean accuracy and SD for each scheme ---
combined<-rbind(all_accuracies, all_devs)
xlab<-c("Resubstitution","Holdout","10f-XValidation","LOOCV")
colnames(combined)<-xlab
barplot(combined, beside=TRUE, main="Parkinsons")
|
e556ebd8115d83c95f58ded54a274e1a39b24e3f
|
fb6416b9ded37fb3a00709280e5577605b3a1f7d
|
/R/utils_classify.R
|
e46877a230358348c11a67e6eb99a788e16b0f78
|
[
"Artistic-2.0"
] |
permissive
|
nlawlor/powsimR
|
5c6d31bd3e2fdbad9cfc61fa6f253cb5c5d27f96
|
326cd1cc3d1b885c5ad3cbaaf3c2a3c28ebd7cf8
|
refs/heads/master
| 2020-03-20T15:42:26.677961
| 2018-06-15T18:31:08
| 2018-06-15T18:31:08
| 137,519,673
| 0
| 0
| null | 2018-06-15T18:22:44
| 2018-06-15T18:22:43
| null |
UTF-8
|
R
| false
| false
| 2,202
|
r
|
utils_classify.R
|
# WRAPPER FOR CELL GROUP CLASSIFICATION -----------------------------------
## NOTE: minclust has adjusted rand index implemented
# TODO: put a counter of how many HVG were actually found (absolute and relative to total # genes!)
# Dispatches either to the step-by-step classification pipeline
# (Pipeline = NULL) or to one of the prepackaged CIDR pipelines.
# NULL always means skip this step
.classify.calc <- function(Pipeline,
                           GeneSelect,
                           DimReduce,
                           ClustMethod,
                           clustNumber,
                           normData,
                           Lengths,
                           MeanFragLengths,
                           countData,
                           spikeData,
                           spikeInfo,
                           spikeIns,
                           NCores,
                           verbose) {
  if (is.null(Pipeline)) {
    # 1. expression matrix on the log2(x + 1) scale
    log.expr <- log2(.expr.calc(countData = countData,
                                normData = normData,
                                Lengths = Lengths,
                                MeanFragLengths = MeanFragLengths,
                                group = NULL) + 1)
    # 2. feature (gene) selection
    chosen.features <- .feature.select(GeneSelect = GeneSelect,
                                       countData = countData,
                                       normData = normData,
                                       exprData = log.expr,
                                       spikeData = spikeData,
                                       spikeInfo = spikeInfo,
                                       spikeIns = spikeIns,
                                       verbose = verbose)
    # 3. dimensionality reduction
    reduced <- .dim.reduce(DimReduce,
                           chosen.features)
    # 4. clustering
    ClusterData <- .cluster.calc(reduced,
                                 ClustMethod,
                                 clustNumber,
                                 verbose)
  } else if (Pipeline == "CIDR_free") {
    ClusterData <- .cidrfree.calc(countData, NCores)
  } else if (Pipeline == "CIDR_bound") {
    ClusterData <- .cidrbound.calc(countData, clustNumber, NCores)
  }
  # an unmatched non-NULL Pipeline errors here ("object not found"),
  # exactly as in the original three-if version
  ClusterData
}
|
51ace54d6cec713a99a818d1d3e82dabe2a8ed08
|
b7e1d6a5ffc9f5e80614ec6eb987cde629765c49
|
/HeatMapProject.R
|
d798d120cd8f32a8038da8fb1bb7c66a5a970920
|
[] |
no_license
|
tristanjkaiser/Heatmap_rWorldMap
|
2cc64001f7e4d87c3fe9e31615d8b14c4c0bd3f2
|
13e55632c714f58547a1063a83c89e59815181f9
|
refs/heads/master
| 2021-09-16T14:04:51.472735
| 2018-06-21T16:55:28
| 2018-06-21T16:55:28
| 67,171,815
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,604
|
r
|
HeatMapProject.R
|
# First HeatMap exploration
# By: Tristan Kaiser
# 09/01/2016
# Choropleth of each country's share of 2013 world copper production.
library(readr)
library(rworldmap)
library(RColorBrewer)
library(classInt)
# Load data ---------------------------
df <- read.csv('./Data/Metal_Data.csv', header = TRUE, na.strings=c("", NA))
# Clean data -------------------------
# Convert Percentage column to numeric
# You will run into a lot of issues here if the CSV file has a funky format
df$Percentage <- as.numeric(sub("%", "", df$Percentage))
# Build map -------------------------
# Join data to map
dfMap <- joinCountryData2Map(df
                             ,joinCode = "ISO3"
                             ,nameJoinColumn = "CountryCode"
                             ,verbose = TRUE)
# Set Colours: white for 0%, then a red ramp for increasing shares.
# BUG FIX: the original wrapped this vector in palette(), which SETS the
# session palette and invisibly returns the PREVIOUS one -- so colourChoice
# ended up holding the old default palette, not these colours.
colourChoice <- c("white", "#fee5d9", "#fcbba1", "#fc9272",
                  "#fb6a4a", "#ef3b2c", "#cb181d", "#990000")
# Create categories for data
# Breaks indicate the colour categories
# (cut() has no na.rm argument; the original's na.rm = TRUE was silently
# swallowed by `...`, so dropping it changes nothing)
dfMap@data[["percentage.category"]] <- cut(dfMap@data[["Percentage"]],
                                           breaks = c(-1, 0, 2, 5, 10, 20, 40, 60, 100),
                                           include.lowest = TRUE)
# Label the colour categories (must match the 8 intervals above, in order)
levels(dfMap@data[["percentage.category"]]) <- c("0%", "<2%",
                                                 "=>2% - <5%",
                                                 ">=5% - <10%",
                                                 ">=10% - <20%",
                                                 ">=20% - <40%",
                                                 ">=40% - <60%",
                                                 ">=60% - 100%")
# Open the Map Viewer
# Only works in Windows
mapDevice("x11")
# Plot the data categories
mapParams <- mapCountryData(dfMap,
                            nameColumnToPlot = 'percentage.category',
                            catMethod = 'categorical',
                            mapTitle = 'Global Copper Production 2013 - 18,500,000 tonnes',
                            addLegend = FALSE,
                            colourPalette = colourChoice,
                            oceanCol = 'lightBlue',
                            missingCountryCol = 'white')
# Add the Legend -------------------------------
# Current settings worked on X11 viewer
# Adjust cex until the box fits properly
mapParams$legendText <- levels(dfMap@data[["percentage.category"]])
do.call(addMapLegendBoxes, c(mapParams, x = 'bottomleft',
                             cex = .6,
                             title = "% of World Production",
                             horiz = FALSE))
# Modify these values until the source & box align correctly
text(111, -85, "Source: USGS & Kaiser Research", cex = .8)
rect(67, -80, 155, -90)
|
a4fe18a88d57043eb41269bcc28d72ca33e97594
|
f697cbbb0da988fd43c07652d5955fbb82e25e38
|
/GoViewer/R/wideDirectionFormat.r
|
994c56bd35efd7bba160f87d112895678acc2d89
|
[] |
no_license
|
aidanmacnamara/epiView
|
eec75c81b8c7d6b38c8b41aece3e67ae3053fd1c
|
b3356f6361fcda6d43bf3acce16b2436840d1047
|
refs/heads/master
| 2021-05-07T21:22:27.885143
| 2020-06-22T13:08:51
| 2020-06-22T13:08:51
| 109,008,158
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,124
|
r
|
wideDirectionFormat.r
|
#' prepares projected direction observations to reconstruct a direction vector
#'
#' In an attempt to guess the direction in a higher dimensional PCA space, the
#' user selects the direction in one or combinations of PCA components. This
#' code takes the output captured from the GUI and converts it into a simpler
#' form suitable for checks and calculations. For more details, see the example.
#'
#' @param n the number of dimensions or PCs to be considered
#' @param x a data frame of paired PC coordinates and x and y displacements,
#'   with columns \code{i} and \code{j} (the two PC indices of the viewing
#'   plane) and \code{x} and \code{y} (the displacements along those PCs)
#'
#' @return an \code{n} by \code{nrow(x)} matrix of the type produced by
#'   \link{makeW}: one column per observed plane, with the two observed
#'   loadings filled in and every other entry \code{NA}
#' @export
#'
#' @examples
#'
#' # example of use. A vector in 6-space with two observed planes
#'
#' n=6
#'
#' dirDefined=data.frame(i=c(1,1),j=c(2,4),x=c(8,10),y=c(-7,2))
#' dirDefined
#'
#' w=wideDirectionFormat(n,dirDefined)
#' w
#'
#' a=checkPcaViewConnectivity(w)
#' a
#'
#' d1=findDirection(w[a$selected,])
#' d1
#'
#' d2=rep(0,n)
#' d2[a$selected]=d1
#' d2
wideDirectionFormat=function(n,x){
  # one column per observation; unobserved PC loadings stay NA
  w=array(NA,dim=c(n,nrow(x)))
  # BUG FIX: seq_len() instead of 1:nrow(x), so a zero-row data frame
  # returns an n x 0 matrix rather than erroring on the bogus sequence c(1, 0)
  for (i in seq_len(nrow(x))){
    w[x[i,"i"],i]=x[i,"x"]
    w[x[i,"j"],i]=x[i,"y"]
  }
  w
}
|
25eb5c9b73f0373843a936ca9b64f2d3cff3554c
|
1d2e96ba9f11671e0f422ad56932d4e101941cf4
|
/man/topo2.Rd
|
b4dd110c8481e15c35a9668baa3b71d7ae96b3b3
|
[] |
no_license
|
cran/ocedata
|
65642b611b59cea252d59f45950cf084b2d60a09
|
7f152f7b7a357900f720239cbb0dcca93045808c
|
refs/heads/master
| 2022-08-23T19:45:28.151629
| 2022-08-19T08:40:02
| 2022-08-19T08:40:02
| 19,524,009
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 928
|
rd
|
topo2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/topo2.R
\docType{data}
\name{topo2}
\alias{topo2}
\title{World topography on a 2-degree grid}
\usage{
data(topo2, package="ocedata")
}
\description{
\code{topo2} is a matrix containing world topography data, on a 2-degree grid.
This is provided for occasions where the higher resolution topography in
\code{topoWorld} from the Oce package is not needed. See \dQuote{Examples} for a
plot that illustrates the longitude and latitude grid for the data.
}
\details{
The data are calculated by applying \code{decimate()} to the \code{topoWorld}
dataset from the \CRANpkg{oce} package, followed by extraction of the \code{"z"} value.
}
\examples{
# Coastline and 2 km isobath
data(topo2, package="ocedata")
lon <- seq(-179.5, 178.5, by=2)
lat <- seq(-89.5, 88.5, by=2)
contour(lon, lat, topo2, asp=1, drawlabels=FALSE,
level=c(0,-2000), col=c(1,2))
}
|
bb9dd54a2b950328706f08ac6e986ebe20bce492
|
89f09607d51b2552f05465d7325e235fe376ba19
|
/finestructure/Weighted_parameters.R
|
e6d9b08dfea8686f5d7dc30afabedacaae854144
|
[] |
no_license
|
weigelworld/north_american_arabidopsi
|
d0e68bcd749ffa4634c154e884d49e05d807b777
|
00cb7750fe04e1246d2bcb3bd058cca23b3d8f03
|
refs/heads/master
| 2023-05-30T12:37:11.772765
| 2021-06-10T05:00:36
| 2021-06-10T05:00:36
| 375,575,979
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 472
|
r
|
Weighted_parameters.R
|
# Compute genome-wide Ne and Mu as SNP-count-weighted averages of the
# per-chromosome estimates in the parameter summary table.
setwd('/ebio/abt6_projects9/Methylome_variation_HPG1/origins/post_vcf1/shapeit_noQD30/combinePops/EurasianOnly')
parameters <- read.table('EurasianOnly.parameter_summary.txt', header = TRUE)
head(parameters)
# per-chromosome contributions, weighted by the number of SNPs
parameters$sum_chr_Ne <- parameters$N_SNPs * parameters$Ne
parameters$sum_chr_Mu <- parameters$N_SNPs * parameters$Mu
total_snps <- sum(parameters$N_SNPs)
weighted_Ne <- sum(parameters$sum_chr_Ne) / total_snps
weighted_Mu <- sum(parameters$sum_chr_Mu) / total_snps
weighted_Ne
weighted_Mu
|
83ee165734b3c43e6dc95d13c9147d4aaa3fb7b8
|
84936791d8a9f5686ea68ac96efdf851dc7b5fcf
|
/R/bpw_wg1.R
|
8bbf55d4c8bdb00c4e8f98ff2d71801f7523fc81
|
[] |
no_license
|
petzi53/austRia
|
3cf128648831332a57a3e4d4812bc64dd473b3e4
|
c8300bd9d2348166d51ca1df9fb39a86c95a43e7
|
refs/heads/master
| 2021-01-11T22:30:54.103082
| 2016-10-30T16:04:45
| 2016-10-30T16:04:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,035
|
r
|
bpw_wg1.R
|
#' Bundespräsidentenwahl Österreich 2016 (Austrian presidential election)
#'
#' This dataset contains the number of votes for each candidate per municipality
#' for the first round of voting of the Austrian presidential election 2016.
#' Candidate columns hold raw vote counts (not percentages).
#'
#' @source \url{http://www.bmi.gv.at/cms/BMI_wahlen/bundespraes/bpw_2016/Ergebnis.aspx}
#' @format A data frame with 2239 observations and 12 variables:
#' \describe{
#' \item{gkz}{Community Identification Number}
#' \item{gebietsname}{Name of the municipality}
#' \item{wahlberechtigte}{Nr. of people who were allowed to vote}
#' \item{abgegebene}{Nr. of people who cast a vote}
#' \item{ungueltige}{Nr. of invalid votes}
#' \item{gueltige}{Nr. of valid votes}
#' \item{griss}{Nr. of votes for Irmgard Griss}
#' \item{hofer}{Nr. of votes for Norbert Hofer}
#' \item{hundstorfer}{Nr. of votes for Rudolf Hundstorfer}
#' \item{khol}{Nr. of votes for Andreas Khol}
#' \item{lugner}{Nr. of votes for Richard Lugner}
#' \item{vanderbellen}{Nr. of votes for Alexander van der Bellen}
#' }
#' @keywords datasets
"bpw_wg1"
|
d90b14d196155e698c69b9e590ff648904c2874c
|
81fe9027382cf52464ec0bc66ed54e9b27ba3c8e
|
/cachematrix.R
|
2cff2117eac17b3a73777ed247e4a2d46de8fa9e
|
[] |
no_license
|
minnatwang/ProgrammingAssignment2
|
3a52bc5136a1d1da0e2c8b9c962ca4f4a85a53b4
|
6040cec935dd3bb06283fceea6293042421031b9
|
refs/heads/master
| 2021-01-15T16:51:44.359340
| 2017-08-08T20:19:57
| 2017-08-08T20:19:57
| 99,729,596
| 0
| 0
| null | 2017-08-08T19:39:14
| 2017-08-08T19:39:13
| null |
UTF-8
|
R
| false
| false
| 809
|
r
|
cachematrix.R
|
## Cache the inverse of a matrix for faster inverse retrieval in the future
## Create functions to set and get both the matrix and its inverse

# Returns a list of closures around matrix `x` and its cached inverse:
#   set(y)     -- replace the matrix and invalidate the cached inverse
#   get()      -- return the current matrix
#   setinv(i)  -- store a computed inverse in the cache
#   getinv()   -- return the cached inverse (NULL until setinv is called)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # new matrix -> any cached inverse is stale
  }
  get <- function() x
  setinv <- function(inv1) inv <<- inv1
  # BUG FIX: the original returned `m`, an undefined name, so getinv()
  # always errored; it must return the cached `inv`.
  getinv <- function() inv
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## Returns the inverse either by calculating it or retrieving it from the cache
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinv(inv)
inv
}
|
1275e62a549fa89c18260232bcf2284ff923f6e6
|
1a4db22d3a7702c38b61c9d4700c6a3b94d145ba
|
/PER/plm.R
|
9b4c4f3917fae9e977e640b4e39d7193a8d15950
|
[] |
no_license
|
koki25ando/hatenablog_code
|
316e99eff210169e70773d62d355fb7ca8b7eea0
|
464309485a3fe21ad21335c264b9baa9597bb354
|
refs/heads/master
| 2020-12-08T10:27:19.638923
| 2020-07-12T13:45:35
| 2020-07-12T13:45:35
| 232,957,511
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 676
|
r
|
plm.R
|
# Panel ("within") regression of NBA player PER on court position.
# Walk-through: https://nigimitama.hatenablog.jp/entry/2018/11/05/021516
setwd("/Users/KokiAndo/Desktop/R/hatenablog/PER")
pacman::p_load(tidyverse, data.table, naniar, plm)
stats <- fread("Seasons_Stats.csv")
# Post-2000 seasons of regular players (>20 games, age 21-39) at one of the
# five standard positions, labelled as back- or front-court.
per_dat <- stats %>%
  filter(Pos %in% c("PG", "SG", "SF", "PF", "C"),
         Year > 2000, G > 20, Age < 40, Age > 20) %>%
  mutate(Pos_category = ifelse(Pos %in% c("PG", "SG"), "BackCourt", "FrontCourt")) %>%
  select(Player, Pos, Age, Pos_category, PER)
str(per_dat)
# Panel data frame indexed by player (individual effects)
per_pdf <- pdata.frame(per_dat, index = c("Player"), drop.index = TRUE)
head(per_pdf)
plm_model <- plm(PER ~ Pos_category, data = per_pdf,
                 method = "within", effect = "individual")
summary(plm_model)
|
229be14da74f570bc8be6cecc699f2f83434d404
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.cloudwatchlogs/man/test_metric_filter.Rd
|
dfa000b5028ab79f3fc8face670f3c370f96cb03
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 760
|
rd
|
test_metric_filter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cloudwatchlogs_operations.R
\name{test_metric_filter}
\alias{test_metric_filter}
\title{Tests the filter pattern of a metric filter against a sample of log event messages}
\usage{
test_metric_filter(filterPattern, logEventMessages)
}
\arguments{
\item{filterPattern}{[required]}
\item{logEventMessages}{[required] The log event messages to test.}
}
\description{
Tests the filter pattern of a metric filter against a sample of log event messages. You can use this operation to validate the correctness of a metric filter pattern.
}
\section{Accepted Parameters}{
\preformatted{test_metric_filter(
filterPattern = "string",
logEventMessages = list(
"string"
)
)
}
}
|
e826aa39ab70e70e95db234743fa06482cb4ca4d
|
c9d5c137e1a3c11175f002c1e159a5e091ea6efc
|
/dataanalysis/analysis/topic-modeling/04bstmSelectModelsK20.R
|
959099be3e9b1a4ef574d172664484e50bd0b6c0
|
[] |
no_license
|
margaretfoster/WTO
|
955cb80ee70e0fc527c89dd20da68db006452078
|
476ba4a2821b2929c8310f1058ac847a904a30ce
|
refs/heads/master
| 2022-06-22T11:38:02.944583
| 2022-06-16T12:40:58
| 2022-06-16T12:40:58
| 139,746,359
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,516
|
r
|
04bstmSelectModelsK20.R
|
#########
### Model select K20
#########
# Runs stm::selectModel() for a K = 20 structural topic model on the
# pre-processed WTO corpus and saves the candidate models to disk.
rm(list=ls())
# install (if missing) and attach every package named in `toLoad`
loadPkg=function(toLoad){
for(lib in toLoad){
if(! lib %in% installed.packages()[,1])
{ install.packages(lib, repos='http://cran.rstudio.com/') }
suppressMessages( library(lib, character.only=TRUE) ) }}
packs <- c('tm', 'stm', 'pdftools',
'tidyr', 'quanteda', "wbstats")
loadPkg(packs)
#########################
## Declare Data Paths
#########################
if(Sys.info()['user']=="Ergane"){ ## if on my own machine look in Dropbox
dataPathDesktop <- "~/Dropbox/WTO/rdatas/"
print(paste0("The datapath is: ", dataPathDesktop))
}else{ ## else look in ~/WTO/
dataPathDesktop <- "../../"
print(paste0("The datapath is: ", dataPathDesktop))
}
#######################
## Load Processed Data
#######################
# NOTE(review): `docs`, `vocab` and `meta` used below are expected to come
# from this .Rdata file -- confirm its contents if selectModel() errors.
load(paste0(dataPathDesktop,"processedTextforSTM.Rdata"))
#############################
##### Analysis
#############################
#############################
#### Model Search K=20
#############################
# fixed seed for reproducibility of the model-selection runs
set.seed(61920)
mod.select.20 <- stm::selectModel(documents=docs,
vocab=vocab,
data=meta,
K=20, ## K20
prevalence= ~ s(numdate) +
as.factor(income_level_iso3c),
seed=61920)
save(mod.select.20,
file=paste0(dataPathDesktop,
"tradDevModSelect_20.RData"))
|
b68f693e7bcf0529578254a05149abd962c1dd10
|
1c7ed6ecdf6101a1d68e8106309283e7bb325af3
|
/man/commSimul.Rd
|
a6e9be36fe87c8472710283178e93e860757e77e
|
[] |
no_license
|
guiblanchet/countComm
|
5aec6c61ad32ff75c9a63b8178c252fa14d78542
|
b338009feba8789f898c800963229822e8401e6a
|
refs/heads/master
| 2022-09-11T16:10:14.346773
| 2020-06-02T19:33:38
| 2020-06-02T19:33:38
| 268,888,496
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,491
|
rd
|
commSimul.Rd
|
\name{commSimul}
\alias{commSimul}
\title{
Community simulation function
}
\description{
Simulate a community following the procedure proposed in Blanchet et al. (in press).
}
\usage{
commSimul(nquad,nsp, nind,patchrad=0.2,SAD="lognormal",sdlog=5)
}
\arguments{
\item{nquad}{Number of quadrates in which to divide the sampling area. This should be equal to any integer squared. (See Details).}
\item{nsp}{Integer defining the number of species to be simulated.}
\item{nind}{Integer defining the maximum number of individuals for each species. Note that if SAD = "uniform", this value will be the same for each species. (See Details).}
\item{patchrad}{A numeric value defining the maximum radius of a patch for a group of individuals of a single species. (See Details).}
\item{SAD}{Character string defining the type species-abundance distribution for the community. Either "lognormal", "bstick", "uniform".}
\item{sdlog}{This argument is active only if SAD= "lognormal". It defines the standard deviation of the lognormal distribution.}
}
\details{The number of quadrates in the sampling area (a square of unit size) is defined by dividing the area horizontally and vertically so that each quadrate is proportionally equal to the size of the sampling area. For this reason, nquad should be an integer that can be obtained from any solution of x^2. If this is not the case, a value will be calculated that approximates the desired value by rounding the square root of nquad and putting it to the power of 2.
Depending on the way individuals are distributed in the sampling area (spatagg), the number of individuals per species defined by 'nind' may vary because the method used to sample individuals relies on random point patterns and the value given is the average of the random point process.
Note that when defining the patch radius through patchrad, the patch radius is sampled from a uniform distribution ranging from 0.01 to patchrad. Also, since all simulations are carried out in a square unit size, defining patchrad with a value larger than 1 is ill advised. The argument patchrad defines patch radius for individuals of each species simulated so that the patch size associated to different species within the same community can vary within the predefine range.
}
\value{
A sites (rows) by species (columns) community matrix.
}
\author{
F. Guillaume Blanchet
}
\examples{
comm1<-commSimul(10^2,20, 2000,patchrad=0.2,SAD="lognormal",sdlog=5)
}
\keyword{ multivariate }
|
eab3f2532633e27f07736498e03393e373a4df91
|
14161ca2db029ea5850d916b6ca7fb959e2c3f3d
|
/wine.R
|
9da70d0abad38abdc255376338b27a905150c4d4
|
[] |
no_license
|
pochuan/stats315a
|
dea23679c76893155b3e336097e7208ab5a7ecff
|
c50f3377b9009a0dedd1778d2dbfdc1cbbb893f1
|
refs/heads/master
| 2016-09-06T02:22:58.617929
| 2014-03-12T07:39:42
| 2014-03-12T07:39:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,719
|
r
|
wine.R
|
# load data with: wine.train <- read.csv("wine.train.csv", header=TRUE)
# K-fold cross-validation returning the total absolute prediction error.
#
# model.fn:   model-fitting function (e.g. lm, lda, or glmnet).
# formula:    model formula; the response must be the `quality` column.
# num.folds:  number of cross-validation folds.
# train.data: data frame whose first 12 columns are the predictors and which
#             contains a `quality` column (the wine-data layout; the 1:12
#             predictor slice is preserved from the original code).
#
# Returns the summed absolute held-out error over all folds. Note that if
# nrow(train.data) is not a multiple of num.folds, the trailing rows are
# never held out (original behaviour, preserved).
cv.error <- function(model.fn, formula, num.folds, train.data) {
  N <- nrow(train.data)
  fold.size <- floor(N / num.folds)
  error <- 0.0
  fold.begin <- 1
  fold.end <- fold.size
  for (i in 1:num.folds) {
    hold.out <- fold.begin:fold.end
    # Fit the model on all-but-the-i'th fold. glmnet uses a matrix (x, y)
    # interface rather than a formula, so it needs special handling.
    # BUG FIX: the original used ifelse() here, which (a) evaluates BOTH
    # branches and returns only the first element of the chosen value, so it
    # cannot select a fitted-model object, and (b) referenced `glmnet`
    # unconditionally, erroring for every model when glmnet is not loaded.
    if (exists("glmnet") && identical(model.fn, glmnet)) {
      model.i <- model.fn(formula, family = "gaussian",
                          weights = rep(1, nrow(train.data[-hold.out, 1:12])),
                          x = train.data[-hold.out, 1:12],
                          y = train.data[-hold.out, ]$quality)
    } else {
      model.i <- model.fn(formula, train.data[-hold.out, ])
    }
    # Accumulate the i'th fold's held-out absolute error.
    predict.i <- predict(model.i, train.data[hold.out, 1:12])
    if (is.list(predict.i) && !is.null(predict.i$class)) {
      # lda/qda predict() returns a list; use the MAP class assignments.
      # TODO(review): class posterior probabilities could be used instead,
      # which would be a fairer comparison against lm.
      predict.i <- as.numeric(predict.i$class)
    }
    error <- error + sum(abs(predict.i - train.data[hold.out, ]$quality))
    # Advance to the next fold.
    fold.begin <- fold.end + 1
    fold.end <- fold.begin + fold.size - 1
  }
  return(error)
}
# models to try:
# ordinariy lm (quality ~ .; make sure to treat color as a factor)
# factor response for quality (how to do this?)
# LDA/QDA
# kNN
# regularization? maybe LASSO for variable selection? or, forward step-wise with AIC?
# Trees
# uncomment the following to test ordinary linear model, LDA respectively
# cv.error(lm, quality ~ ., 10, wine.train)
# cv.error(lda, as.factor(quality) ~ ., 10, wine.train)
|
a8271087e9ef1543dc0ab62ff45f68e179683390
|
22a5979ae04326d13c4e4b13f4f1d72748ab412e
|
/R/rlognormal.R
|
65e7323a3b590967ec44c3e1c4c83e0751b0e6f8
|
[] |
no_license
|
guochunshen/sce
|
7c719bb0ccf58d73310935f0650a6e0a92d0e037
|
206cd176ef23eebb9daec3380f5a9f65531a65d4
|
refs/heads/master
| 2021-05-04T14:04:16.579562
| 2013-06-26T07:14:06
| 2013-06-26T07:14:06
| 6,288,742
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 465
|
r
|
rlognormal.R
|
#'
#' Generate a lognormal species abundance distribution
#'
#' @param N total number of individuals
#' @param S total number of species
#' @param meanlog mean of the underlying lognormal distribution (log scale)
#' @param sdlog standard deviation of the underlying lognormal distribution
#'   (log scale)
#'
#' @return An integer-valued vector of length S whose elements sum to N.
#'
rlognormal <- function(N, S, meanlog = 6, sdlog = 0.6) {
  # Draw S lognormal abundances and rescale them to total N individuals.
  n <- rlnorm(S, meanlog, sdlog)
  n <- round(n / sum(n) * N, 0)
  # Rounding can leave sum(n) != N; absorb the discrepancy into the single
  # most abundant species. BUG FIX: the original used which(n == max(n)),
  # which returns ALL tied indices, so on a tie the correction was applied
  # to every maximum and the result no longer summed to N. which.max()
  # returns exactly one index (the first maximum).
  if (sum(n) != N) {
    i <- which.max(n)
    n[i] <- N - sum(n) + n[i]
  }
  return(n)
}
|
5bf4d6448679bc41c08046f0ef2041c99ccd16a4
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/hetGP/inst/doc/hetGP_vignette.R
|
9883761cfc361635457c9fb6224d0e47f1f9527d
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,872
|
r
|
hetGP_vignette.R
|
## ----include=FALSE-------------------------------------------------------
library("knitr")
## cache can be set to TRUE
opts_chunk$set(
engine='R', tidy=FALSE, cache=FALSE, autodep=TRUE
)
render_sweave() # For JSS when using knitr
knitr::opts_chunk$set(fig.pos = 'ht!')
## ----preliminaries, echo=FALSE, results='hide'----------------------
options(prompt = "R> ", continue = "+ ", width = 70, useFancyQuotes = FALSE, scipen = 5)
## ----nl,message=FALSE-----------------------------------------------
library("hetGP")
# Concentrated negative log-likelihood for a zero-mean GP with a Gaussian
# kernel plus nugget (scale parameter nu profiled out, up to a constant).
#
# par: c(theta_1, ..., theta_d, tau2) -- lengthscales then nugget variance.
# X:   n x d design matrix; Y: length-n response vector.
# cov_gen() comes from the hetGP package loaded above.
nLL <- function(par, X, Y) {
  d <- ncol(X)
  lengthscales <- par[seq_len(d)]
  nugget <- par[d + 1]
  n <- length(Y)
  # Covariance of the observations: kernel matrix plus nugget on the diagonal.
  covmat <- cov_gen(X1 = X, theta = lengthscales) + diag(nugget, n)
  covinv <- solve(covmat)
  # log-determinant via determinant() for numerical stability.
  logdet <- determinant(covmat, logarithm = TRUE)$modulus
  quadform <- t(Y) %*% covinv %*% Y
  0.5 * n * log(quadform) + 0.5 * logdet
}
## ----gnl------------------------------------------------------------
# Gradient of the concentrated negative log-likelihood nLL() above, with
# respect to par = c(theta_1, ..., theta_d, tau2). cov_gen() is from the
# hetGP package loaded above. Statement order matters here (K, Ki and KiY
# are reused across the loop), so the code is left untouched.
gnLL <- function(par, X, Y) {
n <- length(Y)
theta <- par[1:ncol(X)]; tau2 <- par[ncol(X) + 1]
# Covariance matrix (kernel + nugget), its inverse, and K^{-1} Y, all
# computed once and reused below.
K <- cov_gen(X1 = X, theta = theta) + diag(tau2, n)
Ki <- solve(K); KiY <- Ki %*% Y
# Partial derivatives with respect to each lengthscale theta_k.
dlltheta <- rep(NA, length(theta))
for(k in 1:length(dlltheta)) {
# dotK is the elementwise derivative of K with respect to theta_k for the
# Gaussian kernel: K * squared pairwise distances in dimension k / theta_k^2.
dotK <- K * as.matrix(dist(X[, k]))^2 / (theta[k]^2)
dlltheta[k] <- n * t(KiY) %*% dotK %*% KiY / (t(Y) %*% KiY) -
sum(diag(Ki %*% dotK))
}
# Partial derivative with respect to the nugget tau2 (dK/dtau2 = I).
dlltau2 <- n * t(KiY) %*% KiY / (t(Y) %*% KiY) - sum(diag(Ki))
# Negate and halve to match the sign/scale convention of nLL().
-c(dlltheta / 2, dlltau2 / 2)
}
## ----exp2d----------------------------------------------------------
library("lhs")
X <- 6 * randomLHS(40, 2) - 2
X <- rbind(X, X)
y <- X[, 1] * exp(-X[, 1]^2 - X[, 2]^2) + rnorm(nrow(X), sd = 0.01)
## ----exp2doptim-----------------------------------------------------
Lwr <- sqrt(.Machine$double.eps); Upr <- 10
out <- optim(c(rep(0.1, 2), 0.1 * var(y)), nLL, gnLL, method = "L-BFGS-B",
lower = Lwr, upper = c(rep(Upr, 2), var(y)), X = X, Y = y)
out$par
## ----pred1----------------------------------------------------------
Ki <- solve(cov_gen(X, theta = out$par[1:2]) + diag(out$par[3], nrow(X)))
nuhat <- drop(t(y) %*% Ki %*% y / nrow(X))
## ----xx-------------------------------------------------------------
xx <- seq(-2, 4, length = 40)
XX <- as.matrix(expand.grid(xx, xx))
## ----pred-----------------------------------------------------------
KXX <- cov_gen(XX, theta = out$par[1:2]) + diag(out$par[3], nrow(XX))
KX <- cov_gen(XX, X, theta = out$par[1:2])
mup <- KX %*% Ki %*% y
Sigmap <- nuhat * (KXX - KX %*% Ki %*% t(KX))
## ----exp2dp, echo = FALSE, fig.height=6, fig.width=12, fig.align='center', fig.cap="\\label{fig:exp2d}Example predictive surface from a GP. Open circles are the training locations."----
library("colorspace")
sdp <- sqrt(diag(Sigmap))
par(mfrow = c(1,2))
cols <- sequential_hcl(palette = "Viridis", n = 128, l = c(40, 90))
persp(xx, xx, matrix(mup, ncol = length(xx)), theta = -30, phi = 30,
main = "mean surface", xlab = "x1", ylab = "x2", zlab = "y")
image(xx, xx, matrix(sdp, ncol = length(xx)), main = "variance",
xlab = "x1", ylab = "x2", col = cols)
points(X[, 1], X[, 2])
## ----library--------------------------------------------------------
fit <- mleHomGP(X, y, rep(Lwr, 2), rep(Upr, 2), known = list(beta0 = 0),
init = c(list(theta = rep(0.1, 2), g = 0.1 * var(y))))
c(fit$theta, fit$g)
## ----motofit--------------------------------------------------------
library("MASS")
hom <- mleHomGP(mcycle$times, mcycle$accel, covtype = "Matern5_2")
het <- mleHetGP(mcycle$times, mcycle$accel, covtype = "Matern5_2")
## ----motopred-------------------------------------------------------
Xgrid <- matrix(seq(0, 60, length = 301), ncol = 1)
p <- predict(x = Xgrid, object = hom)
p2 <- predict(x = Xgrid, object = het)
## ----motofig, echo=FALSE,fig.height=6, fig.width=7, out.width='4in', fig.align='center', fig.cap="\\label{fig:moto1}Homoskedastic (solid red) versus heteroskedastic (dashed blue) GP fits to the motorcycle data via mean (thick) and 95\\% error bars (thin). Open circles mark the actual data, dots are averaged observations $\\yu$ with corresponding error bars from the empirical variance (when $a_i > 0$)."----
plot(mcycle, main = "Predictive Surface", ylim = c(-160, 90),
ylab = "acceleration (g)", xlab = "time (ms)")
lines(Xgrid, p$mean, col = 2, lwd = 2)
lines(Xgrid, qnorm(0.05, p$mean, sqrt(p$sd2 + p$nugs)), col = 2)
lines(Xgrid, qnorm(0.95, p$mean, sqrt(p$sd2 + p$nugs)), col = 2)
lines(Xgrid, p2$mean, col = 4, lwd = 2, lty = 4)
lines(Xgrid, qnorm(0.05, p$mean, sqrt(p2$sd2 + p2$nugs)), col = 4, lty = 4)
lines(Xgrid, qnorm(0.95, p$mean, sqrt(p2$sd2 + p2$nugs)), col = 4, lty = 4)
empSd <- sapply(find_reps(mcycle[, 1], mcycle[, 2])$Zlist, sd)
points(het$X0, het$Z0, pch = 20)
arrows(x0 = het$X0, y0 = qnorm(0.05, het$Z0, empSd),
y1 = qnorm(0.95, het$Z0, empSd), code = 3, angle = 90, length = 0.01)
## ----sirdesign------------------------------------------------------
Xbar <- randomLHS(200, 2)
a <- sample(1:100, nrow(Xbar), replace = TRUE)
X <- matrix(NA, ncol = 2, nrow = sum(a))
nf <- 0
for(i in 1:nrow(Xbar)) {
X[(nf + 1):(nf + a[i]),] <- matrix(rep(Xbar[i,], a[i]), ncol = 2,
byrow = TRUE)
nf <- nf + a[i]
}
## ----sireval--------------------------------------------------------
Y <- apply(X, 1, sirEval)
## ----sirfit---------------------------------------------------------
fit <- mleHetGP(X, Y, lower = rep(0.05, 2), upper = rep(10, 2),
settings = list(linkThetas = "none"), covtype = "Matern5_2", maxit = 1e4)
## ----sirpred, echo = FALSE------------------------------------------
xx <- seq(0, 1, length = 100)
XX <- as.matrix(expand.grid(xx, xx))
p <- predict(fit, XX)
## ----sirvis, echo = FALSE, fig.height=6, fig.width=12, fig.align='center', fig.cap="\\label{fig:sir}Heteroskedastic GP fit to SIR data. Left panel shows the predictive mean surface; right panel shows the estimated standard deviation. Text in both panels shows numbers of replicates."----
psd <- sqrt(p$sd2 + p$nugs)
par(mfrow = c(1, 2))
image(xx, xx, matrix(p$mean, 100), xlab = "S0", ylab = "I0", col = cols,
main = "Mean Infected")
text(Xbar, labels = a, cex = 0.75)
image(xx, xx, matrix(psd, 100), xlab = "S0", ylab = "I0", col = cols,
main = "SD Infected")
text(Xbar, labels = a, cex = 0.75)
## ----loadbf---------------------------------------------------------
data("bfs")
thetas <- matrix(bfs.exp$theta, ncol = 1)
bfs <- as.matrix(t(bfs.exp[, -1]))
## ----fitbf----------------------------------------------------------
bfs1 <- mleHetTP(X = list(X0 = log10(thetas), Z0 = colMeans(log(bfs)),
mult = rep(nrow(bfs), ncol(bfs))), Z = log(as.numeric(bfs)),
lower = 10^(-4), upper = 5, covtype = "Matern5_2")
## ----predbf, echo = FALSE-------------------------------------------
dx <- seq(0, 1, length = 100)
dx <- 10^(dx * 4 - 3)
p <- predict(bfs1, matrix(log10(dx), ncol = 1))
## ----visbf, echo=FALSE,fig.height=6, fig.width=12, fig.align='center', fig.cap="Left: heteroskedastic TP fit to the Bayes factor data under exponential hyperprior. Right: output given by the \\code{plot} method."----
par(mfrow = c(1, 2))
matplot(log10(thetas), t(log(bfs)), col = 1, pch = 21, ylab = "log(bf)",
main = "Bayes factor surface")
lines(log10(dx), p$mean, lwd = 2, col = 2)
lines(log10(dx), p$mean + 2 * sqrt(p$sd2 + p$nugs), col = 2, lty = 2,
lwd = 2)
lines(log10(dx), p$mean + 2 * sqrt(p$sd2), col = 4, lty = 3, lwd = 2)
lines(log10(dx), p$mean - 2 * sqrt(p$sd2 + p$nugs), col = 2, lty = 2,
lwd = 2)
lines(log10(dx), p$mean - 2 * sqrt(p$sd2), col = 4, lty = 3, lwd = 2)
legend("topleft", c("hetTP mean", expression(paste("hetTP interval on Y(x)|", D[N])), "hetTP interval on f(x)"), col = c(2,2,4), lty = 1:3,
lwd = 2)
plot(bfs1)
par(mfrow = c(1,1))
## ----loadbf2--------------------------------------------------------
D <- as.matrix(bfs.gamma[, 1:2])
bfs <- as.matrix(t(bfs.gamma[, -(1:2)]))
## ----fitbf2---------------------------------------------------------
bfs2 <- mleHetTP(X = list(X0 = log10(D), Z0 = colMeans(log(bfs)),
mult = rep(nrow(bfs), ncol(bfs))), Z = log(as.numeric(bfs)),
lower = rep(10^(-4), 2), upper = rep(5, 2), covtype = "Matern5_2")
## ----predbf2,echo=FALSE---------------------------------------------
DD <- as.matrix(expand.grid(dx, dx))
p <- predict(bfs2, log10(DD))
## ----visbf2, echo = FALSE, fig.height=6, fig.width=12, fig.align='center', fig.cap="Heteroskedastic TP fit to the Bayes factor data under Gamma hyperprior."----
par(mfrow = c(1, 2))
mbfs <- colMeans(bfs)
image(log10(dx), log10(dx), t(matrix(p$mean, ncol=length(dx))),
col = cols, xlab = "log10 alpha", ylab = "log10 beta",
main = "mean log BF")
text(log10(D[, 2]), log10(D[, 1]), signif(log(mbfs), 2), cex = 0.75)
contour(log10(dx), log10(dx), t(matrix(p$mean, ncol = length(dx))),
levels = c(-5, -3, -1, 0, 1, 3, 5), add = TRUE, col = 4)
image(log10(dx), log10(dx), t(matrix(sqrt(p$sd2 + p$nugs),
ncol = length(dx))), col = cols, xlab = "log10 alpha",
ylab = "log10 beta", main = "sd log BF")
text(log10(D[, 2]), log10(D[, 1]), signif(apply(log(bfs), 2, sd), 2),
cex = 0.75)
## ----atoload--------------------------------------------------------
data("ato")
## ----atotime--------------------------------------------------------
c(n = nrow(Xtrain), N = length(unlist(Ztrain)), time = out$time)
## ----atotestscore---------------------------------------------------
sc <- scores(out, Xtest, matrix(unlist(Ztest), byrow = TRUE, ncol = 10))
## ----atotrainscore--------------------------------------------------
sc.out <- scores(model = out, Xtest = Xtrain.out, Ztest = Ztrain.out)
## ----atobothscore---------------------------------------------------
c(test = mean(sc), train = mean(sc.out), combined = mean(c(sc, sc.out)))
## ----twors----------------------------------------------------------
rn <- c(4.5, 5.5, 6.5, 6, 3.5)
X0 <- matrix(seq(0.05, 0.95, length.out = length(rn)))
X1 <- matrix(c(X0, 0.2, 0.4))
Y1 <- c(rn, 5.2, 6.3)
r1 <- splinefun(x = X1, y = Y1, method = "natural")
X2 <- matrix(c(X0, 0.0, 0.3))
Y2 <- c(rn, 7, 4)
r2 <- splinefun(x = X2, y = Y2, method = "natural")
## ----twovarsXX------------------------------------------------------
XX <- matrix(seq(0, 1, by = 0.005))
## ----imspe.r--------------------------------------------------------
# IMSPE criterion at a candidate point x, for an existing design X0 with an
# input-dependent noise-variance function r(.). Wij() and cov_gen() are from
# the hetGP package loaded above; sum(solve(K) * Wijs) evaluates
# tr(K^{-1} W) elementwise. Smaller values are better (see the minimization
# over XX in the surrounding script).
IMSPE.r <- function(x, X0, theta, r) {
# Ensure the candidate is a 1-row matrix so it can be rbind-ed to X0.
x <- matrix(x, nrow = 1)
Wijs <- Wij(mu1 = rbind(X0, x), theta = theta, type = "Gaussian")
K <- cov_gen(X1 = rbind(X0, x), theta = theta)
# Add the (heteroskedastic) noise variance r(.) at each design row to the
# diagonal of the covariance matrix.
K <- K + diag(apply(rbind(X0, x), 1, r))
return(1 - sum(solve(K) * Wijs))
}
## ----twoimspe-------------------------------------------------------
imspe1 <- apply(XX, 1, IMSPE.r, X0 = X0, theta = 0.25, r = r1)
imspe2 <- apply(XX, 1, IMSPE.r, X0 = X0, theta = 0.25, r = r2)
xstar1 <- which.min(imspe1)
xstar2 <- which.min(imspe2)
## ----rx-------------------------------------------------------------
# Noise-variance threshold at a candidate x, comparing adding a new design
# point against replicating at the existing point kstar (see its use with
# rx.thresh in the surrounding script). Wij() and cov_gen() are from the
# hetGP package loaded above.
#
# x:     candidate location (coerced to a 1-row matrix below).
# X0:    existing design; rn: per-design-point noise variances.
# theta: kernel lengthscale(s); Ki: precomputed inverse covariance of X0.
# kstar: index of the replication candidate in X0; Wijs: precomputed Wij(X0).
#
# NOTE(review): the exact interpretation of the a/b - sn expression follows
# the IMSPE replication criterion of the hetGP vignette -- confirm against
# the hetGP documentation before reusing elsewhere.
rx <- function(x, X0, rn, theta, Ki, kstar, Wijs) {
x <- matrix(x, nrow = 1)
# Cross-covariance of x with the design, and its Wij integrals.
kn1 <- cov_gen(x, X0, theta = theta)
wn <- Wij(mu1 = x, mu2 = X0, theta = theta, type = "Gaussian")
# Numerator pieces built from the candidate point.
a <- kn1 %*% Ki %*% Wijs %*% Ki %*% t(kn1) - 2 * wn %*% Ki %*% t(kn1)
a <- a + Wij(mu1 = x, theta = theta, type = "Gaussian")
# Denominator: contribution of replicating at design point kstar.
Bk <- tcrossprod(Ki[, kstar], Ki[kstar,]) /
(2 / rn[kstar] - Ki[kstar, kstar])
b <- sum(Bk * Wijs)
# Predictive variance at x (unit process variance).
sn <- 1 - kn1 %*% Ki %*% t(kn1)
return(a / b - sn)
}
## ----rxeval---------------------------------------------------------
bestk <- which.min(apply(X0, 1, IMSPE.r, X0 = X0, theta = 0.25, r = r1))
Wijs <- Wij(X0, theta = 0.25, type = "Gaussian")
Ki <- solve(cov_gen(X0, theta = 0.25, type = "Gaussian") + diag(rn))
rx.thresh <- apply(XX, 1, rx, X0 = X0, rn = rn, theta = 0.25, Ki = Ki,
kstar = bestk, Wijs = Wijs)
## ----threersfig, echo=FALSE, fig.height=5, fig.width=5, fig.show='hide'----
plot(X0, rn, xlab = "x", ylab = "r(x)", xlim = c(0, 1), ylim = c(2, 8),
col = 2, main = "Two variance hypotheses")
lines(XX, r1(XX), col = 3)
lines(XX, r2(XX), col = 4)
lines(XX, rx.thresh, lty = 2, col = "darkgrey")
points(XX[xstar1], r1(XX[xstar1]), pch = 23, bg = 3)
points(XX[xstar2], r2(XX[xstar2]), pch = 23, bg = 4)
points(X0, rn, col = 2)
## ----threeimspefig, echo = FALSE, fig.height=5, fig.width=5, fig.show='hide'----
plot(XX, imspe1, type = "l", col = 3, ylab = "IMSPE", xlab = "x",
ylim = c(0.6, 0.7), main = "IMSPE for two variances")
lines(XX, imspe2, col = 4)
abline(v = X0, lty = 3, col = 'red')
points(XX[xstar1], imspe1[xstar1], pch = 23, bg = 3)
points(XX[xstar2], imspe2[xstar2], pch = 23, bg = 4)
## ----forr-----------------------------------------------------------
# Noise standard-deviation surface for the 1d example: sd(x) = exp(sin(2*pi*x)) / 3.
fn <- function(x) {
  exp(sin(2 * pi * x)) / 3
}
# Stochastic simulator: latent mean f1d2(x) plus heteroskedastic Gaussian
# noise with input-dependent standard deviation fn(x).
# NOTE(review): f1d2() is not defined in this script -- presumably exported
# by the hetGP package loaded above; verify.
fr <- function(x) { f1d2(x) + rnorm(length(x), sd = fn(x)) }
## ----forrinit-------------------------------------------------------
X <- seq(0, 1, length = 10)
Y <- fr(X)
mod <- mleHetGP(X = X, Z = Y, lower = 0.0001, upper = 1)
## ----forrIMSPE------------------------------------------------------
opt <- IMSPE_optim(mod, h = 5)
c(X, opt$par)
## ----forrupdate-----------------------------------------------------
X <- c(X, opt$par)
Ynew <- fr(opt$par)
Y <- c(Y, Ynew)
mod <- update(mod, Xnew = opt$par, Znew = Ynew, ginit = mod$g * 1.01)
## ----forr500--------------------------------------------------------
for(i in 1:489) {
opt <- IMSPE_optim(mod, h = 5)
X <- c(X, opt$par)
Ynew <- fr(opt$par)
Y <- c(Y, Ynew)
mod <- update(mod, Xnew = opt$par, Znew = Ynew, ginit = mod$g * 1.01)
if(i %% 25 == 0) {
mod2 <- mleHetGP(X = list(X0 = mod$X0, Z0 = mod$Z0, mult = mod$mult),
Z = mod$Z, lower = 0.0001, upper = 1)
if(mod2$ll > mod$ll) mod <- mod2
}
}
## ----forrn, echo=FALSE, results='hide'------------------------------
nrow(mod$X0)
## ----forrpred-------------------------------------------------------
xgrid <- seq(0, 1, length = 1000)
p <- predict(mod, matrix(xgrid, ncol = 1))
pvar <- p$sd2 + p$nugs
## ----forrfig, echo = FALSE, fig.height=5, fig.width=6, out.width="5in", out.height="4.2in", fig.align='center', fig.cap="\\label{fig:forr}Sequential design with horizon $h=5$. The truth is in black and the predictive distribution in red."----
plot(xgrid, f1d2(xgrid), type = "l", xlab = "x", ylab = "y",
main="1d example, IMSPE h=5", ylim = c(-4, 5))
lines(xgrid, qnorm(0.05, f1d2(xgrid), fn(xgrid)), col = 1, lty = 2)
lines(xgrid, qnorm(0.95, f1d2(xgrid), fn(xgrid)), col = 1, lty = 2)
points(X, Y)
segments(mod$X0, rep(0, nrow(mod$X0)) - 4, mod$X0, mod$mult * 0.25 - 4,
col = "gray")
lines(xgrid, p$mean, col = 2)
lines(xgrid, qnorm(0.05, p$mean, sqrt(pvar)), col = 2, lty = 2)
lines(xgrid, qnorm(0.95, p$mean, sqrt(pvar)), col = 2, lty = 2)
legend("top", c("truth", "estimate"), col = 1:2, lty = 1:2)
## ----adapt, warning=FALSE,message=FALSE-----------------------------
X <- seq(0, 1, length = 10)
Y <- fr(X)
mod.a <- mleHetGP(X = X, Z = Y, lower = 0.0001, upper = 1)
h <- rep(NA, 500)
## ----adapt2---------------------------------------------------------
for(i in 1:490) {
h[i] <- horizon(mod.a)
opt <- IMSPE_optim(mod.a, h = h[i])
X <- c(X, opt$par)
Ynew <- fr(opt$par)
Y <- c(Y, Ynew)
mod.a <- update(mod.a, Xnew = opt$par, Znew = Ynew, ginit = mod.a$g * 1.01)
if(i %% 25 == 0) {
mod2 <- mleHetGP(X = list(X0 = mod.a$X0, Z0 = mod.a$Z0,
mult = mod.a$mult), Z = mod.a$Z, lower = 0.0001, upper = 1)
if(mod2$ll > mod.a$ll) mod.a <- mod2
}
}
## ----adapt3, echo = FALSE-------------------------------------------
p.a <- predict(mod.a, matrix(xgrid, ncol = 1))
pvar.a <- p.a$sd2 + p.a$nugs
## ----adapfig, echo = FALSE, fig.height=4, fig.width=8, out.width="6in", out.height="3in", fig.align='center', fig.cap="\\label{fig:adapt}{\\em Left:} Horizons chosen per iteration; {\\em right:} final design and predictions versus the truth, similar to Figure \\ref{fig:forr}."----
par(mfrow = c(1, 2))
plot(h, main = "Horizon", xlab = "Iteration")
plot(xgrid, f1d2(xgrid), type = "l", xlab = "x", ylab = "y",
main = "Adaptive Horizon Design", ylim = c(-4, 5))
lines(xgrid, qnorm(0.05, f1d2(xgrid), fn(xgrid)), col = 1, lty = 2)
lines(xgrid, qnorm(0.95, f1d2(xgrid), fn(xgrid)), col = 1, lty = 2)
points(X, Y)
segments(mod$X0, rep(0, nrow(mod$X0)) - 4, mod$X0, mod$mult * 0.25 - 4,
col = "gray")
lines(xgrid, p$mean, col = 2)
lines(xgrid, qnorm(0.05, p$mean, sqrt(pvar.a)), col = 2, lty = 2)
lines(xgrid, qnorm(0.95, p$mean, sqrt(pvar.a)), col = 2, lty = 2)
## ----adaptn, echo=FALSE, results='hide'-----------------------------
nrow(mod.a$X0)
## ----rmsescore------------------------------------------------------
ytrue <- f1d2(xgrid)
yy <- fr(xgrid)
rbind(rmse = c(h5 = mean((ytrue - p$mean)^2),
ha = mean((ytrue - p.a$mean)^2)),
score = c(h5 = - mean((yy - p$mean)^2 / pvar + log(pvar)),
ha = -mean((yy - p.a$mean)^2 / pvar.a + log(pvar.a))))
## ----atoatime-------------------------------------------------------
c(n = nrow(out.a$X0), N = length(out.a$Z), time = out.a$time)
## ----atoatestscore--------------------------------------------------
sc.a <- scores(out.a, Xtest = Xtest, Ztest = Ztest)
c(batch = sc, adaptive = sc.a)
## ----atorebuild-----------------------------------------------------
out.a <- rebuild(out.a)
## ----atoadapt-------------------------------------------------------
Wijs <- Wij(out.a$X0, theta = out.a$theta, type = out.a$covtype)
h <- horizon(out.a, Wijs = Wijs)
control <- list(tol_dist = 1e-4, tol_diff = 1e-4, multi.start = 30)
opt <- IMSPE_optim(out.a, h, Wijs = Wijs, control = control)
## ----atoopt---------------------------------------------------------
opt$par
## ----atooptunique---------------------------------------------------
opt$path[[1]]$new
## ----EIahead, warning=FALSE, message=FALSE--------------------------
X <- seq(0, 1, length = 10)
X <- c(X, X, X)
Y <- -fr(X)
mod <- mleHetGP(X = X, Z = Y)
## ----EIahead2-------------------------------------------------------
library("parallel")
ncores <- 1 # or: detectCores()
for(i in 1:470) {
opt <- crit_optim(mod, crit = "crit_EI", h = 5, ncores = ncores)
X <- c(X, opt$par)
Ynew <- -fr(opt$par)
Y <- c(Y, Ynew)
mod <- update(mod, Xnew = opt$par, Znew = Ynew, ginit = mod$g * 1.01)
if(i %% 25 == 0) {
mod2 <- mleHetGP(X = list(X0 = mod$X0, Z0 = mod$Z0, mult = mod$mult),
Z = mod$Z, lower = 0.0001, upper = 1)
if(mod2$ll > mod$ll) mod <- mod2
}
}
## ----EIahead3-------------------------------------------------------
p <- predict(mod, matrix(xgrid, ncol = 1))
pvar <- p$sd2 + p$nugs
## ----EIgraphs, echo = FALSE,fig.height=5, fig.width=6, out.width="5in", out.height="4.2in", fig.align='center', fig.cap="\\label{fig:ei} Sequential optimization with horizon $h = 5$. The truth is in black and the predictive distribution in red ."----
plot(xgrid, f1d2(xgrid), type = "l", xlab = "x", ylab = "y",
ylim = c(-4, 5), main = "1d example with EI, h = 5")
lines(xgrid, qnorm(0.05, f1d2(xgrid), fn(xgrid)), col = 1, lty = 2)
lines(xgrid, qnorm(0.95, f1d2(xgrid), fn(xgrid)), col = 1, lty = 2)
points(X, -Y)
segments(mod$X0, rep(0, nrow(mod$X0)) - 4, mod$X0, mod$mult * 0.5 - 4,
col = "gray")
lines(xgrid, -p$mean, col = 2)
lines(xgrid, qnorm(0.05, -p$mean, sqrt(pvar)), col = 2, lty = 2)
lines(xgrid, qnorm(0.95, -p$mean, sqrt(pvar)), col = 2, lty = 2)
legend("top", c("truth", "estimate"), col = 1:2, lty = 1:2)
## ----EIreps---------------------------------------------------------
nrow(mod$X0)
## ----Contour_ahead, warning=FALSE, message=FALSE--------------------
X <- seq(0, 1, length = 10)
X <- c(X, X, X)
Y <- fr(X)
mod <- mleHetGP(X = X, Z = Y)
for(i in 1:470) {
opt <- crit_optim(mod, crit = "crit_cSUR", h = 5, ncores = ncores)
X <- c(X, opt$par)
Ynew <- fr(opt$par)
Y <- c(Y, Ynew)
mod <- update(mod, Xnew = opt$par, Znew = Ynew, ginit = mod$g * 1.01)
if(i %% 25 == 0) {
mod2 <- mleHetGP(X = list(X0 = mod$X0, Z0 = mod$Z0, mult = mod$mult),
Z = mod$Z, lower = 0.0001, upper = 1)
if(mod2$ll > mod$ll) mod <- mod2
}
}
p <- predict(mod, matrix(xgrid, ncol = 1))
pvar <- p$sd2 + p$nugs
## ----contour_n------------------------------------------------------
nrow(mod$X0)
## ----cSURgraphs, echo = FALSE, fig.height=5, fig.width=6, out.width="5in", out.height="4.2in", fig.align='center', fig.cap="\\label{fig:contour} Sequential contour finding with horizon $h = 5$. The truth is in black and the predictive distribution in red."----
plot(xgrid, f1d2(xgrid), type = "l", xlab = "x", ylab = "y",
ylim = c(-4, 5), main="1d example with cSUR, h = 5")
lines(xgrid, qnorm(0.05, f1d2(xgrid), fn(xgrid)), col = 1, lty = 2)
lines(xgrid, qnorm(0.95, f1d2(xgrid), fn(xgrid)), col = 1, lty = 2)
points(X, Y)
segments(mod$X0, rep(0, nrow(mod$X0)) - 4, mod$X0, mod$mult * 0.5 - 4,
col="gray")
lines(xgrid, p$mean, col = 2)
lines(xgrid, qnorm(0.05, p$mean, sqrt(pvar)), col = 2, lty = 2)
lines(xgrid, qnorm(0.95, p$mean, sqrt(pvar)), col = 2, lty = 2)
legend("top", c("truth", "estimate"), col = 1:2, lty = 1:2)
abline(h = 0, col = "blue")
|
a1ac91e691b48882bc3dcf2dd0b46da04ecf1027
|
2abd33ed5fb7048bde5f7715c2d404bdb31406d0
|
/Week 11/Studio11/perm.log.reg.R.txt
|
aac6d7e97d163517346692a2476d6f9235cbe926
|
[] |
no_license
|
theRealAndyYang/FIT2086-Modelling-for-Data-Analysis
|
ba609c3b7a63f414d5e19968f9d6650864590e5c
|
81fa3a4a2ffe47dadb9702ae77115203766094a0
|
refs/heads/master
| 2022-11-26T03:57:22.673076
| 2020-08-05T11:13:53
| 2020-08-05T11:13:53
| 285,262,206
| 9
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,594
|
txt
|
perm.log.reg.R.txt
|
# Permutation test for logistic-regression coefficients.
#
# formula: model formula passed to glm(family = binomial).
# data:    data frame containing the variables in `formula`.
# R:       number of permutations.
#
# Returns a list with:
#   fit       - the glm fit on the original data
#   perm.coef - R x p matrix of coefficients from fits to permuted responses
#   p.value   - per-coefficient permutation p-values: the proportion of
#               permuted |coefficients| exceeding the observed |coefficient|
perm.log.reg <- function(formula, data, R)
{
  rv <- list()
  n <- nrow(data)

  # Fit the logistic regression model to the original data.
  rv$fit <- glm(formula, data, family = binomial)

  # Identify the response variable and allocate storage for permuted fits.
  # BUG FIX: the original referenced a global `fit` here and in the p-value
  # computation below; the fitted model is stored in rv$fit.
  target <- as.character(rv$fit$terms[[2]])
  rv$perm.coef <- matrix(NA, R, length(rv$fit$coefficients),
                         dimnames = list(NULL, names(rv$fit$coefficients)))

  # Refit the model R times with the response randomly permuted;
  # sample(n) produces a random ordering of 1:n.
  d <- data
  for (i in 1:R)
  {
    d[, target] <- d[sample(n), target]
    fit.perm <- glm(formula, d, family = binomial)
    rv$perm.coef[i, ] <- fit.perm$coefficients
  }

  # Permutation p-values: sweep() compares each row of abs(rv$perm.coef)
  # against the observed absolute coefficients, and colMeans() turns the
  # resulting indicator matrix into proportions.
  rv$p.value <- colMeans(sweep(abs(rv$perm.coef), 2,
                               abs(rv$fit$coefficients), ">"))
  return(rv)
}
|
7124180c89e0d5e4ceb127840322e545260f288a
|
36fe24142a262ab40d5b0c8f1f5ceecf8fd6b8cf
|
/man/IVoverid.Rd
|
85ea3c40c33dbcc97db531519ec4df55a55523f3
|
[] |
no_license
|
shizelong1985/ManyIV
|
2619a18d304a8ad4b65935aa652b271aa7f068f6
|
7b5c3e957b7488f72ddd12d2221f288665bb730c
|
refs/heads/master
| 2023-03-17T15:07:42.329751
| 2021-02-22T18:38:26
| 2021-02-22T18:38:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,415
|
rd
|
IVoverid.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/manyiv.R
\name{IVoverid}
\alias{IVoverid}
\title{Test of overidentifying restrictions}
\usage{
IVoverid(r)
}
\arguments{
\item{r}{An object of class \code{RDResults}}
}
\description{
Report the Sargan and modified Cragg-Donald test statistics and
\eqn{p}-values for testing of overidentifying restrictions, assuming
homoskedasticity of the reduced form. The Sargan test is valid under few
instruments. The Modified Cragg-Donald test (Modified-CD) corresponds to a
test due to Cragg and Donald (1993), with a modified critical value. The
modification was suggested in Kolesár (2018) to make it robust to many
instruments and many exogenous regressors.
}
\examples{
r1 <- IVreg(lwage~education+black+married | as.factor(qob), data=ak80,
inference="standard")
IVoverid(r1)
}
\references{
{
\cite{Kolesár, Michal. “Minimum Distance Approach to Inference with Many
Instruments.” Journal of Econometrics 204 (1): 86–100.
\doi{10.1016/j.jeconom.2018.01.004}.}
\cite{Cragg, John G., and Stephen G. Donald. 1993. "Testing Identifiability
and Specification in Instrumental Variable Models." Econometric Theory 9 (2):
222–40. \doi{10.1017/S0266466600007519}.}
\cite{Sargan, John Denis. 1958. "The Estimation of Economic Relationships
Using Instrumental Variables." Econometrica 26 (3): 393–415.
\doi{10.2307/1907619}.}
}
}
|
855a34b2bf9163b0ad3ce19c85f0125f4653fa89
|
ab79177ad95b0e89d70210a3478b91f98cdb6b30
|
/man/unit_contrast.Rd
|
a5d9584fe0754c7e2cbe1b54e3a6f11876205ddf
|
[] |
no_license
|
bbuchsbaum/fmrireg
|
93e69866fe8afb655596aa23c6f9e3ca4004a81c
|
2dd004018b3b7997e70759fc1652c8d51e0398d7
|
refs/heads/master
| 2023-05-10T17:01:56.484913
| 2023-05-09T14:38:24
| 2023-05-09T14:38:24
| 18,412,463
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 685
|
rd
|
unit_contrast.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/contrast.R
\name{unit_contrast}
\alias{unit_contrast}
\title{Unit Contrast}
\usage{
unit_contrast(A, name, where = NULL)
}
\arguments{
\item{A}{A formula representing the contrast expression.}
\item{name}{A character string specifying the name of the contrast.}
\item{where}{An optional formula specifying the subset of conditions to apply the contrast to.}
}
\value{
A unit_contrast_spec object containing the contrast that sums to 1.
}
\description{
Construct a contrast that sums to 1 and is used to define contrasts against the baseline.
}
\examples{
con <- unit_contrast(~ Face, name="Main_face")
}
|
933dfa83d04e5cbabd762680a352992bd064e36d
|
9430e5cd40071a7a0a5e92a3a17ee4706538f0d3
|
/man/getSimWang.Rd
|
da9095107bd0ca614bc2abcc47b681874d556b9c
|
[] |
no_license
|
MoudFassad/HPOSim
|
63314cf60d420dc402fb42411f9557af82687f61
|
03a2559b5d0cedc8db6a6e207236a1cd220a6763
|
refs/heads/master
| 2022-03-30T07:25:05.146874
| 2020-01-09T12:38:10
| 2020-01-09T12:38:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 704
|
rd
|
getSimWang.Rd
|
\name{getSimWang}
\alias{getSimWang}
\title{ Semantic Similarity Between Two HPO Terms by Wang's Method }
\description{
Given two HPO terms, this function will calculate the Wang's Semantic Similarity between them
}
\usage{
getSimWang(term1, term2)
}
\arguments{
\item{term1}{ one HPO term }
\item{term2}{ another HPO term }
}
\value{ Semantic similarity.}
\references{
[1] J. Z. Wang, Z. Du, R. Payattakool, P. S. Yu, and C.-F. Chen, "A new method to measure the semantic similarity of GO terms", Bioinformatics, vol. 23, no. 10, pp. 1274-1281, May. 2007.
}
\author{
Yue Deng <anfdeng@163.com>
}
\examples{
getSimWang("HP:0000028","HP:0000033")
}
\keyword{ manip }
|
7d49ea3581567b89fa9d942c0d51cd64f26a9955
|
768a5e8713ed0751fdea1fc0512dc5e87c1c06b0
|
/R/EvapHeat.R
|
99d7c04baae5e1049a72774e6b65743156891bc8
|
[] |
no_license
|
cran/EcoHydRology
|
c854757a7f70f91b3d33d6f7c5313752bf2819e3
|
d152909c12e6bb0c1f16b987aa7f49737cdcf3d3
|
refs/heads/master
| 2020-05-16T21:01:18.881492
| 2018-09-24T11:52:33
| 2018-09-24T11:52:33
| 17,691,749
| 6
| 6
| null | 2018-08-29T19:54:05
| 2014-03-13T02:26:21
|
R
|
UTF-8
|
R
| false
| false
| 819
|
r
|
EvapHeat.R
|
EvapHeat <- function (surftemp, airtemp, relativehumidity=NULL, Tn=NULL, wind=2) {
  ## Evaporative heat flux from a daily wind function and the surface/air
  ## vapor-density deficit. SatVaporDensity() comes from the enclosing package.
  ## surftemp: Temperature of surface [degrees C]
  ## airtemp: Temperature of air [degrees C]
  ## relativehumidity: between 0 - 1 [-]; if NULL or out of range, the air
  ##   vapor density is computed from Tn instead
  ## Tn: minimum daily air temperature, assumed to be the dewpoint temperature [C]
  ## wind: average daily windspeed [m/s]
  windfunction <- 5.3 * (1 + wind)
  # BUG FIX: with the default relativehumidity = NULL the original comparison
  # `relativehumidity >= 0` yields logical(0), so if() aborted with
  # "argument is of length zero". Guard with is.null() (short-circuiting &&)
  # before the range check.
  if (!is.null(relativehumidity) &&
      relativehumidity >= 0 && relativehumidity <= 1) {
    airvapordensity <- relativehumidity * SatVaporDensity(airtemp)
  } else {
    # Fall back to the saturation vapor density at the dewpoint temperature Tn.
    airvapordensity <- SatVaporDensity(Tn)
  }
  surfacevapordensity <- SatVaporDensity(surftemp)
  # 86400 converts the per-second wind function to a daily total.
  round(86400 * windfunction * (surfacevapordensity - airvapordensity))
}
|
c62c092c07f19dc80d0215b26e3d3592682a4af2
|
42312ee43cf5ce7c629ea43841ca4a2c7f4e176f
|
/inst/shiny/server.R
|
a21436930b9d2bf6e6e7b296e224dac84b60ff42
|
[] |
no_license
|
marieannevibet/ArchaeoPhases
|
dca4175f9e9655caa702442d3779bb178aa304da
|
d6094e9335a23d78e5fe11ddb3cf10c8ad95cd49
|
refs/heads/master
| 2020-12-02T08:02:28.833445
| 2017-09-28T11:59:23
| 2017-09-28T11:59:23
| 96,762,386
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32,018
|
r
|
server.R
|
shinyServer(function(input, output, clientData, session) {
# By default, Shiny limits file uploads to 5MB per file. You can modify this limit by using the shiny.maxRequestSize option.
# For example, adding options(shiny.maxRequestSize=30*1024^2) to the top of server.R would increase the limit to 30MB.
options(shiny.maxRequestSize=30*1024^2)
#######################################
#### Onglet : Import CSV #####
dataInput <- reactive({
file1 <- input$file11
if(is.null(file1)){return()}
if(input$iterationColumn1=="NULL"){
itCol = NULL
}else{
itCol = as.numeric(input$iterationColumn1)
}
if(input$referenceYear1=="NULL"){
refY = NULL
}else{
refY = as.numeric(input$referenceYear1)
}
if(input$rowToWithdraw1=="NULL"){
rowW = NULL
}else{
rowW = as.numeric(input$rowToWithdraw1)
}
ImportCSV(file=file1$datapath, sep=input$sep11, dec=input$dec11, header=TRUE, comment.char="#", iterationColumn = itCol, referenceYear = refY, rowToWithdraw = rowW)
})
output$filedf11 <- renderTable({
if(is.null(dataInput())){return()}
input$file11
})
output$table11 <- renderDataTable({
if(is.null(dataInput())){return()}
datatable(dataInput(), options = list(pageLength = 5, dom = 'tip'), rownames=FALSE)
})
output$AfficheTableLue11 <- renderUI({
if(is.null(dataInput()))
h5("No data imported")
else
tabsetPanel(tabPanel("About file", tableOutput("filedf11")), tabPanel("Data", DT::dataTableOutput("table11")))
})
##################################################
#### MCMC des MinMax des groupes ###
namesG <- reactive({
names = colnames(dataInput())
return(names)
})
# Initialize reactive values
valuesG <- reactiveValues()
dataInput12 <- reactive({
file2 <- input$file12
if(is.null(file2)){return()}
if(input$iterationColumn2=="NULL"){
itCol = NULL
}else{
itCol = as.numeric(input$iterationColumn2)
}
if(input$referenceYear2=="NULL"){
refY = NULL
}else{
refY = as.numeric(input$referenceYear2)
}
if(input$rowToWithdraw2=="NULL"){
rowW = NULL
}else{
rowW = as.numeric(input$rowToWithdraw2)
}
ImportCSV(file=file2$datapath, sep=input$sep12, dec=input$dec12, header=TRUE, comment.char="#", iterationColumn = itCol, referenceYear = refY, rowToWithdraw = rowW)
})
output$filedf12 <- renderTable({
if(is.null(dataInput12())){return()}
input$file12
})
output$table12 <- renderDataTable({
if(is.null(dataInput12())){return()}
datatable(dataInput12(), options = list(pageLength = 5, dom = 'tip'), rownames=FALSE)
})
output$AfficheTableLue12 <- renderUI({
if(is.null(dataInput12()))
h5("No data imported")
else
tabsetPanel(tabPanel("About file", tableOutput("filedf12")), tabPanel("Data", DT::dataTableOutput("table12")))
})
observeEvent(input$StockageFile2, {
valuesG$file2 <- dataInput12()
})
##################################################
#### Creation des groupes de dates ####
#### pour calcul des MinMax ###
output$ChainsSelectionG <- renderUI({
themesG <- namesG()
valuesG$namesG <- themesG
checkboxGroupInput('ChainsSelectionG', 'Select a series of dates:', themesG)
})
# Add observer on select-all button
observeEvent(input$selectAllG, {
valuesG$namesG <- namesG()
updateCheckboxGroupInput(session, 'ChainsSelectionG', selected = valuesG$namesG)
})
# Add observer on clear-all button
observeEvent(input$clearAllG, {
valuesG$namesG <- c()
updateCheckboxGroupInput(session, 'ChainsSelectionG', selected = "none")
})
# data selectionnees
selectDataG <- reactive({
dataInput()[, input$ChainsSelectionG, drop = FALSE]
})
# affichage table de donnees
output$DatasetG <- renderDataTable({
if(is.null(selectDataG())){return( )}
datatable(selectDataG(), options = list(pageLength = 5, dom = 'tip'), rownames=FALSE)
})
## CreateMinMaxGroup
createGroup1 <- eventReactive(input$goButton, {
position = seq(1, length(input$ChainsSelectionG))
dataGroup = CreateMinMaxGroup(selectDataG(), position=position, name =input$name, add=NULL)
})
observeEvent(input$goButton, {
valuesG$dataGroup <- createGroup1()
})
observeEvent(input$addButton, {
valuesG$dataGroup <- addGroup()
})
addGroup <- eventReactive(input$addButton, {
position = seq(1, length(input$ChainsSelectionG))
addGroup = CreateMinMaxGroup(selectDataG(), position=position, name =input$name, add=valuesG$dataGroup)#, exportFile=export)
return(addGroup)
})
observeEvent(input$clearButton, {
valuesG$dataGroup <- NULL
})
output$tableGroup <- renderDataTable({
if(is.null(valuesG$dataGroup)){return()}
datatable(valuesG$dataGroup, options = list(pageLength = 5, dom = 'tip'), rownames=FALSE)
})
output$result13 <- renderUI({
if(is.null(dataInput()))
h5("No data imported")
else
tabsetPanel(tabPanel("Data", DT::dataTableOutput("DatasetG")), tabPanel("Groups", DT::dataTableOutput("tableGroup")) )
})
output$downloadData <- downloadHandler(
filename = function() { paste("MinMaxGroup", '.csv', sep='') },
content = function(file) {
write.csv(valuesG$dataGroup, file)
}
)
observeEvent(input$StockageFile22, {
valuesG$file2 <- valuesG$dataGroup
})
#######################################
#### Onglet : Convergence ###
## Checking the Markov chains ##
# Affichage des colonnes du dataframe
namesCV <- reactive({
names = colnames(dataInput())
return(names)
})
# Initialize reactive values
valuesCV <- reactiveValues()
output$ChainsSelectionCV <- renderUI({
themesCV <- namesCV()
valuesCV$namesCV <- themesCV
checkboxGroupInput('ChainsSelectionCV', 'Select a series of dates (at least two):', choices =themesCV, selected = valuesCV$namesCV[1:2])
})
# Add observer on select-all button
observeEvent(input$selectAllCV, {
valuesCV$namesCV <- namesCV()
updateCheckboxGroupInput(session, 'ChainsSelectionCV', selected = valuesCV$namesCV)
})
# Add observer on clear-all button
observeEvent(input$clearAllCV, {
valuesCV$namesCV <- c()
updateCheckboxGroupInput(session, 'ChainsSelectionCV', selected = "none")
})
# data selectionnees
selectDataCV <- reactive({
dataInput()[, input$ChainsSelectionCV, drop = FALSE]
})
mcmc_List <- reactive({
if(is.null(selectDataCV)){return()}
coda.mcmc(selectDataCV(), numberChains = input$NbChains)#, iterationColumn = itC)
})
output$MCMCplot <- renderPlot({
plot(mcmc_List())
})
GelmanDiag <- reactive({
gelman.diag(mcmc_List())
})
output$GelmanDiagTable <- renderTable({
if(is.null(GelmanDiag())) {return()}
else {
res = GelmanDiag()$psrf
dim = dim(res)
namesRes = rownames(res)
PointEst = NULL
UpperCI = NULL
names = NULL
for (i in 1:dim[1]){
names = c(names, namesRes[i])
PointEst= c(PointEst, res[i,1])
UpperCI = c(UpperCI, res[i,2])
}
data.frame("names"=names,"Point estimate"=PointEst, "Upper Credible Interval" = UpperCI)
}
})
output$Gelmanplot <- renderPlot({
gelman.plot(mcmc_List())
})
output$Diagnostics <- renderUI({
if(is.null(selectDataCV()))
h5("No data imported")
else
tabsetPanel(tabPanel("History Plots", plotOutput("MCMCplot")), tabPanel("Gelman Plots", plotOutput("Gelmanplot")), tabPanel("Gelman Diagnostic", tableOutput("GelmanDiagTable")) )
})
#######################################
#### Onglet : Dates ####
#### Selection d une chaine ##
names <- reactive({
names = colnames(dataInput())
return(names)
})
observe({
updateSelectInput(session, inputId='variables', 'Select a MCMC chain', choices = names() )
})
selectChain <- reactive({
dataInput()[[ input$variables ]]
})
output$MarginalPlot <- renderPlot({
MarginalPlot(selectChain(), level = input$level, title = input$titlePlot, colors=input$color )
})
MarginalStatisticsText <- reactive({
MarginalStatistics(selectChain(), level = input$level)
})
output$MarginalStatisticsUI <- renderUI({
tags$div(
tags$p("Mean = ", MarginalStatisticsText()[1,1]),
tags$p("MAP = ", MarginalStatisticsText()[2,1]),
tags$p("sd = ", MarginalStatisticsText()[3,1]),
tags$p("Q1 = ", MarginalStatisticsText()[4,1]),
tags$p("Median = ", MarginalStatisticsText()[5,1]),
tags$p("Q2 = ", MarginalStatisticsText()[6,1]),
tags$p("For a level of confidence at ", MarginalStatisticsText()[7,1]*100, "%"),
tags$p("Credible Interval = [", MarginalStatisticsText()[8,1], "," , MarginalStatisticsText()[9,1], "]"),
tags$p("HPD region = [", MarginalStatisticsText()[10,1], "," , MarginalStatisticsText()[11,1], "]")
)
})
output$result2 <- renderUI({
if(is.null(dataInput()))
h5("No data imported")
else
tabsetPanel(tabPanel("Marginal plot", plotOutput("MarginalPlot")), tabPanel("Marginal statistics", uiOutput("MarginalStatisticsUI")))
})
output$downloadPlotDates <- downloadHandler(
filename = function() { paste("MarginalPlot", '.png', sep='') },
content = function(file) {
png(file)
MarginalPlot(selectChain(), level = input$level, title = input$titlePlot, colors=input$color )
dev.off()
}
)
##### Onglet : Dates - Selection plusieurs chaines ####
# Initialize reactive values
values <- reactiveValues()
output$ChainsSelection <- renderUI({
themes <- names()
values$names <- themes
checkboxGroupInput('multiChainsCI', 'Select numbers:', themes)
})
# Add observer on select-all button
observeEvent(input$selectAll, {
values$names <- names()
updateCheckboxGroupInput(session, 'multiChainsCI', selected = values$names)
})
# Add observer on clear-all button
observeEvent(input$clearAll, {
values$names <- c()
updateCheckboxGroupInput(session, 'multiChainsCI', selected = "none")
})
# data selectionnees
selectData <- reactive({
dataInput()[, input$multiChainsCI, drop = FALSE]
})
# affichage table de donnees
output$DatasetCI <- renderDataTable({
if(is.null(selectData())){return( )}
datatable(selectData(), options = list(pageLength = 5, dom = 'tip'), rownames=FALSE)
})
# calcul des IC
MultiCredibleIntervalText <- reactive({
if(is.null( input$multiChainsCI )) { return()}
position = seq(1, length(input$multiChainsCI))
MultiCredibleInterval(selectData(), position, level = input$level22)
})
# affichage des resultats des IC
output$resultTableMCI <- renderTable({
if(is.null(MultiCredibleIntervalText())) {return()}
else {
dim = dim(MultiCredibleIntervalText())
names_CI = rownames(MultiCredibleIntervalText())
CIInf = NULL
CISup = NULL
name = NULL
for (i in 1:dim[1]){
name = c(name, names_CI[i])
CIInf= c(CIInf, MultiCredibleIntervalText()[i,2])
CISup = c(CISup, MultiCredibleIntervalText()[i,3])
}
data.frame("names"=name, "Credible Interval Inf"=CIInf, "Credible Interval Sup" = CISup)
}
})
### calcul des HPD
MultiHPDText <- reactive({
if(is.null( input$multiChainsCI )) { return()}
position = seq(1, length(input$multiChainsCI))
MultiHPD(selectData(), position, level = input$level22)
})
# affichage des resultats des IC
output$resultTableMHPD <- renderTable({
if(is.null(MultiHPDText())) {return()}
else {
dim = dim(MultiHPDText())
names_HPD = rownames(MultiHPDText())
HPDInf = NULL
HPDSup = NULL
name = NULL
for (i in 1:dim[1]){
name = c(name, names_HPD[i])
HPDInf= c(HPDInf, MultiHPDText()[i,2])
HPDSup = c(HPDSup, MultiHPDText()[i,3])
}
data.frame("names"=name, "HPD Inf"=HPDInf, "HPD Sup" = HPDSup)
}
})
output$MultiDatesPlot <- renderPlot({
if(is.null( input$multiChainsCI )) { return()}
position = seq(1, length(input$multiChainsCI))
#if(input$exportFile22IT == "TRUE") { outFile = "IntervalsPlot"} else{ outFile = NULL}
MultiDatesPlot(selectData(), position, intervals =input$intervals, order = input$order, level = input$level, title = input$titleIntervalsplot, newWindow=FALSE, print.data.result = FALSE)
})#, height = 600, width = 800)
output$downloadIntervalPlot <- downloadHandler(
filename = function() { paste("downloadIntervalPlot", '.png', sep='') },
content = function(file) {
position = seq(1, length(input$multiChainsCI))
png(file)
MultiDatesPlot(selectData(), position, intervals =input$intervals, level = input$level, title = input$titleIntervalsplot, print.data.result = FALSE)
dev.off()
}
)
output$ui<- renderUI({
switch(input$count,
"TRUE" = textInput(inputId='ylabel', label="y-label", "Cumulative events" ),
"FALSE" = textInput(inputId='ylabel', label="y-label", "Probability" )
)
})
output$TempoPlot <- renderPlot({
if(is.null( input$multiChainsCI )) { return()}
position = seq(1, length(input$multiChainsCI))
#if(input$exportFile22 == "TRUE") { outFile = "TempoPlot.png"} else{ outFile = NULL}
TempoPlot(selectData(), position, level = input$level, title = input$titleTempoplot, Gauss=input$GaussCI, count=input$count, x.label=input$xlabel, y.label=input$ylabel, colors = input$colors, newWindow=FALSE, print.data.result = FALSE)
})#, height = 600, width = 800)
output$TempoPlotUI <- renderUI({
if(is.null( input$multiChainsCI )) {h5(" Nothing to display ")}
else{
plotOutput("TempoPlot", width="80%")
}
})
output$downloadTempoPlot <- downloadHandler(
filename = function() { paste("downloadTempoPlot", '.png', sep='') },
content = function(file) {
position = seq(1, length(input$multiChainsCI))
png(file)
TempoPlot(selectData(), position, level = input$level, title = input$titleTempoplot, Gauss=input$GaussCI, count=input$count, x.label=input$xlabel, y.label=input$ylabel, colors = input$colors, print.data.result = FALSE)#, out.file=outFile)
dev.off()
}
)
output$TempoActivityPlot <- renderPlot({
if(is.null( input$multiChainsCI )) { return()}
position = seq(1, length(input$multiChainsCI))
TempoActivityPlot(selectData(), position, level = input$level, count=input$count, newWindow=FALSE, print.data.result = FALSE)
})#, height = 600, width = 800)
output$TempoActivityPlotUI <- renderUI({
if(is.null( input$multiChainsCI )) {h5(" Nothing to display ")}
else{
plotOutput("TempoActivityPlot", width="80%")
}
})
output$downloadActivityPlot <- downloadHandler(
filename = function() { paste("downloadActivityPlot", '.png', sep='') },
content = function(file) {
position = seq(1, length(input$multiChainsCI))
png(file)
TempoActivityPlot(selectData(), position, level = input$level, count=input$count, print.data.result = FALSE)
dev.off()
}
)
output$OccurrencePlot <- renderPlot({
if(is.null( input$multiChainsCI )) { return()}
position = seq(1, length(input$multiChainsCI))
OccurrencePlot(selectData(), position, level = input$level, count=input$count, newWindow=FALSE, print.data.result = FALSE)
})
output$OccurrencePlotUI <- renderUI({
if(is.null( input$multiChainsCI )) {h5(" Nothing to display ")}
else{
plotOutput("OccurrencePlot", width="80%")
}
})
output$downloadOccurrencePlot <- downloadHandler(
filename = function() { paste("downloadOccurrencePlot", '.png', sep='') },
content = function(file) {
position = seq(1, length(input$multiChainsCI))
png(file)
OccurrencePlot(selectData(), position, level = input$level, count=input$count, print.data.result = FALSE)
dev.off()
}
)
output$result22 <- renderUI({
if(is.null(dataInput()))
h5("No data imported")
else
tabsetPanel(tabPanel("Data", DT::dataTableOutput("DatasetCI")),
tabPanel("Credible intervals", uiOutput("resultTableMCI")),
tabPanel("HPD regions", uiOutput("resultTableMHPD")),
tabPanel("Intervals Plot", plotOutput("MultiDatesPlot")),
tabPanel("Tempo Plot", plotOutput("TempoPlot"), br(), plotOutput("TempoActivityPlot")),
tabPanel("Occurrence Plot", plotOutput("OccurrencePlot"))
)
})
#######################################
#### Onglet : Tests ###
## Selection plusieurs chaines ##
# Initialize reactive values
# valuesTests <- reactiveValues()
namesTests <- reactive({
names = colnames(dataInput())
return(names)
})
observe({
updateSelectInput(session, inputId='variableTest1a', 'Select date a', choices = namesTests())
updateSelectInput(session, inputId='variableTest1b', 'Select date b', choices = namesTests())
})
selectChainTests <- reactive({
dataInput()[,c(input$variableTest1a, input$variableTest1b), drop = FALSE]
})
output$DataSelectedTests <- renderDataTable({
if(is.null(selectChainTests())) { return( h5("")) }
else
datatable(selectChainTests(), options = list(pageLength = 5, dom = 'tip'), rownames=FALSE)
})
MarginalProbaText <- renderText({
MarginalProba(dataInput()[,input$variableTest1a, drop=TRUE], dataInput()[,input$variableTest1b, drop=TRUE])
})
output$MarginalProbaUI <- renderUI({
tags$div(
tags$p("The posterior probability that 'date a' is earlier than 'date b' is "),
tags$p(MarginalProbaText())
)
})
DatesHiatusText <- reactive({
DatesHiatus(dataInput()[,input$variableTest1a, drop=TRUE], dataInput()[,input$variableTest1b, drop=TRUE], level = input$levelTests)
})
output$DatesHiatusUI <- renderUI({
tags$div(
tags$p("The testing procedure to check the presence of a gap between 'date a' and 'date b'"),
tags$p("It returns the endpoints of the longest hiatus between two parameters. The result is given in calendar year (in format BC/AD)."),
br(),
tags$p("If 'NA', there is no hiatus at this level of confidence between 'date a' and 'date b'."),
br(),
tags$p("The inferior endpoint of the interval is ",DatesHiatusText()[2]),
tags$p("The superior endpoint of the interval is ",DatesHiatusText()[3])
)
})
output$resultTests <- renderUI({
if(is.null(dataInput()))
h5("No data imported")
else
tabsetPanel(tabPanel("Data selected", DT::dataTableOutput("DataSelectedTests")), tabPanel("Anteriority / Posteriority test", uiOutput("MarginalProbaUI")), tabPanel("Hiatus between dates", uiOutput("DatesHiatusUI")))
})
##############################################
#### Onglet : Group of dates ####
#### Selection d un group ###
dataGroup2 <- reactive({
as.data.frame(valuesG$file2)
})
namesGroups <- reactive({
names12 = colnames(dataGroup2())
return(names12)
})
observe({
updateSelectInput(session, inputId='variablesMin', 'Select the minimum of the group', choices = namesGroups())
updateSelectInput(session, inputId='variablesMax', 'Select the maximum of the group', choices = namesGroups())
})
selectChain2 <- reactive({
dataGroup2()[,c(input$variablesMin, input$variablesMax), drop = FALSE]
})
TestPhaseSelected <- reactive({
if( sum(ifelse(dataGroup2()[,1] < dataGroup2()[,2], 1, 0)) == length(dataGroup2()[,1])) {return(1)}
})
output$selectedTable2 <- renderDataTable({
if(is.null(selectChain2())) { return( h5("")) }
else
datatable(selectChain2(), options = list(pageLength = 5, dom = 'tip'), rownames=FALSE)
})
PhaseStatisticsText <- reactive({
PhaseStatistics(dataGroup2()[,input$variablesMin, drop=TRUE], dataGroup2()[,input$variablesMax, drop=TRUE], level = input$level2)
})
output$PhaseStatisticsUI <- renderTable({
res = PhaseStatisticsText()
if(is.null(res)) {return()}
else {
dim = dim(res)
names_PS = rownames(res)
Minimum = NULL
Maximum = NULL
Duration = NULL
name = NULL
for (i in 1:dim[1]){
name = c(name, names_PS[i])
Minimum= c(Minimum, res[i,2])
Maximum = c(Maximum, res[i,3])
Duration = c(Duration, res[i,3])
}
data.frame("names"=name, "Minimum"=Minimum, "Maximum" = Maximum, "Duration" = Duration)
}
})
PhaseTimeRangeText <- reactive({
PhaseTimeRange(dataGroup2()[,input$variablesMin, drop=TRUE],dataGroup2()[,input$variablesMax, drop=TRUE], level = input$level2)
})
output$PhaseTimeRangeUI <- renderUI({
res = PhaseTimeRangeText()
tags$div(
tags$p("For a level of confidence at ", res[1]*100, "%"),
tags$p("Time Range = [", res[2], "," ,res[3], "]")
)
})
output$PhasePlotFunction <- renderPlot({
PhasePlot(dataGroup2()[,input$variablesMin, drop=TRUE], dataGroup2()[,input$variablesMax, drop=TRUE], level = input$level2, title = input$titlePlot2, colors=input$color2 )
})
output$downloadGroupPlot <- downloadHandler(
filename = function() { paste("downloadGroupPlot", '.png', sep='') },
content = function(file) {
png(file)
PhasePlot(dataGroup2()[,input$variablesMin, drop=TRUE], dataGroup2()[,input$variablesMax, drop=TRUE], level = input$level2, title = input$titlePlot2, colors=input$color2 )
dev.off()
}
)
# Density plot of the phase duration. BUGFIX: this output is consumed by
# plotOutput("PhaseDurationPlotFunction") in PhaseDurationPlotUI below, so it
# must be registered with renderPlot(); the original renderUI() treated the
# base-graphics call as HTML and no plot was ever drawn.
output$PhaseDurationPlotFunction <- renderPlot({
  PhaseDurationPlot(dataGroup2()[,input$variablesMin, drop=TRUE], dataGroup2()[,input$variablesMax, drop=TRUE], level = input$level2, title = "Duration of the phase", colors=input$color2 )
})
output$PhasePlotUI <- renderUI({
if(is.null(dataGroup2()))
{h5(" Nothing to display ")}
else{
plotOutput("PhasePlotFunction")
}
})
output$PhaseDurationPlotUI <- renderUI({
if(is.null(dataGroup2()))
{h5(" Nothing to display ")}
else{
plotOutput("PhaseDurationPlotFunction")
}
})
output$result3 <- renderUI({
if(is.null(dataGroup2()))
h5("No data imported")
else
tabsetPanel(tabPanel("Data selected", DT::dataTableOutput("selectedTable2")), tabPanel("Plot of the characteristics", fluidRow( uiOutput("PhasePlotUI")) ), tabPanel("Time range", uiOutput("PhaseTimeRangeUI")), tabPanel("Marginal Statistics", uiOutput("PhaseStatisticsUI")))
})
#####################################
#### Onglet : Several groups ####
# Reactive store backing the "Several groups" tab's checkbox state.
# BUGFIX: the original created `phases` but then wrote to `succession`,
# the store belonging to the "Succession of phases" tab defined further
# down — a copy/paste slip that made the two tabs' select-all/clear-all
# buttons clobber each other's selection state. All writes now target
# `phases`, which was created here for exactly this purpose.
phases <- reactiveValues()
output$PhasesSelection32 <- renderUI({
  themes <- namesGroups()
  phases$names <- themes
  checkboxGroupInput('multiPhasesSelection32', 'Select the minimum and the maximum of each group:', themes)
})
# Add observer on select-all button: tick every group column.
observeEvent(input$selectAll32, {
  phases$names <- namesGroups()
  updateCheckboxGroupInput(session, 'multiPhasesSelection32', selected = phases$names)
})
# Add observer on clear-all button: untick everything.
observeEvent(input$clearAll32, {
  phases$names <- c()
  updateCheckboxGroupInput(session, 'multiPhasesSelection32', selected = "none")
})
# data selectionnees
selectData32 <- reactive({
dataGroup2()[, input$multiPhasesSelection32, drop = FALSE]
})
# affichage table de donnees
output$DatasetPhases32 <- renderDataTable({
if(is.null(selectData32())){return()}
datatable(selectData32(), options = list(pageLength = 5, dom = 'tip'), rownames=FALSE)
})
Position_beginning32 <- reactive({
dim = dim(selectData32())[2]
pos = seq(1, dim, by = 2)
return(pos)
})
MultiPhaseTimeRangeFunction <- reactive({
MultiPhaseTimeRange(selectData32(), position_minimum = Position_beginning32(), level = input$levelMultiPhases)
})
output$MultiPhaseTimeRangeUI <- renderTable({
res = MultiPhaseTimeRangeFunction()
if(is.null(res)) {h5(" Nothing to display ")}
else {
dim = dim(res)
names_MTR = rownames(res)
PTInf = NULL
PTSup = NULL
names = NULL
for (i in 1:dim[1]){
names = c(names, names_MTR[i])
PTInf= c(PTInf, res[i,2])
PTSup = c(PTSup, res[i,3])
}
data.frame("names"=names,"Time Range Inf"=PTInf, "Time Range Sup" = PTSup)
}
})
output$MultiPhasePlotFunction <- renderPlot({
MultiPhasePlot(selectData32(), position_minimum = Position_beginning32(), title = input$titleMultiPhases, level = input$levelMultiPhases)
})
output$MultiPhasePlotUI <- renderUI({
if(is.null(selectData32()))
{h5(" Nothing to display ")}
else
plotOutput("MultiPhasePlotFunction")
})
output$result32 <- renderUI({
if(is.null(dataGroup2()))
h5("No data imported")
else
tabsetPanel(tabPanel("Data", DT::dataTableOutput("DatasetPhases32")), tabPanel("Time range", uiOutput("MultiPhaseTimeRangeUI")), tabPanel("Plot of the characteristics", "Marginal posterior densities of the minimum (oldest curve) and the maximum (youngest curve of the same color) of the selected groups and their time range interval (segment above the curves) at the desired level." ,uiOutput("MultiPhasePlotUI")))
})
output$downloadMultiPhasesPlot <- downloadHandler(
filename = function() { paste("downloadGroupsPlot", '.png', sep='') },
content = function(file) {
png(file)
MultiPhasePlot(selectData32(), position_minimum = Position_beginning32(), title = input$titleMultiPhases, level = input$levelMultiPhases)
dev.off()
}
)
##########################################
#### Onglet : Succession de phases ####
# Initialize reactive values
succession <- reactiveValues()
output$PhasesSelection <- renderUI({
themes <- namesGroups()
succession$names <- themes
checkboxGroupInput('multiPhasesSelection', 'Select the minimum and the maximum of each group:', themes)
})
# Add observer on select-all button
observeEvent(input$selectAll4, {
succession$names <- namesGroups()
updateCheckboxGroupInput(session, 'multiPhasesSelection', selected = succession$names)
})
# Add observer on clear-all button
observeEvent(input$clearAll4, {
succession$names <- c()
updateCheckboxGroupInput(session, 'multiPhasesSelection', selected = "none")
})
# data selectionnees
selectData4 <- reactive({
dataGroup2()[, input$multiPhasesSelection, drop = FALSE]
})
# affichage table de donnees
output$DatasetPhases <- renderDataTable({
if(is.null(selectData4())){return()}
datatable(selectData4(), options = list(pageLength = 5, dom = 'tip'), rownames=FALSE)
})
## Ordering
Position_beginning <- reactive({
ordre <- order(selectData4()[1,])
pos = seq(1,length(ordre), by = 2)
return(ordre[pos])
})
output$AffichagePositions <- renderUI({
tags$div(
tags$p("Positions of the beginnings 1", as.character(Position_beginning()[1]), ""),
tags$p("Positions of the beginnings 2", as.character(Position_beginning()[2])),
tags$p("Positions of the beginnings 3", as.character(Position_beginning()[3]), ""),
tags$p("Positions of the beginnings 4", as.character(Position_beginning()[4]))
)
})
## Succession plot
output$MultiSuccessionFunction <- renderPlot({
MultiSuccessionPlot(selectData4(), position_minimum = Position_beginning(), level = input$levelSuccession, title = input$titleSuccessionPlot)
}, height = 600, width = 800 )
output$MultiSuccessionUI <- renderUI({
if( length(Position_beginning() ) < 2)
h5(" Nothing to display ")
else
plotOutput("MultiSuccessionFunction", width="100%")
})
output$downloadSuccessionPlot <- downloadHandler(
filename = function() { paste("downloadSuccessionPlot", '.png', sep='') },
content = function(file) {
png(file)
MultiSuccessionPlot(selectData4(), position_minimum = Position_beginning(), level = input$levelSuccession, title = input$titleSuccessionPlot)
dev.off()
}
)
## Succession Transitions
MultiPhasesTransitionFunction <- reactive({
MultiPhasesTransition(selectData4(), position_minimum = Position_beginning(), level = input$levelSuccession)
})
output$MultiPhasesTransitionResults <- renderTable({
if( length(Position_beginning() ) < 2){ return()}
else {
res = MultiPhasesTransitionFunction()
dim = dim(res)
names_MTR = rownames(res)
TRInf = NULL
TRSup = NULL
names = NULL
for (i in 1:dim[1]){
names = c(names, names_MTR[i])
TRInf= c(TRInf, res[i,2])
TRSup = c(TRSup, res[i,3])
}
data.frame("names"=names,"Transition range Inf"=TRInf, "Transition range Sup" = TRSup)
}
})
## Succession Gaps
MultiPhasesGapFunction <- reactive({
MultiPhasesGap(selectData4(), position_minimum = Position_beginning(), level = input$levelSuccession)
})
output$MultiPhasesGapResults <- renderTable({
if( length(Position_beginning() ) < 2) { return()}
else {
res = MultiPhasesGapFunction()
dim = dim(res)
names_MPG = rownames(res)
GapInf = NULL
GapSup = NULL
names = NULL
for (i in 1:dim[1]){
names = c(names, names_MPG[i])
GapInf= c(GapInf, res[i,2])
GapSup = c(GapSup, res[i,3])
}
data.frame("names"=names,"Gap range Inf"= GapInf, "Gap range Sup" = GapSup)
}
})
## Succession Time range
MultiPhaseTimeRangeFunction4 <- reactive({
MultiPhaseTimeRange(selectData4(), position_minimum = Position_beginning(), level = input$levelSuccession)
})
output$MultiPhaseTimeRange4UI <- renderTable({
if( length(Position_beginning() ) < 2){ return()}
else {
res = MultiPhaseTimeRangeFunction4()
dim = dim(res)
names_MTR = rownames(res)
PTInf = NULL
PTSup = NULL
names = NULL
for (i in 1:dim[1]){
names = c(names, names_MTR[i])
PTInf= c(PTInf, res[i,2])
PTSup = c(PTSup, res[i,3])
}
data.frame("names"=names,"Time Range Inf"=PTInf, "Time Range Sup" = PTSup)
}
})
## Output
output$Inputs4 <- renderUI({
if(is.null(dataGroup2()))
h5("No data imported")
else
tabsetPanel(tabPanel("Data", DT::dataTableOutput("DatasetPhases")), tabPanel("Time ranges", uiOutput("MultiPhaseTimeRange4UI")), tabPanel("Transition ranges", uiOutput("MultiPhasesTransitionResults")), tabPanel("Gap ranges", uiOutput("MultiPhasesGapResults")), tabPanel("Succession plot", fluidRow("Curves represent the marginal posterior densities of the minimum and maximum of each group. Segments correspond to time range of the group of the same color, two-coloured segments correspond to transition interval or to the gap range. A cross instead of a two-coloured segment means that there is no gap range at the desired level of confidence."),fluidRow(uiOutput("MultiSuccessionUI")) ))
})
##########################################
})
|
329a04fed4bd6457480f70a84182c1993481b6ae
|
8585dd8814d82d9a0870804d8a5acf9ad650d0ed
|
/tests/testthat/test-hypothesis.r
|
fe7aa5f95a0e29e3626a4c4d11cd102747e026bb
|
[] |
no_license
|
brentonk/coefbounds
|
7500d38188c87b41c2b6ebdbef5f1d5f04517dce
|
7c7b65a7d34ecec01ac6a6f1062c4eeab24cab08
|
refs/heads/master
| 2021-01-17T19:17:25.817055
| 2016-06-28T21:33:03
| 2016-06-28T21:33:03
| 59,677,826
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,179
|
r
|
test-hypothesis.r
|
# Tests for interval_hypothesis() from the coefbounds package: input
# validation, then agreement between hypothesis-test p-values and the
# confidence intervals produced by confint().
context("Interval hypothesis testing")
# Fix the RNG so the bootstrap-based p-values asserted below are reproducible.
# NOTE(review): statement order matters — the rnorm()/rexp() draws below
# consume this seeded stream.
set.seed(1776)
x1 <- rnorm(100)
x2 <- rnorm(100)
# Interval-censored outcome: yl is the lower bound, yu = yl + positive noise
# is the upper bound.
yl <- rnorm(100)
yu <- yl + rexp(100)
# Fitted bound model with 111 bootstrap replicates, shared by both tests.
fit <- coefbounds(yl + yu ~ x1 + x2, boot = 111)
test_that("interval_hypothesis() fails on bad input", {
  # A fit with no bootstrap replicates cannot support a test.
  fit_bad <- coefbounds(yl + yu ~ x1 + x2, boot = 0)
  expect_error(interval_hypothesis(fit_bad,
                                   term = "x1",
                                   interval = c(0, 0)),
               "no bootstrap")
  # Exactly one term may be tested at a time.
  expect_error(interval_hypothesis(fit,
                                   term = c("x1", "x2"),
                                   interval = c(0, 0)),
               "multiple terms")
  # The term must exist in the fitted model.
  expect_error(interval_hypothesis(fit,
                                   term = "x3",
                                   interval = c(0, 0)),
               "not in the model")
  # The hypothesized interval is a pair of endpoints...
  expect_error(interval_hypothesis(fit,
                                   term = "x1",
                                   interval = 0:2),
               "length 2")
  # ...ordered lower <= upper.
  expect_error(interval_hypothesis(fit,
                                   term = "x1",
                                   interval = 1:0),
               "exceeds upper bound")
})
test_that("interval_hypothesis() and confint() yield consistent results", {
  ## Directed hypothesis: testing the 95% "DU" interval as a subset hypothesis
  ## should give a p-value of about 0.05 (within one bootstrap replicate).
  hyp_x1 <- confint(fit,
                    parm = "x1",
                    level = 0.95,
                    type = "DU")
  test_x1 <- interval_hypothesis(fit,
                                 term = "x1",
                                 interval = hyp_x1,
                                 type = "subset")
  expect_true(abs(test_x1$p - 0.05) < 1 / test_x1$n_boot)
  ## Undirected hypothesis: the outer endpoints of the 90% "CC" interval,
  ## tested for equality, should give a p-value of about 0.10.
  hyp_x2 <- confint(fit,
                    parm = "x2",
                    level = 0.90,
                    type = "CC")
  # Take the lower bound of the first row and the upper bound of the second.
  hyp_x2 <- c(hyp_x2[1, 1], hyp_x2[2, 2])
  test_x2 <- interval_hypothesis(fit,
                                 term = "x2",
                                 interval = hyp_x2,
                                 type = "equal")
  expect_true(abs(test_x2$p - 0.10) < 1 / test_x2$n_boot)
})
|
15c861130e46da659292062c3946f1748898ba3a
|
ce4e06b44516ffa6028b3c7046537ec1e36a384d
|
/man/replace_null.Rd
|
571a2dfb147b36560ad448d2fd35f45ee7a36883
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
DiseaseOntology/DO.utils
|
6bdac0491225559d104e652289e1cfc1e3768cc8
|
64d8b7c272228f10a9277b380b6864125fd2377f
|
refs/heads/main
| 2023-08-17T18:54:02.968998
| 2023-08-11T18:29:44
| 2023-08-11T18:29:44
| 379,923,811
| 1
| 0
|
CC0-1.0
| 2023-07-05T19:37:42
| 2021-06-24T12:52:22
|
R
|
UTF-8
|
R
| false
| true
| 670
|
rd
|
replace_null.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/replace-methods.R
\name{replace_null}
\alias{replace_null}
\title{Replace NULLs with specified value}
\usage{
replace_null(data, replace)
}
\arguments{
\item{data}{A list (or list column in a data frame).}
\item{replace}{A single value to use for replacement.}
}
\description{
Replace NULLs (in lists) with specified value. \code{replace_null} will recurse
into nested lists but will skip internal components that are not lists
themselves (e.g. data.frames, matrices, etc). NOTE that \code{replace} will also
be added to empty lists (i.e. \code{list()}) but not other zero-length vectors.
}
|
54ecdc074abb8cb106bd3f44fb446d4f087a8568
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dynamichazard/examples/ddhazard.Rd.R
|
f15c0abc5bb7f97e17332cb5fa5ec85bfb44788e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 610
|
r
|
ddhazard.Rd.R
|
# Auto-extracted example script from the `ddhazard` help page of the
# dynamichazard package: fits dynamic hazard models to the `pbc` survival
# data (provided by the package's dependencies) and plots the fits.
library(dynamichazard)
### Name: ddhazard
### Title: Fitting Dynamic Hazard Models
### Aliases: ddhazard
### ** Examples
# example with first order model
library(dynamichazard)
# First-order fit: status == 2 is the event of interest, log(bili) the
# covariate; Q_0/Q set the initial and evolution covariances, `by` the
# interval length, and method = "GMA" selects the estimation routine.
fit <- ddhazard(
  Surv(time, status == 2) ~ log(bili), pbc, id = pbc$id, max_T = 3600,
  Q_0 = diag(1, 2), Q = diag(1e-4, 2), by = 50,
  control = ddhazard_control(method = "GMA"))
plot(fit)
# example with second order model
# Same model with order = 2; note Q_0 is now 4x4 to match the larger
# state vector of the second-order random walk.
fit <- ddhazard(
  Surv(time, status == 2) ~ log(bili), pbc, id = pbc$id, max_T = 3600,
  Q_0 = diag(1, 4), Q = diag(1e-4, 2), by = 50,
  control = ddhazard_control(method = "GMA"),
  order = 2)
plot(fit)
|
c7be0881a249fa53cd6e5b42f8a83840e048c925
|
2afc7ea170926ca71969e9177beadf69e3e7b25b
|
/man/data_package_shiny_handler.Rd
|
eff1b63cedffdfea35a0280e8f8a9da3520f7334
|
[
"MIT"
] |
permissive
|
mobb/datapie
|
7ffdbb806cc95cfbfcc08ed12987caa0f3a25750
|
23be1331e2bbb84b37693aa8317282a8e681ef21
|
refs/heads/master
| 2022-04-13T03:36:01.536365
| 2020-02-21T23:47:06
| 2020-02-21T23:47:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,312
|
rd
|
data_package_shiny_handler.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_package_shiny_handler.R
\name{data_package_shiny_handler}
\alias{data_package_shiny_handler}
\title{A data_package_* handler for the shiny app}
\usage{
data_package_shiny_handler(data.pkg.doi = NA, current.data = NULL,
download.dir = NULL)
}
\arguments{
\item{data.pkg.doi}{The doi of the package being downloaded.}
\item{current.data}{Current data loaded in the app, if any.}
\item{download.dir}{The download directory.}
}
\value{
Returns a list of tibbles containing data and metadata for a given data
package. Attributes include information about the doi and download folder,
where available.
}
\description{
A data_package_* handler for the shiny app
}
\details{
This function is largely meant to interact with the main Shiny app used in
this package.
If no inputs are provided, the function returns a default data set. The name
of the tibble in this dataset is "Example_dataset.csv". The doi attribute is
set to NA. The folder attribute is also set to NA.
}
\examples{
\dontrun{
#Load in the example data
data_package <- data_package_shiny_handler()
#Load an actual data set
doi_string <- "doi:10.6073/pasta/dd7955138eb963a847b861242390a48c"
data_package <- data_package_shiny_handler(data.pkg.doi = doi_string)
}
}
|
747cf8ecab5dae840c932055ce92b221275614f2
|
9540aa50a146f51f564c01276ded066b5fa834fe
|
/fun/plot_sim_intv.R
|
b360c86a3714fdbf979cee1f245464e69a0b6ec9
|
[] |
no_license
|
kklot/india-subnational-tbmodel
|
feebfe2c15661d53c7d6e89c3a4f00538d2d6dd3
|
c365a681624d6c2cba6bbd7b287c4f61c726472b
|
refs/heads/master
| 2023-04-06T03:54:47.961703
| 2019-12-24T15:27:27
| 2019-12-24T15:27:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,971
|
r
|
plot_sim_intv.R
|
plot_sim_intv <- function (
  itvs # Takes object with intervention results
) {
  # Draw a 2x2 panel summarising a TB intervention simulation:
  #   (1) incidence, (2) mortality (baseline vs intervention lines),
  #   (3) incremental cost by item, (4) relative cumulative reductions.
  #
  # Args:
  #   itvs: list with numeric series `inc` and `mort`, scalar incremental
  #         costs (`icr_all`, `ic_fl`, `ic_sl`, `ic_sm`, `ic_xp`, `ic_xr`)
  #         and relative reductions (`inc_av_pr`, `mort_av_pr`).
  #         NOTE(review): assumed shapes — confirm against the simulation
  #         code that produces `itvs`.
  #
  # Returns: the gtable from gridExtra::grid.arrange(), drawn as a side
  # effect.
  library(ggplot2)
  library(gridExtra)

  # Projection years end in 2025; the series length fixes the start year.
  x <- seq(2026 - length(itvs$inc), 2025, by = 1)
  fields <- c('inc', 'mort')
  titles <- c('Incidence', 'Mortality', 'Incremental Cost')
  # BUGFIX: the original called size(itvs$inc, 1), which is MATLAB —
  # base R has no size(), so the function errored unconditionally. The
  # resulting `runtype` flag (mcmc vs mle) was never used afterwards, so
  # the dead branch is removed; use NROW(itvs$inc) > 1 if it is ever
  # needed again. Other unused locals (red, grey, fs, xlimits) dropped.
  lw <- 1
  plots <- vector("list", length(fields))
  for (ii in seq_along(fields)) {
    # First half of the series is the baseline projection, second half
    # the intervention (data.frame recycles y if shorter than year).
    df <- data.frame(
      year = rep(x, 2),
      y = as.numeric(itvs[[fields[ii]]]),
      projection = c(rep('Baseline', length(x)), rep('Intervention', length(x)))
    )
    p <- ggplot(df, aes(x = year, y = y, col = projection)) +
      geom_line(size = lw) +
      ylim(0, max(df$y) * 1.1)
    # Finished line plot
    p <- p + labs(title = titles[ii], x = "Year", y = "Rate per 100K") +
      theme_classic()
    plots[[ii]] <- p
  }
  # Panel 3: incremental cost broken down by budget item.
  df <- data.frame(
    Cost = c(
      itvs$icr_all,
      itvs$ic_fl,
      itvs$ic_sl,
      itvs$ic_sm,
      itvs$ic_xp,
      itvs$ic_xr
    ),
    Item = c("Total", "FL", "SL", "smear", "xpert", "xray")
  )
  p <- ggplot(data = df, aes(x = Item, y = Cost)) +
    geom_bar(stat = "identity", fill = "steelblue")
  p <- p + labs(title = "Incremental Cost", x = "Item", y = "Cost (USD)")
  plots[[3]] <- p
  # Panel 4: relative cumulative reductions (fractions, shown as %).
  df <- data.frame(
    y = c(itvs$inc_av_pr, itvs$mort_av_pr),
    x = c("Incidence", "Mortality")
  )
  p <- ggplot(data = df, aes(x = x, y = y * 100)) +
    geom_bar(stat = "identity", fill = "steelblue")
  p <-
    p + labs(title = "Relative cumulative reductions 2017-2025", x = "Indicator", y = "Reduction(%)")
  plots[[4]] <- p
  grid.arrange(plots[[1]], plots[[2]], plots[[3]], plots[[4]], nrow = 2)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.